Diffstat (limited to 'chromium/gpu/vulkan')
-rw-r--r--  chromium/gpu/vulkan/BUILD.gn | 2
-rw-r--r--  chromium/gpu/vulkan/PRESUBMIT.py | 4
-rw-r--r--  chromium/gpu/vulkan/demo/vulkan_demo.cc | 1
-rwxr-xr-x  chromium/gpu/vulkan/generate_bindings.py | 177
-rw-r--r--  chromium/gpu/vulkan/init/gr_vk_memory_allocator_impl.cc | 54
-rw-r--r--  chromium/gpu/vulkan/vma_wrapper.cc | 63
-rw-r--r--  chromium/gpu/vulkan/vma_wrapper.h | 16
-rw-r--r--  chromium/gpu/vulkan/vulkan_command_buffer.cc | 3
-rw-r--r--  chromium/gpu/vulkan/vulkan_command_buffer.h | 2
-rw-r--r--  chromium/gpu/vulkan/vulkan_cxx.h | 42
-rw-r--r--  chromium/gpu/vulkan/vulkan_cxx_unittest.cc | 90
-rw-r--r--  chromium/gpu/vulkan/vulkan_device_queue.cc | 10
-rw-r--r--  chromium/gpu/vulkan/vulkan_device_queue.h | 2
-rw-r--r--  chromium/gpu/vulkan/vulkan_fence_helper.cc | 32
-rw-r--r--  chromium/gpu/vulkan/vulkan_fence_helper.h | 2
-rw-r--r--  chromium/gpu/vulkan/vulkan_function_pointers.cc | 575
-rw-r--r--  chromium/gpu/vulkan/vulkan_function_pointers.h | 610
-rw-r--r--  chromium/gpu/vulkan/vulkan_image.cc | 9
-rw-r--r--  chromium/gpu/vulkan/vulkan_image.h | 3
-rw-r--r--  chromium/gpu/vulkan/vulkan_image_android.cc | 3
-rw-r--r--  chromium/gpu/vulkan/vulkan_image_fuchsia.cc | 1
-rw-r--r--  chromium/gpu/vulkan/vulkan_image_linux.cc | 1
-rw-r--r--  chromium/gpu/vulkan/vulkan_image_unittest.cc | 15
-rw-r--r--  chromium/gpu/vulkan/vulkan_image_win.cc | 1
-rw-r--r--  chromium/gpu/vulkan/vulkan_implementation.h | 4
-rw-r--r--  chromium/gpu/vulkan/vulkan_instance.cc | 19
-rw-r--r--  chromium/gpu/vulkan/vulkan_instance.h | 2
-rw-r--r--  chromium/gpu/vulkan/vulkan_surface.cc | 27
-rw-r--r--  chromium/gpu/vulkan/vulkan_surface.h | 7
-rw-r--r--  chromium/gpu/vulkan/vulkan_swap_chain.cc | 418
-rw-r--r--  chromium/gpu/vulkan/vulkan_swap_chain.h | 105
-rw-r--r--  chromium/gpu/vulkan/vulkan_util.cc | 47
-rw-r--r--  chromium/gpu/vulkan/vulkan_util.h | 7
-rw-r--r--  chromium/gpu/vulkan/x/vulkan_implementation_x11.cc | 5
-rw-r--r--  chromium/gpu/vulkan/x/vulkan_surface_x11.cc | 53
-rw-r--r--  chromium/gpu/vulkan/x/vulkan_surface_x11.h | 15
36 files changed, 1384 insertions, 1043 deletions
diff --git a/chromium/gpu/vulkan/BUILD.gn b/chromium/gpu/vulkan/BUILD.gn
index 95d013e8bea..e0559150895 100644
--- a/chromium/gpu/vulkan/BUILD.gn
+++ b/chromium/gpu/vulkan/BUILD.gn
@@ -85,6 +85,7 @@ if (enable_vulkan) {
"vulkan_command_pool.h",
"vulkan_crash_keys.cc",
"vulkan_crash_keys.h",
+ "vulkan_cxx.h",
"vulkan_device_queue.cc",
"vulkan_device_queue.h",
"vulkan_fence_helper.cc",
@@ -203,6 +204,7 @@ if (enable_vulkan) {
"tests/basic_vulkan_test.h",
"tests/vulkan_test.cc",
"tests/vulkan_tests_main.cc",
+ "vulkan_cxx_unittest.cc",
"vulkan_fence_helper_unittest.cc",
"vulkan_image_unittest.cc",
]
diff --git a/chromium/gpu/vulkan/PRESUBMIT.py b/chromium/gpu/vulkan/PRESUBMIT.py
index c12a8c8de34..6deb715ca00 100644
--- a/chromium/gpu/vulkan/PRESUBMIT.py
+++ b/chromium/gpu/vulkan/PRESUBMIT.py
@@ -24,8 +24,8 @@ def CommonChecks(input_api, output_api):
if generated_files and not generating_files:
long_text = 'Changed files:\n'
- for file in generated_files:
- long_text += file.LocalPath() + '\n'
+ for generated_file in generated_files:
+ long_text += generated_file.LocalPath() + '\n'
long_text += '\n'
messages.append(output_api.PresubmitError(
'Vulkan function pointer generated files changed but the generator '
diff --git a/chromium/gpu/vulkan/demo/vulkan_demo.cc b/chromium/gpu/vulkan/demo/vulkan_demo.cc
index e46d726fb99..b8dc416e517 100644
--- a/chromium/gpu/vulkan/demo/vulkan_demo.cc
+++ b/chromium/gpu/vulkan/demo/vulkan_demo.cc
@@ -204,6 +204,7 @@ void VulkanDemo::RenderFrame() {
.fSignalSemaphores = &semaphore,
};
sk_surface_->flush(SkSurface::BackendSurfaceAccess::kPresent, flush_info);
+ sk_surface_->getContext()->submit();
auto backend = sk_surface_->getBackendRenderTarget(
SkSurface::kFlushRead_BackendHandleAccess);
GrVkImageInfo vk_image_info;
diff --git a/chromium/gpu/vulkan/generate_bindings.py b/chromium/gpu/vulkan/generate_bindings.py
index eed96c896b9..1067f8fa5c2 100755
--- a/chromium/gpu/vulkan/generate_bindings.py
+++ b/chromium/gpu/vulkan/generate_bindings.py
@@ -22,11 +22,13 @@ from reg import Registry
registry = Registry()
registry.loadFile(open(path.join(vulkan_reg_path, "vk.xml")))
+VULKAN_REQUIRED_API_VERSION = 'VK_API_VERSION_1_1'
+
VULKAN_UNASSOCIATED_FUNCTIONS = [
{
'functions': [
# vkGetInstanceProcAddr belongs here but is handled specially.
- # vkEnumerateInstanceVersion belongs here but is handled specially.
+ 'vkEnumerateInstanceVersion',
'vkCreateInstance',
'vkEnumerateInstanceExtensionProperties',
'vkEnumerateInstanceLayerProperties',
@@ -43,9 +45,11 @@ VULKAN_INSTANCE_FUNCTIONS = [
'vkEnumerateDeviceLayerProperties',
'vkEnumeratePhysicalDevices',
'vkGetDeviceProcAddr',
- 'vkGetPhysicalDeviceFeatures',
+ 'vkGetPhysicalDeviceFeatures2',
'vkGetPhysicalDeviceFormatProperties',
+ 'vkGetPhysicalDeviceImageFormatProperties2',
'vkGetPhysicalDeviceMemoryProperties',
+ 'vkGetPhysicalDeviceMemoryProperties2',
'vkGetPhysicalDeviceProperties',
'vkGetPhysicalDeviceQueueFamilyProperties',
]
@@ -97,22 +101,6 @@ VULKAN_INSTANCE_FUNCTIONS = [
'vkCreateImagePipeSurfaceFUCHSIA',
]
},
- {
- 'min_api_version': 'VK_API_VERSION_1_1',
- 'functions': [
- 'vkGetPhysicalDeviceImageFormatProperties2',
- ]
- },
- {
- # vkGetPhysicalDeviceFeatures2() is defined in Vulkan 1.1 or suffixed in the
- # VK_KHR_get_physical_device_properties2 extension.
- 'min_api_version': 'VK_API_VERSION_1_1',
- 'extension': 'VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME',
- 'extension_suffix': 'KHR',
- 'functions': [
- 'vkGetPhysicalDeviceFeatures2',
- ]
- },
]
VULKAN_DEVICE_FUNCTIONS = [
@@ -123,7 +111,9 @@ VULKAN_DEVICE_FUNCTIONS = [
'vkAllocateMemory',
'vkBeginCommandBuffer',
'vkBindBufferMemory',
+ 'vkBindBufferMemory2',
'vkBindImageMemory',
+ 'vkBindImageMemory2',
'vkCmdBeginRenderPass',
'vkCmdCopyBuffer',
'vkCmdCopyBufferToImage',
@@ -164,9 +154,12 @@ VULKAN_DEVICE_FUNCTIONS = [
'vkFreeMemory',
'vkInvalidateMappedMemoryRanges',
'vkGetBufferMemoryRequirements',
+ 'vkGetBufferMemoryRequirements2',
'vkGetDeviceQueue',
+ 'vkGetDeviceQueue2',
'vkGetFenceStatus',
'vkGetImageMemoryRequirements',
+ 'vkGetImageMemoryRequirements2',
'vkMapMemory',
'vkQueueSubmit',
'vkQueueWaitIdle',
@@ -178,14 +171,6 @@ VULKAN_DEVICE_FUNCTIONS = [
]
},
{
- 'min_api_version': 'VK_API_VERSION_1_1',
- 'functions': [
- 'vkGetDeviceQueue2',
- 'vkGetBufferMemoryRequirements2',
- 'vkGetImageMemoryRequirements2',
- ]
- },
- {
'ifdef': 'defined(OS_ANDROID)',
'extension':
'VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME',
@@ -278,10 +263,11 @@ LICENSE_AND_HEADER = """\
"""
-def WriteFunctionsInternal(file, functions, gen_content, check_extension=False):
+def WriteFunctionsInternal(out_file, functions, gen_content,
+ check_extension=False):
for group in functions:
if 'ifdef' in group:
- file.write('#if %s\n' % group['ifdef'])
+ out_file.write('#if %s\n' % group['ifdef'])
extension = group['extension'] if 'extension' in group else ''
min_api_version = \
@@ -289,53 +275,53 @@ def WriteFunctionsInternal(file, functions, gen_content, check_extension=False):
if not check_extension:
for func in group['functions']:
- file.write(gen_content(func))
+ out_file.write(gen_content(func))
elif not extension and not min_api_version:
for func in group['functions']:
- file.write(gen_content(func))
+ out_file.write(gen_content(func))
else:
if min_api_version:
- file.write(' if (api_version >= %s) {\n' % min_api_version)
+ out_file.write(' if (api_version >= %s) {\n' % min_api_version)
for func in group['functions']:
- file.write(
+ out_file.write(
gen_content(func))
- file.write('}\n')
+ out_file.write('}\n')
if extension:
- file.write('else ')
+ out_file.write('else ')
if extension:
- file.write('if (gfx::HasExtension(enabled_extensions, %s)) {\n' %
+ out_file.write('if (gfx::HasExtension(enabled_extensions, %s)) {\n' %
extension)
extension_suffix = \
group['extension_suffix'] if 'extension_suffix' in group \
else ''
for func in group['functions']:
- file.write(gen_content(func, extension_suffix))
+ out_file.write(gen_content(func, extension_suffix))
- file.write('}\n')
+ out_file.write('}\n')
if 'ifdef' in group:
- file.write('#endif // %s\n' % group['ifdef'])
- file.write('\n')
+ out_file.write('#endif // %s\n' % group['ifdef'])
+ out_file.write('\n')
-def WriteFunctions(file, functions, template, check_extension=False):
+def WriteFunctions(out_file, functions, template, check_extension=False):
def gen_content(func, suffix=''):
return template.substitute({'name': func,'extension_suffix': suffix})
- WriteFunctionsInternal(file, functions, gen_content, check_extension)
+ WriteFunctionsInternal(out_file, functions, gen_content, check_extension)
-def WriteFunctionDeclarations(file, functions):
- template = Template(' VulkanFunction<PFN_${name}> ${name}Fn;\n')
- WriteFunctions(file, functions, template)
+def WriteFunctionDeclarations(out_file, functions):
+ template = Template(' VulkanFunction<PFN_${name}> ${name};\n')
+ WriteFunctions(out_file, functions, template)
-def WriteMacros(file, functions):
+def WriteMacros(out_file, functions):
def gen_content(func, suffix=''):
if func not in registry.cmddict:
# Some fuchsia functions are not in the vulkan registry, so use macro for
# them.
template = Template(
- '#define $name gpu::GetVulkanFunctionPointers()->${name}Fn\n')
+ '#define $name gpu::GetVulkanFunctionPointers()->${name}\n')
return template.substitute({'name': func, 'extension_suffix' : suffix})
none_str = lambda s: s if s else ''
cmd = registry.cmddict[func].elem
@@ -348,7 +334,7 @@ def WriteMacros(file, functions):
pdecl += text + tail
n = len(params)
- callstat = 'return gpu::GetVulkanFunctionPointers()->%sFn(' % func
+ callstat = 'return gpu::GetVulkanFunctionPointers()->%s(' % func
paramdecl = '('
if n > 0:
paramnames = (''.join(t for t in p.itertext())
@@ -364,12 +350,12 @@ def WriteMacros(file, functions):
pdecl += paramdecl
return 'ALWAYS_INLINE %s { %s; }\n' % (pdecl, callstat)
- WriteFunctionsInternal(file, functions, gen_content)
+ WriteFunctionsInternal(out_file, functions, gen_content)
-def GenerateHeaderFile(file):
+def GenerateHeaderFile(out_file):
"""Generates gpu/vulkan/vulkan_function_pointers.h"""
- file.write(LICENSE_AND_HEADER +
+ out_file.write(LICENSE_AND_HEADER +
"""
#ifndef GPU_VULKAN_VULKAN_FUNCTION_POINTERS_H_
@@ -408,6 +394,8 @@ namespace gpu {
struct VulkanFunctionPointers;
+constexpr uint32_t kVulkanRequiredApiVersion = %s;
+
COMPONENT_EXPORT(VULKAN) VulkanFunctionPointers* GetVulkanFunctionPointers();
struct COMPONENT_EXPORT(VULKAN) VulkanFunctionPointers {
@@ -437,12 +425,12 @@ struct COMPONENT_EXPORT(VULKAN) VulkanFunctionPointers {
public:
using Fn = R(VKAPI_PTR*)(Args...);
- explicit operator bool() {
+ explicit operator bool() const {
return !!fn_;
}
NO_SANITIZE("cfi-icall")
- R operator()(Args... args) {
+ R operator()(Args... args) const {
return fn_(args...);
}
@@ -460,28 +448,27 @@ struct COMPONENT_EXPORT(VULKAN) VulkanFunctionPointers {
};
// Unassociated functions
- VulkanFunction<PFN_vkEnumerateInstanceVersion> vkEnumerateInstanceVersionFn;
- VulkanFunction<PFN_vkGetInstanceProcAddr> vkGetInstanceProcAddrFn;
+ VulkanFunction<PFN_vkGetInstanceProcAddr> vkGetInstanceProcAddr;
-""")
+""" % VULKAN_REQUIRED_API_VERSION)
- WriteFunctionDeclarations(file, VULKAN_UNASSOCIATED_FUNCTIONS)
+ WriteFunctionDeclarations(out_file, VULKAN_UNASSOCIATED_FUNCTIONS)
- file.write("""\
+ out_file.write("""\
// Instance functions
""")
- WriteFunctionDeclarations(file, VULKAN_INSTANCE_FUNCTIONS);
+ WriteFunctionDeclarations(out_file, VULKAN_INSTANCE_FUNCTIONS);
- file.write("""\
+ out_file.write("""\
// Device functions
""")
- WriteFunctionDeclarations(file, VULKAN_DEVICE_FUNCTIONS)
+ WriteFunctionDeclarations(out_file, VULKAN_DEVICE_FUNCTIONS)
- file.write("""\
+ out_file.write("""\
};
} // namespace gpu
@@ -489,33 +476,32 @@ struct COMPONENT_EXPORT(VULKAN) VulkanFunctionPointers {
// Unassociated functions
""")
- WriteMacros(file, [{'functions': [ 'vkGetInstanceProcAddr' ,
- 'vkEnumerateInstanceVersion']}])
- WriteMacros(file, VULKAN_UNASSOCIATED_FUNCTIONS)
+ WriteMacros(out_file, [{'functions': [ 'vkGetInstanceProcAddr']}])
+ WriteMacros(out_file, VULKAN_UNASSOCIATED_FUNCTIONS)
- file.write("""\
+ out_file.write("""\
// Instance functions
""")
- WriteMacros(file, VULKAN_INSTANCE_FUNCTIONS);
+ WriteMacros(out_file, VULKAN_INSTANCE_FUNCTIONS);
- file.write("""\
+ out_file.write("""\
// Device functions
""")
- WriteMacros(file, VULKAN_DEVICE_FUNCTIONS)
+ WriteMacros(out_file, VULKAN_DEVICE_FUNCTIONS)
- file.write("""\
+ out_file.write("""\
#endif // GPU_VULKAN_VULKAN_FUNCTION_POINTERS_H_""")
-def WriteFunctionPointerInitialization(file, proc_addr_function, parent,
+def WriteFunctionPointerInitialization(out_file, proc_addr_function, parent,
functions):
- template = Template(""" ${name}Fn = reinterpret_cast<PFN_${name}>(
+ template = Template(""" ${name} = reinterpret_cast<PFN_${name}>(
${get_proc_addr}(${parent}, "${name}${extension_suffix}"));
- if (!${name}Fn) {
+ if (!${name}) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "${name}${extension_suffix}";
return false;
@@ -529,24 +515,24 @@ def WriteFunctionPointerInitialization(file, proc_addr_function, parent,
'name': '${name}', 'extension_suffix': '${extension_suffix}',
'get_proc_addr': proc_addr_function, 'parent': parent}))
- WriteFunctions(file, functions, template, check_extension=True)
+ WriteFunctions(out_file, functions, template, check_extension=True)
-def WriteUnassociatedFunctionPointerInitialization(file, functions):
- WriteFunctionPointerInitialization(file, 'vkGetInstanceProcAddr', 'nullptr',
- functions)
+def WriteUnassociatedFunctionPointerInitialization(out_file, functions):
+ WriteFunctionPointerInitialization(out_file, 'vkGetInstanceProcAddr',
+ 'nullptr', functions)
-def WriteInstanceFunctionPointerInitialization(file, functions):
- WriteFunctionPointerInitialization(file, 'vkGetInstanceProcAddr',
+def WriteInstanceFunctionPointerInitialization(out_file, functions):
+ WriteFunctionPointerInitialization(out_file, 'vkGetInstanceProcAddr',
'vk_instance', functions)
-def WriteDeviceFunctionPointerInitialization(file, functions):
- WriteFunctionPointerInitialization(file, 'vkGetDeviceProcAddr', 'vk_device',
- functions)
+def WriteDeviceFunctionPointerInitialization(out_file, functions):
+ WriteFunctionPointerInitialization(out_file, 'vkGetDeviceProcAddr',
+ 'vk_device', functions)
-def GenerateSourceFile(file):
+def GenerateSourceFile(out_file):
"""Generates gpu/vulkan/vulkan_function_pointers.cc"""
- file.write(LICENSE_AND_HEADER +
+ out_file.write(LICENSE_AND_HEADER +
"""
#include "gpu/vulkan/vulkan_function_pointers.h"
@@ -568,23 +554,17 @@ bool VulkanFunctionPointers::BindUnassociatedFunctionPointers() {
// vkGetInstanceProcAddr must be handled specially since it gets its function
// pointer through base::GetFunctionPOinterFromNativeLibrary(). Other Vulkan
// functions don't do this.
- vkGetInstanceProcAddrFn = reinterpret_cast<PFN_vkGetInstanceProcAddr>(
+ vkGetInstanceProcAddr = reinterpret_cast<PFN_vkGetInstanceProcAddr>(
base::GetFunctionPointerFromNativeLibrary(vulkan_loader_library,
"vkGetInstanceProcAddr"));
- if (!vkGetInstanceProcAddrFn)
+ if (!vkGetInstanceProcAddr)
return false;
-
- vkEnumerateInstanceVersionFn =
- reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
- vkGetInstanceProcAddr(nullptr, "vkEnumerateInstanceVersion"));
- // vkEnumerateInstanceVersion didn't exist in Vulkan 1.0, so we should
- // proceed even if we fail to get vkEnumerateInstanceVersion pointer.
""")
WriteUnassociatedFunctionPointerInitialization(
- file, VULKAN_UNASSOCIATED_FUNCTIONS)
+ out_file, VULKAN_UNASSOCIATED_FUNCTIONS)
- file.write("""\
+ out_file.write("""\
return true;
}
@@ -593,11 +573,13 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
VkInstance vk_instance,
uint32_t api_version,
const gfx::ExtensionSet& enabled_extensions) {
+ DCHECK_GE(api_version, kVulkanRequiredApiVersion);
""")
- WriteInstanceFunctionPointerInitialization(file, VULKAN_INSTANCE_FUNCTIONS);
+ WriteInstanceFunctionPointerInitialization(
+ out_file, VULKAN_INSTANCE_FUNCTIONS);
- file.write("""\
+ out_file.write("""\
return true;
}
@@ -606,11 +588,12 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
VkDevice vk_device,
uint32_t api_version,
const gfx::ExtensionSet& enabled_extensions) {
+ DCHECK_GE(api_version, kVulkanRequiredApiVersion);
// Device functions
""")
- WriteDeviceFunctionPointerInitialization(file, VULKAN_DEVICE_FUNCTIONS)
+ WriteDeviceFunctionPointerInitialization(out_file, VULKAN_DEVICE_FUNCTIONS)
- file.write("""\
+ out_file.write("""\
return true;
}
diff --git a/chromium/gpu/vulkan/init/gr_vk_memory_allocator_impl.cc b/chromium/gpu/vulkan/init/gr_vk_memory_allocator_impl.cc
index 4192cd859ce..0a900d01805 100644
--- a/chromium/gpu/vulkan/init/gr_vk_memory_allocator_impl.cc
+++ b/chromium/gpu/vulkan/init/gr_vk_memory_allocator_impl.cc
@@ -24,9 +24,9 @@ class GrVkMemoryAllocatorImpl : public GrVkMemoryAllocator {
GrVkMemoryAllocatorImpl& operator=(const GrVkMemoryAllocatorImpl&) = delete;
private:
- bool allocateMemoryForImage(VkImage image,
- AllocationPropertyFlags flags,
- GrVkBackendMemory* backend_memory) override {
+ VkResult allocateImageMemory(VkImage image,
+ AllocationPropertyFlags flags,
+ GrVkBackendMemory* backend_memory) override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.vulkan.vma"),
"GrVkMemoryAllocatorImpl::allocateMemoryForImage");
VmaAllocationCreateInfo info;
@@ -53,17 +53,15 @@ class GrVkMemoryAllocatorImpl : public GrVkMemoryAllocator {
VmaAllocation allocation;
VkResult result = vma::AllocateMemoryForImage(allocator_, image, &info,
&allocation, nullptr);
- if (VK_SUCCESS != result) {
- return false;
- }
- *backend_memory = reinterpret_cast<GrVkBackendMemory>(allocation);
- return true;
+ if (VK_SUCCESS == result)
+ *backend_memory = reinterpret_cast<GrVkBackendMemory>(allocation);
+ return result;
}
- bool allocateMemoryForBuffer(VkBuffer buffer,
- BufferUsage usage,
- AllocationPropertyFlags flags,
- GrVkBackendMemory* backend_memory) override {
+ VkResult allocateBufferMemory(VkBuffer buffer,
+ BufferUsage usage,
+ AllocationPropertyFlags flags,
+ GrVkBackendMemory* backend_memory) override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.vulkan.vma"),
"GrVkMemoryAllocatorImpl::allocateMemoryForBuffer");
VmaAllocationCreateInfo info;
@@ -121,12 +119,11 @@ class GrVkMemoryAllocatorImpl : public GrVkMemoryAllocator {
&allocation, nullptr);
}
}
- if (VK_SUCCESS != result) {
- return false;
- }
- *backend_memory = reinterpret_cast<GrVkBackendMemory>(allocation);
- return true;
+ if (VK_SUCCESS == result)
+ *backend_memory = reinterpret_cast<GrVkBackendMemory>(allocation);
+
+ return result;
}
void freeMemory(const GrVkBackendMemory& memory) override {
@@ -162,15 +159,12 @@ class GrVkMemoryAllocatorImpl : public GrVkMemoryAllocator {
alloc->fBackendMemory = memory;
}
- void* mapMemory(const GrVkBackendMemory& memory) override {
+ VkResult mapMemory(const GrVkBackendMemory& memory, void** data) override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.vulkan.vma"),
"GrVkMemoryAllocatorImpl::mapMemory");
const VmaAllocation allocation =
reinterpret_cast<const VmaAllocation>(memory);
- void* data;
- return vma::MapMemory(allocator_, allocation, &data) == VK_SUCCESS
- ? data
- : nullptr;
+ return vma::MapMemory(allocator_, allocation, data);
}
void unmapMemory(const GrVkBackendMemory& memory) override {
@@ -181,24 +175,24 @@ class GrVkMemoryAllocatorImpl : public GrVkMemoryAllocator {
vma::UnmapMemory(allocator_, allocation);
}
- void flushMappedMemory(const GrVkBackendMemory& memory,
- VkDeviceSize offset,
- VkDeviceSize size) override {
+ VkResult flushMemory(const GrVkBackendMemory& memory,
+ VkDeviceSize offset,
+ VkDeviceSize size) override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.vulkan.vma"),
"GrVkMemoryAllocatorImpl::flushMappedMemory");
const VmaAllocation allocation =
reinterpret_cast<const VmaAllocation>(memory);
- vma::FlushAllocation(allocator_, allocation, offset, size);
+ return vma::FlushAllocation(allocator_, allocation, offset, size);
}
- void invalidateMappedMemory(const GrVkBackendMemory& memory,
- VkDeviceSize offset,
- VkDeviceSize size) override {
+ VkResult invalidateMemory(const GrVkBackendMemory& memory,
+ VkDeviceSize offset,
+ VkDeviceSize size) override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.vulkan.vma"),
"GrVkMemoryAllocatorImpl::invalidateMappedMemory");
const VmaAllocation allocation =
reinterpret_cast<const VmaAllocation>(memory);
- vma::InvalidateAllocation(allocator_, allocation, offset, size);
+ return vma::InvalidateAllocation(allocator_, allocation, offset, size);
}
uint64_t totalUsedMemory() const override {
diff --git a/chromium/gpu/vulkan/vma_wrapper.cc b/chromium/gpu/vulkan/vma_wrapper.cc
index 1c8d72f598d..d0541c92ad6 100644
--- a/chromium/gpu/vulkan/vma_wrapper.cc
+++ b/chromium/gpu/vulkan/vma_wrapper.cc
@@ -18,27 +18,31 @@ VkResult CreateAllocator(VkPhysicalDevice physical_device,
VmaAllocator* pAllocator) {
auto* function_pointers = gpu::GetVulkanFunctionPointers();
VmaVulkanFunctions functions = {
- function_pointers->vkGetPhysicalDevicePropertiesFn.get(),
- function_pointers->vkGetPhysicalDeviceMemoryPropertiesFn.get(),
- function_pointers->vkAllocateMemoryFn.get(),
- function_pointers->vkFreeMemoryFn.get(),
- function_pointers->vkMapMemoryFn.get(),
- function_pointers->vkUnmapMemoryFn.get(),
- function_pointers->vkFlushMappedMemoryRangesFn.get(),
- function_pointers->vkInvalidateMappedMemoryRangesFn.get(),
- function_pointers->vkBindBufferMemoryFn.get(),
- function_pointers->vkBindImageMemoryFn.get(),
- function_pointers->vkGetBufferMemoryRequirementsFn.get(),
- function_pointers->vkGetImageMemoryRequirementsFn.get(),
- function_pointers->vkCreateBufferFn.get(),
- function_pointers->vkDestroyBufferFn.get(),
- function_pointers->vkCreateImageFn.get(),
- function_pointers->vkDestroyImageFn.get(),
- function_pointers->vkCmdCopyBufferFn.get(),
- function_pointers->vkGetBufferMemoryRequirements2Fn.get(),
- function_pointers->vkGetImageMemoryRequirements2Fn.get(),
+ function_pointers->vkGetPhysicalDeviceProperties.get(),
+ function_pointers->vkGetPhysicalDeviceMemoryProperties.get(),
+ function_pointers->vkAllocateMemory.get(),
+ function_pointers->vkFreeMemory.get(),
+ function_pointers->vkMapMemory.get(),
+ function_pointers->vkUnmapMemory.get(),
+ function_pointers->vkFlushMappedMemoryRanges.get(),
+ function_pointers->vkInvalidateMappedMemoryRanges.get(),
+ function_pointers->vkBindBufferMemory.get(),
+ function_pointers->vkBindImageMemory.get(),
+ function_pointers->vkGetBufferMemoryRequirements.get(),
+ function_pointers->vkGetImageMemoryRequirements.get(),
+ function_pointers->vkCreateBuffer.get(),
+ function_pointers->vkDestroyBuffer.get(),
+ function_pointers->vkCreateImage.get(),
+ function_pointers->vkDestroyImage.get(),
+ function_pointers->vkCmdCopyBuffer.get(),
+ function_pointers->vkGetBufferMemoryRequirements2.get(),
+ function_pointers->vkGetImageMemoryRequirements2.get(),
+ function_pointers->vkBindBufferMemory2.get(),
+ function_pointers->vkBindImageMemory2.get(),
+ function_pointers->vkGetPhysicalDeviceMemoryProperties2.get(),
};
+ static_assert(kVulkanRequiredApiVersion >= VK_API_VERSION_1_1, "");
VmaAllocatorCreateInfo allocator_info = {
.flags = VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT,
.physicalDevice = physical_device,
@@ -51,6 +55,7 @@ VkResult CreateAllocator(VkPhysicalDevice physical_device,
.preferredLargeHeapBlockSize = 4 * 1024 * 1024,
.pVulkanFunctions = &functions,
.instance = instance,
+ .vulkanApiVersion = kVulkanRequiredApiVersion,
};
return vmaCreateAllocator(&allocator_info, pAllocator);
@@ -113,18 +118,18 @@ void FreeMemory(VmaAllocator allocator, VmaAllocation allocation) {
vmaFreeMemory(allocator, allocation);
}
-void FlushAllocation(VmaAllocator allocator,
- VmaAllocation allocation,
- VkDeviceSize offset,
- VkDeviceSize size) {
- vmaFlushAllocation(allocator, allocation, offset, size);
+VkResult FlushAllocation(VmaAllocator allocator,
+ VmaAllocation allocation,
+ VkDeviceSize offset,
+ VkDeviceSize size) {
+ return vmaFlushAllocation(allocator, allocation, offset, size);
}
-void InvalidateAllocation(VmaAllocator allocator,
- VmaAllocation allocation,
- VkDeviceSize offset,
- VkDeviceSize size) {
- vmaInvalidateAllocation(allocator, allocation, offset, size);
+VkResult InvalidateAllocation(VmaAllocator allocator,
+ VmaAllocation allocation,
+ VkDeviceSize offset,
+ VkDeviceSize size) {
+ return vmaInvalidateAllocation(allocator, allocation, offset, size);
}
void GetAllocationInfo(VmaAllocator allocator,
diff --git a/chromium/gpu/vulkan/vma_wrapper.h b/chromium/gpu/vulkan/vma_wrapper.h
index 02e3b5ff7ae..502bdbcd15e 100644
--- a/chromium/gpu/vulkan/vma_wrapper.h
+++ b/chromium/gpu/vulkan/vma_wrapper.h
@@ -66,16 +66,16 @@ COMPONENT_EXPORT(VULKAN)
void FreeMemory(VmaAllocator allocator, VmaAllocation allocation);
COMPONENT_EXPORT(VULKAN)
-void FlushAllocation(VmaAllocator allocator,
- VmaAllocation allocation,
- VkDeviceSize offset,
- VkDeviceSize size);
+VkResult FlushAllocation(VmaAllocator allocator,
+ VmaAllocation allocation,
+ VkDeviceSize offset,
+ VkDeviceSize size);
COMPONENT_EXPORT(VULKAN)
-void InvalidateAllocation(VmaAllocator allocator,
- VmaAllocation allocation,
- VkDeviceSize offset,
- VkDeviceSize size);
+VkResult InvalidateAllocation(VmaAllocator allocator,
+ VmaAllocation allocation,
+ VkDeviceSize offset,
+ VkDeviceSize size);
COMPONENT_EXPORT(VULKAN)
void GetAllocationInfo(VmaAllocator allocator,
diff --git a/chromium/gpu/vulkan/vulkan_command_buffer.cc b/chromium/gpu/vulkan/vulkan_command_buffer.cc
index d3cb4b4fe3c..56b91da1beb 100644
--- a/chromium/gpu/vulkan/vulkan_command_buffer.cc
+++ b/chromium/gpu/vulkan/vulkan_command_buffer.cc
@@ -8,6 +8,7 @@
#include "gpu/vulkan/vulkan_command_pool.h"
#include "gpu/vulkan/vulkan_device_queue.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
+#include "gpu/vulkan/vulkan_util.h"
namespace gpu {
@@ -174,7 +175,7 @@ bool VulkanCommandBuffer::Submit(uint32_t num_wait_semaphores,
}
result =
- vkQueueSubmit(device_queue_->GetVulkanQueue(), 1, &submit_info, fence);
+ QueueSubmitHook(device_queue_->GetVulkanQueue(), 1, &submit_info, fence);
if (VK_SUCCESS != result) {
vkDestroyFence(device_queue_->GetVulkanDevice(), fence, nullptr);
diff --git a/chromium/gpu/vulkan/vulkan_command_buffer.h b/chromium/gpu/vulkan/vulkan_command_buffer.h
index f0cf3ef29a4..282c3e57471 100644
--- a/chromium/gpu/vulkan/vulkan_command_buffer.h
+++ b/chromium/gpu/vulkan/vulkan_command_buffer.h
@@ -7,8 +7,8 @@
#include <vulkan/vulkan.h>
+#include "base/check.h"
#include "base/component_export.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "gpu/vulkan/vulkan_fence_helper.h"
diff --git a/chromium/gpu/vulkan/vulkan_cxx.h b/chromium/gpu/vulkan/vulkan_cxx.h
new file mode 100644
index 00000000000..c1059c049b4
--- /dev/null
+++ b/chromium/gpu/vulkan/vulkan_cxx.h
@@ -0,0 +1,42 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_VULKAN_VULKAN_CXX_H_
+#define GPU_VULKAN_VULKAN_CXX_H_
+
+#include <ostream>
+
+#include "base/compiler_specific.h"
+
+// Disable vulkan prototypes.
+#if !defined(VK_NO_PROTOTYPES)
+#define VK_NO_PROTOTYPES 1
+#endif
+
+// Disable dynamic loader tool.
+#define VULKAN_HPP_ENABLE_DYNAMIC_LOADER_TOOL 0
+
+// Disable c++ exceptions.
+#define VULKAN_HPP_NO_EXCEPTIONS 1
+
+// Disable dynamic dispatch loader.
+#define VULKAN_HPP_DISPATCH_LOADER_DYNAMIC 0
+
+// Set gpu::VulkanFunctionPointers as the default dispatcher.
+#define VULKAN_HPP_DEFAULT_DISPATCHER (*gpu::GetVulkanFunctionPointers())
+#define VULKAN_HPP_DEFAULT_DISPATCHER_TYPE gpu::VulkanFunctionPointers
+
+#define VULKAN_HPP_TYPESAFE_CONVERSION
+
+#include "gpu/vulkan/vulkan_function_pointers.h"
+
+#include <vulkan/vulkan.hpp>
+
+// operator for LOG() << result
+ALWAYS_INLINE std::ostream& operator<<(std::ostream& out, vk::Result result) {
+ out << static_cast<VkResult>(result);
+ return out;
+}
+
+#endif  // GPU_VULKAN_VULKAN_CXX_H_
\ No newline at end of file
diff --git a/chromium/gpu/vulkan/vulkan_cxx_unittest.cc b/chromium/gpu/vulkan/vulkan_cxx_unittest.cc
new file mode 100644
index 00000000000..b0f16eaaa61
--- /dev/null
+++ b/chromium/gpu/vulkan/vulkan_cxx_unittest.cc
@@ -0,0 +1,90 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include "base/command_line.h"
+#include "base/files/file_path.h"
+#include "base/native_library.h"
+#include "base/path_service.h"
+#include "build/build_config.h"
+#include "gpu/vulkan/vulkan_cxx.h"
+#include "gpu/vulkan/vulkan_function_pointers.h"
+
+namespace gpu {
+
+class VulkanCXXTest : public testing::Test {
+ public:
+ VulkanCXXTest() = default;
+ ~VulkanCXXTest() override = default;
+
+ void SetUp() override {
+ use_swiftshader_ =
+ base::CommandLine::ForCurrentProcess()->HasSwitch("use-swiftshader");
+ base::FilePath path;
+#if defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_CHROMEOS) || \
+ defined(OS_FUCHSIA)
+ if (use_swiftshader_) {
+#if defined(OS_LINUX)
+ EXPECT_TRUE(base::PathService::Get(base::DIR_MODULE, &path));
+ path = path.Append("libvk_swiftshader.so");
+#else
+ return;
+#endif
+ } else {
+ path = base::FilePath("libvulkan.so.1");
+ }
+#elif defined(OS_WIN)
+ if (use_swiftshader_) {
+ EXPECT_TRUE(base::PathService::Get(base::DIR_MODULE, &path));
+ path = path.Append(L"vk_swiftshader.dll");
+ } else {
+ path = base::FilePath(L"vulkan-1.dll");
+ }
+#else
+#error "Not supported platform"
+#endif
+
+ auto* vulkan_function_pointers = GetVulkanFunctionPointers();
+ base::NativeLibraryLoadError native_library_load_error;
+ vulkan_function_pointers->vulkan_loader_library =
+ base::LoadNativeLibrary(path, &native_library_load_error);
+ EXPECT_TRUE(vulkan_function_pointers->vulkan_loader_library);
+ }
+
+ void TearDown() override {
+ auto* vulkan_function_pointers = GetVulkanFunctionPointers();
+ base::UnloadNativeLibrary(vulkan_function_pointers->vulkan_loader_library);
+ }
+
+ private:
+ bool use_swiftshader_ = false;
+};
+
+TEST_F(VulkanCXXTest, CreateInstanceUnique) {
+ auto* vulkan_function_pointers = GetVulkanFunctionPointers();
+ EXPECT_TRUE(vulkan_function_pointers->BindUnassociatedFunctionPointers());
+
+ vk::Result result;
+ uint32_t api_version;
+ std::tie(result, api_version) = vk::enumerateInstanceVersion();
+ EXPECT_EQ(result, vk::Result::eSuccess);
+ EXPECT_GE(api_version, kVulkanRequiredApiVersion);
+
+ vk::ApplicationInfo app_info("VulkanCXXTest", 0, nullptr, 0,
+ kVulkanRequiredApiVersion);
+ vk::InstanceCreateInfo instance_create_info({}, &app_info);
+ auto result_value = vk::createInstanceUnique(instance_create_info);
+ EXPECT_EQ(result_value.result, vk::Result::eSuccess);
+
+ vk::UniqueInstance instance = std::move(result_value.value);
+ EXPECT_TRUE(instance);
+
+ EXPECT_TRUE(vulkan_function_pointers->BindInstanceFunctionPointers(
+ instance.get(), kVulkanRequiredApiVersion, gfx::ExtensionSet()));
+
+ instance.reset();
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/vulkan/vulkan_device_queue.cc b/chromium/gpu/vulkan/vulkan_device_queue.cc
index 77127965721..6ca7cefc203 100644
--- a/chromium/gpu/vulkan/vulkan_device_queue.cc
+++ b/chromium/gpu/vulkan/vulkan_device_queue.cc
@@ -9,6 +9,7 @@
#include <utility>
#include <vector>
+#include "base/logging.h"
#include "base/strings/stringprintf.h"
#include "gpu/config/gpu_info.h" // nogncheck
#include "gpu/config/vulkan_info.h"
@@ -75,6 +76,9 @@ bool VulkanDeviceQueue::Initialize(
for (size_t i = 0; i < info.physical_devices.size(); ++i) {
const auto& device_info = info.physical_devices[i];
const auto& device_properties = device_info.properties;
+ if (device_properties.apiVersion < info.used_api_version)
+ continue;
+
const VkPhysicalDevice& device = device_info.device;
for (size_t n = 0; n < device_info.queue_families.size(); ++n) {
if ((device_info.queue_families[n].queueFlags & queue_flags) !=
@@ -172,12 +176,6 @@ bool VulkanDeviceQueue::Initialize(
}
}
- if (vk_physical_device_properties_.apiVersion < info.used_api_version) {
- LOG(ERROR) << "Physical device doesn't support version."
- << info.used_api_version;
- return false;
- }
-
crash_keys::vulkan_device_api_version.Set(
VkVersionToString(vk_physical_device_properties_.apiVersion));
crash_keys::vulkan_device_driver_version.Set(base::StringPrintf(
diff --git a/chromium/gpu/vulkan/vulkan_device_queue.h b/chromium/gpu/vulkan/vulkan_device_queue.h
index d1704fad96c..f7b23c07609 100644
--- a/chromium/gpu/vulkan/vulkan_device_queue.h
+++ b/chromium/gpu/vulkan/vulkan_device_queue.h
@@ -10,8 +10,8 @@
#include <memory>
#include "base/callback.h"
+#include "base/check_op.h"
#include "base/component_export.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "build/build_config.h"
#include "gpu/vulkan/vma_wrapper.h"
diff --git a/chromium/gpu/vulkan/vulkan_fence_helper.cc b/chromium/gpu/vulkan/vulkan_fence_helper.cc
index d7d902a42b7..c8338a4afc2 100644
--- a/chromium/gpu/vulkan/vulkan_fence_helper.cc
+++ b/chromium/gpu/vulkan/vulkan_fence_helper.cc
@@ -5,6 +5,7 @@
#include "gpu/vulkan/vulkan_fence_helper.h"
#include "base/bind.h"
+#include "base/logging.h"
#include "gpu/vulkan/vulkan_device_queue.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
@@ -75,32 +76,38 @@ void VulkanFenceHelper::EnqueueCleanupTaskForSubmittedWork(CleanupTask task) {
tasks_pending_fence_.emplace_back(std::move(task));
}
-void VulkanFenceHelper::ProcessCleanupTasks() {
+void VulkanFenceHelper::ProcessCleanupTasks(uint64_t retired_generation_id) {
VkDevice device = device_queue_->GetVulkanDevice();
+ if (!retired_generation_id)
+ retired_generation_id = current_generation_;
+
// Iterate over our pending cleanup fences / tasks, advancing
// |current_generation_| as far as possible.
for (const auto& tasks_for_fence : cleanup_tasks_) {
- // If we're already ahead of this task (callback modified |generation_id_|),
- // continue.
- if (tasks_for_fence.generation_id <= current_generation_)
- continue;
-
// Callback based tasks have no actual fence to wait on, keep checking
// future fences, as a callback may be delayed.
if (tasks_for_fence.UsingCallback())
continue;
VkResult result = vkGetFenceStatus(device, tasks_for_fence.fence);
- if (result == VK_NOT_READY)
+ if (result == VK_NOT_READY) {
+ retired_generation_id =
+ std::min(retired_generation_id, tasks_for_fence.generation_id - 1);
break;
- if (result != VK_SUCCESS) {
- PerformImmediateCleanup();
- return;
}
- current_generation_ = tasks_for_fence.generation_id;
+ if (result == VK_SUCCESS) {
+ retired_generation_id =
+ std::max(tasks_for_fence.generation_id, retired_generation_id);
+ continue;
+ }
+ DLOG(ERROR) << "vkGetFenceStatus() failed: " << result;
+ PerformImmediateCleanup();
+ return;
}
+ current_generation_ = retired_generation_id;
+
// Runs any cleanup tasks for generations that have passed. Create a temporary
// vector of tasks to run to avoid reentrancy issues.
std::vector<CleanupTask> tasks_to_run;
@@ -161,8 +168,7 @@ base::OnceClosure VulkanFenceHelper::CreateExternalCallback() {
// If |current_generation_| is ahead of the callback's
// |generation_id|, the callback came late. Ignore it.
if (generation_id > fence_helper->current_generation_) {
- fence_helper->current_generation_ = generation_id;
- fence_helper->ProcessCleanupTasks();
+ fence_helper->ProcessCleanupTasks(generation_id);
}
},
weak_factory_.GetWeakPtr(), generation_id);
diff --git a/chromium/gpu/vulkan/vulkan_fence_helper.h b/chromium/gpu/vulkan/vulkan_fence_helper.h
index 1b6e586aecc..571fc97ca66 100644
--- a/chromium/gpu/vulkan/vulkan_fence_helper.h
+++ b/chromium/gpu/vulkan/vulkan_fence_helper.h
@@ -101,7 +101,7 @@ class COMPONENT_EXPORT(VULKAN) VulkanFenceHelper {
// executed in order they are enqueued.
void EnqueueCleanupTaskForSubmittedWork(CleanupTask task);
// Processes CleanupTasks for which a fence has passed.
- void ProcessCleanupTasks();
+ void ProcessCleanupTasks(uint64_t retired_generation_id = 0);
// Helpers for common types:
void EnqueueSemaphoreCleanupForSubmittedWork(VkSemaphore semaphore);
void EnqueueSemaphoresCleanupForSubmittedWork(
diff --git a/chromium/gpu/vulkan/vulkan_function_pointers.cc b/chromium/gpu/vulkan/vulkan_function_pointers.cc
index 27ca23b12f7..0d40a7c687e 100644
--- a/chromium/gpu/vulkan/vulkan_function_pointers.cc
+++ b/chromium/gpu/vulkan/vulkan_function_pointers.cc
@@ -27,39 +27,41 @@ bool VulkanFunctionPointers::BindUnassociatedFunctionPointers() {
// vkGetInstanceProcAddr must be handled specially since it gets its function
// pointer through base::GetFunctionPOinterFromNativeLibrary(). Other Vulkan
// functions don't do this.
- vkGetInstanceProcAddrFn = reinterpret_cast<PFN_vkGetInstanceProcAddr>(
+ vkGetInstanceProcAddr = reinterpret_cast<PFN_vkGetInstanceProcAddr>(
base::GetFunctionPointerFromNativeLibrary(vulkan_loader_library,
"vkGetInstanceProcAddr"));
- if (!vkGetInstanceProcAddrFn)
+ if (!vkGetInstanceProcAddr)
return false;
+ vkEnumerateInstanceVersion = reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
+ vkGetInstanceProcAddr(nullptr, "vkEnumerateInstanceVersion"));
+ if (!vkEnumerateInstanceVersion) {
+ DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
+ << "vkEnumerateInstanceVersion";
+ return false;
+ }
- vkEnumerateInstanceVersionFn =
- reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
- vkGetInstanceProcAddr(nullptr, "vkEnumerateInstanceVersion"));
- // vkEnumerateInstanceVersion didn't exist in Vulkan 1.0, so we should
- // proceed even if we fail to get vkEnumerateInstanceVersion pointer.
- vkCreateInstanceFn = reinterpret_cast<PFN_vkCreateInstance>(
+ vkCreateInstance = reinterpret_cast<PFN_vkCreateInstance>(
vkGetInstanceProcAddr(nullptr, "vkCreateInstance"));
- if (!vkCreateInstanceFn) {
+ if (!vkCreateInstance) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateInstance";
return false;
}
- vkEnumerateInstanceExtensionPropertiesFn =
+ vkEnumerateInstanceExtensionProperties =
reinterpret_cast<PFN_vkEnumerateInstanceExtensionProperties>(
vkGetInstanceProcAddr(nullptr,
"vkEnumerateInstanceExtensionProperties"));
- if (!vkEnumerateInstanceExtensionPropertiesFn) {
+ if (!vkEnumerateInstanceExtensionProperties) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkEnumerateInstanceExtensionProperties";
return false;
}
- vkEnumerateInstanceLayerPropertiesFn =
+ vkEnumerateInstanceLayerProperties =
reinterpret_cast<PFN_vkEnumerateInstanceLayerProperties>(
vkGetInstanceProcAddr(nullptr, "vkEnumerateInstanceLayerProperties"));
- if (!vkEnumerateInstanceLayerPropertiesFn) {
+ if (!vkEnumerateInstanceLayerProperties) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkEnumerateInstanceLayerProperties";
return false;
@@ -72,102 +74,122 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
VkInstance vk_instance,
uint32_t api_version,
const gfx::ExtensionSet& enabled_extensions) {
- vkCreateDeviceFn = reinterpret_cast<PFN_vkCreateDevice>(
+ DCHECK_GE(api_version, kVulkanRequiredApiVersion);
+ vkCreateDevice = reinterpret_cast<PFN_vkCreateDevice>(
vkGetInstanceProcAddr(vk_instance, "vkCreateDevice"));
- if (!vkCreateDeviceFn) {
+ if (!vkCreateDevice) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateDevice";
return false;
}
- vkDestroyInstanceFn = reinterpret_cast<PFN_vkDestroyInstance>(
+ vkDestroyInstance = reinterpret_cast<PFN_vkDestroyInstance>(
vkGetInstanceProcAddr(vk_instance, "vkDestroyInstance"));
- if (!vkDestroyInstanceFn) {
+ if (!vkDestroyInstance) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyInstance";
return false;
}
- vkEnumerateDeviceExtensionPropertiesFn =
+ vkEnumerateDeviceExtensionProperties =
reinterpret_cast<PFN_vkEnumerateDeviceExtensionProperties>(
vkGetInstanceProcAddr(vk_instance,
"vkEnumerateDeviceExtensionProperties"));
- if (!vkEnumerateDeviceExtensionPropertiesFn) {
+ if (!vkEnumerateDeviceExtensionProperties) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkEnumerateDeviceExtensionProperties";
return false;
}
- vkEnumerateDeviceLayerPropertiesFn =
+ vkEnumerateDeviceLayerProperties =
reinterpret_cast<PFN_vkEnumerateDeviceLayerProperties>(
vkGetInstanceProcAddr(vk_instance,
"vkEnumerateDeviceLayerProperties"));
- if (!vkEnumerateDeviceLayerPropertiesFn) {
+ if (!vkEnumerateDeviceLayerProperties) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkEnumerateDeviceLayerProperties";
return false;
}
- vkEnumeratePhysicalDevicesFn =
- reinterpret_cast<PFN_vkEnumeratePhysicalDevices>(
- vkGetInstanceProcAddr(vk_instance, "vkEnumeratePhysicalDevices"));
- if (!vkEnumeratePhysicalDevicesFn) {
+ vkEnumeratePhysicalDevices = reinterpret_cast<PFN_vkEnumeratePhysicalDevices>(
+ vkGetInstanceProcAddr(vk_instance, "vkEnumeratePhysicalDevices"));
+ if (!vkEnumeratePhysicalDevices) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkEnumeratePhysicalDevices";
return false;
}
- vkGetDeviceProcAddrFn = reinterpret_cast<PFN_vkGetDeviceProcAddr>(
+ vkGetDeviceProcAddr = reinterpret_cast<PFN_vkGetDeviceProcAddr>(
vkGetInstanceProcAddr(vk_instance, "vkGetDeviceProcAddr"));
- if (!vkGetDeviceProcAddrFn) {
+ if (!vkGetDeviceProcAddr) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetDeviceProcAddr";
return false;
}
- vkGetPhysicalDeviceFeaturesFn =
- reinterpret_cast<PFN_vkGetPhysicalDeviceFeatures>(
- vkGetInstanceProcAddr(vk_instance, "vkGetPhysicalDeviceFeatures"));
- if (!vkGetPhysicalDeviceFeaturesFn) {
+ vkGetPhysicalDeviceFeatures2 =
+ reinterpret_cast<PFN_vkGetPhysicalDeviceFeatures2>(
+ vkGetInstanceProcAddr(vk_instance, "vkGetPhysicalDeviceFeatures2"));
+ if (!vkGetPhysicalDeviceFeatures2) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
- << "vkGetPhysicalDeviceFeatures";
+ << "vkGetPhysicalDeviceFeatures2";
return false;
}
- vkGetPhysicalDeviceFormatPropertiesFn =
+ vkGetPhysicalDeviceFormatProperties =
reinterpret_cast<PFN_vkGetPhysicalDeviceFormatProperties>(
vkGetInstanceProcAddr(vk_instance,
"vkGetPhysicalDeviceFormatProperties"));
- if (!vkGetPhysicalDeviceFormatPropertiesFn) {
+ if (!vkGetPhysicalDeviceFormatProperties) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceFormatProperties";
return false;
}
- vkGetPhysicalDeviceMemoryPropertiesFn =
+ vkGetPhysicalDeviceImageFormatProperties2 =
+ reinterpret_cast<PFN_vkGetPhysicalDeviceImageFormatProperties2>(
+ vkGetInstanceProcAddr(vk_instance,
+ "vkGetPhysicalDeviceImageFormatProperties2"));
+ if (!vkGetPhysicalDeviceImageFormatProperties2) {
+ DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
+ << "vkGetPhysicalDeviceImageFormatProperties2";
+ return false;
+ }
+
+ vkGetPhysicalDeviceMemoryProperties =
reinterpret_cast<PFN_vkGetPhysicalDeviceMemoryProperties>(
vkGetInstanceProcAddr(vk_instance,
"vkGetPhysicalDeviceMemoryProperties"));
- if (!vkGetPhysicalDeviceMemoryPropertiesFn) {
+ if (!vkGetPhysicalDeviceMemoryProperties) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceMemoryProperties";
return false;
}
- vkGetPhysicalDevicePropertiesFn =
+ vkGetPhysicalDeviceMemoryProperties2 =
+ reinterpret_cast<PFN_vkGetPhysicalDeviceMemoryProperties2>(
+ vkGetInstanceProcAddr(vk_instance,
+ "vkGetPhysicalDeviceMemoryProperties2"));
+ if (!vkGetPhysicalDeviceMemoryProperties2) {
+ DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
+ << "vkGetPhysicalDeviceMemoryProperties2";
+ return false;
+ }
+
+ vkGetPhysicalDeviceProperties =
reinterpret_cast<PFN_vkGetPhysicalDeviceProperties>(
vkGetInstanceProcAddr(vk_instance, "vkGetPhysicalDeviceProperties"));
- if (!vkGetPhysicalDevicePropertiesFn) {
+ if (!vkGetPhysicalDeviceProperties) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceProperties";
return false;
}
- vkGetPhysicalDeviceQueueFamilyPropertiesFn =
+ vkGetPhysicalDeviceQueueFamilyProperties =
reinterpret_cast<PFN_vkGetPhysicalDeviceQueueFamilyProperties>(
vkGetInstanceProcAddr(vk_instance,
"vkGetPhysicalDeviceQueueFamilyProperties"));
- if (!vkGetPhysicalDeviceQueueFamilyPropertiesFn) {
+ if (!vkGetPhysicalDeviceQueueFamilyProperties) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceQueueFamilyProperties";
return false;
@@ -176,21 +198,21 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
#if DCHECK_IS_ON()
if (gfx::HasExtension(enabled_extensions,
VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
- vkCreateDebugReportCallbackEXTFn =
+ vkCreateDebugReportCallbackEXT =
reinterpret_cast<PFN_vkCreateDebugReportCallbackEXT>(
vkGetInstanceProcAddr(vk_instance,
"vkCreateDebugReportCallbackEXT"));
- if (!vkCreateDebugReportCallbackEXTFn) {
+ if (!vkCreateDebugReportCallbackEXT) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateDebugReportCallbackEXT";
return false;
}
- vkDestroyDebugReportCallbackEXTFn =
+ vkDestroyDebugReportCallbackEXT =
reinterpret_cast<PFN_vkDestroyDebugReportCallbackEXT>(
vkGetInstanceProcAddr(vk_instance,
"vkDestroyDebugReportCallbackEXT"));
- if (!vkDestroyDebugReportCallbackEXTFn) {
+ if (!vkDestroyDebugReportCallbackEXT) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyDebugReportCallbackEXT";
return false;
@@ -199,39 +221,39 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
#endif // DCHECK_IS_ON()
if (gfx::HasExtension(enabled_extensions, VK_KHR_SURFACE_EXTENSION_NAME)) {
- vkDestroySurfaceKHRFn = reinterpret_cast<PFN_vkDestroySurfaceKHR>(
+ vkDestroySurfaceKHR = reinterpret_cast<PFN_vkDestroySurfaceKHR>(
vkGetInstanceProcAddr(vk_instance, "vkDestroySurfaceKHR"));
- if (!vkDestroySurfaceKHRFn) {
+ if (!vkDestroySurfaceKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroySurfaceKHR";
return false;
}
- vkGetPhysicalDeviceSurfaceCapabilitiesKHRFn =
+ vkGetPhysicalDeviceSurfaceCapabilitiesKHR =
reinterpret_cast<PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR>(
vkGetInstanceProcAddr(vk_instance,
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR"));
- if (!vkGetPhysicalDeviceSurfaceCapabilitiesKHRFn) {
+ if (!vkGetPhysicalDeviceSurfaceCapabilitiesKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceSurfaceCapabilitiesKHR";
return false;
}
- vkGetPhysicalDeviceSurfaceFormatsKHRFn =
+ vkGetPhysicalDeviceSurfaceFormatsKHR =
reinterpret_cast<PFN_vkGetPhysicalDeviceSurfaceFormatsKHR>(
vkGetInstanceProcAddr(vk_instance,
"vkGetPhysicalDeviceSurfaceFormatsKHR"));
- if (!vkGetPhysicalDeviceSurfaceFormatsKHRFn) {
+ if (!vkGetPhysicalDeviceSurfaceFormatsKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceSurfaceFormatsKHR";
return false;
}
- vkGetPhysicalDeviceSurfaceSupportKHRFn =
+ vkGetPhysicalDeviceSurfaceSupportKHR =
reinterpret_cast<PFN_vkGetPhysicalDeviceSurfaceSupportKHR>(
vkGetInstanceProcAddr(vk_instance,
"vkGetPhysicalDeviceSurfaceSupportKHR"));
- if (!vkGetPhysicalDeviceSurfaceSupportKHRFn) {
+ if (!vkGetPhysicalDeviceSurfaceSupportKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceSurfaceSupportKHR";
return false;
@@ -241,19 +263,19 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
#if defined(USE_VULKAN_XLIB)
if (gfx::HasExtension(enabled_extensions,
VK_KHR_XLIB_SURFACE_EXTENSION_NAME)) {
- vkCreateXlibSurfaceKHRFn = reinterpret_cast<PFN_vkCreateXlibSurfaceKHR>(
+ vkCreateXlibSurfaceKHR = reinterpret_cast<PFN_vkCreateXlibSurfaceKHR>(
vkGetInstanceProcAddr(vk_instance, "vkCreateXlibSurfaceKHR"));
- if (!vkCreateXlibSurfaceKHRFn) {
+ if (!vkCreateXlibSurfaceKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateXlibSurfaceKHR";
return false;
}
- vkGetPhysicalDeviceXlibPresentationSupportKHRFn =
+ vkGetPhysicalDeviceXlibPresentationSupportKHR =
reinterpret_cast<PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR>(
vkGetInstanceProcAddr(
vk_instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR"));
- if (!vkGetPhysicalDeviceXlibPresentationSupportKHRFn) {
+ if (!vkGetPhysicalDeviceXlibPresentationSupportKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceXlibPresentationSupportKHR";
return false;
@@ -264,19 +286,19 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
#if defined(OS_WIN)
if (gfx::HasExtension(enabled_extensions,
VK_KHR_WIN32_SURFACE_EXTENSION_NAME)) {
- vkCreateWin32SurfaceKHRFn = reinterpret_cast<PFN_vkCreateWin32SurfaceKHR>(
+ vkCreateWin32SurfaceKHR = reinterpret_cast<PFN_vkCreateWin32SurfaceKHR>(
vkGetInstanceProcAddr(vk_instance, "vkCreateWin32SurfaceKHR"));
- if (!vkCreateWin32SurfaceKHRFn) {
+ if (!vkCreateWin32SurfaceKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateWin32SurfaceKHR";
return false;
}
- vkGetPhysicalDeviceWin32PresentationSupportKHRFn =
+ vkGetPhysicalDeviceWin32PresentationSupportKHR =
reinterpret_cast<PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR>(
vkGetInstanceProcAddr(
vk_instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR"));
- if (!vkGetPhysicalDeviceWin32PresentationSupportKHRFn) {
+ if (!vkGetPhysicalDeviceWin32PresentationSupportKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceWin32PresentationSupportKHR";
return false;
@@ -287,10 +309,9 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
#if defined(OS_ANDROID)
if (gfx::HasExtension(enabled_extensions,
VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
- vkCreateAndroidSurfaceKHRFn =
- reinterpret_cast<PFN_vkCreateAndroidSurfaceKHR>(
- vkGetInstanceProcAddr(vk_instance, "vkCreateAndroidSurfaceKHR"));
- if (!vkCreateAndroidSurfaceKHRFn) {
+ vkCreateAndroidSurfaceKHR = reinterpret_cast<PFN_vkCreateAndroidSurfaceKHR>(
+ vkGetInstanceProcAddr(vk_instance, "vkCreateAndroidSurfaceKHR"));
+ if (!vkCreateAndroidSurfaceKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateAndroidSurfaceKHR";
return false;
@@ -301,11 +322,11 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
#if defined(OS_FUCHSIA)
if (gfx::HasExtension(enabled_extensions,
VK_FUCHSIA_IMAGEPIPE_SURFACE_EXTENSION_NAME)) {
- vkCreateImagePipeSurfaceFUCHSIAFn =
+ vkCreateImagePipeSurfaceFUCHSIA =
reinterpret_cast<PFN_vkCreateImagePipeSurfaceFUCHSIA>(
vkGetInstanceProcAddr(vk_instance,
"vkCreateImagePipeSurfaceFUCHSIA"));
- if (!vkCreateImagePipeSurfaceFUCHSIAFn) {
+ if (!vkCreateImagePipeSurfaceFUCHSIA) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateImagePipeSurfaceFUCHSIA";
return false;
@@ -313,42 +334,6 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
}
#endif // defined(OS_FUCHSIA)
- if (api_version >= VK_API_VERSION_1_1) {
- vkGetPhysicalDeviceImageFormatProperties2Fn =
- reinterpret_cast<PFN_vkGetPhysicalDeviceImageFormatProperties2>(
- vkGetInstanceProcAddr(vk_instance,
- "vkGetPhysicalDeviceImageFormatProperties2"));
- if (!vkGetPhysicalDeviceImageFormatProperties2Fn) {
- DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
- << "vkGetPhysicalDeviceImageFormatProperties2";
- return false;
- }
- }
-
- if (api_version >= VK_API_VERSION_1_1) {
- vkGetPhysicalDeviceFeatures2Fn =
- reinterpret_cast<PFN_vkGetPhysicalDeviceFeatures2>(
- vkGetInstanceProcAddr(vk_instance, "vkGetPhysicalDeviceFeatures2"));
- if (!vkGetPhysicalDeviceFeatures2Fn) {
- DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
- << "vkGetPhysicalDeviceFeatures2";
- return false;
- }
-
- } else if (gfx::HasExtension(
- enabled_extensions,
- VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
- vkGetPhysicalDeviceFeatures2Fn =
- reinterpret_cast<PFN_vkGetPhysicalDeviceFeatures2>(
- vkGetInstanceProcAddr(vk_instance,
- "vkGetPhysicalDeviceFeatures2KHR"));
- if (!vkGetPhysicalDeviceFeatures2Fn) {
- DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
- << "vkGetPhysicalDeviceFeatures2KHR";
- return false;
- }
- }
-
return true;
}
@@ -356,505 +341,520 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
VkDevice vk_device,
uint32_t api_version,
const gfx::ExtensionSet& enabled_extensions) {
+ DCHECK_GE(api_version, kVulkanRequiredApiVersion);
// Device functions
- vkAllocateCommandBuffersFn = reinterpret_cast<PFN_vkAllocateCommandBuffers>(
+ vkAllocateCommandBuffers = reinterpret_cast<PFN_vkAllocateCommandBuffers>(
vkGetDeviceProcAddr(vk_device, "vkAllocateCommandBuffers"));
- if (!vkAllocateCommandBuffersFn) {
+ if (!vkAllocateCommandBuffers) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkAllocateCommandBuffers";
return false;
}
- vkAllocateDescriptorSetsFn = reinterpret_cast<PFN_vkAllocateDescriptorSets>(
+ vkAllocateDescriptorSets = reinterpret_cast<PFN_vkAllocateDescriptorSets>(
vkGetDeviceProcAddr(vk_device, "vkAllocateDescriptorSets"));
- if (!vkAllocateDescriptorSetsFn) {
+ if (!vkAllocateDescriptorSets) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkAllocateDescriptorSets";
return false;
}
- vkAllocateMemoryFn = reinterpret_cast<PFN_vkAllocateMemory>(
+ vkAllocateMemory = reinterpret_cast<PFN_vkAllocateMemory>(
vkGetDeviceProcAddr(vk_device, "vkAllocateMemory"));
- if (!vkAllocateMemoryFn) {
+ if (!vkAllocateMemory) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkAllocateMemory";
return false;
}
- vkBeginCommandBufferFn = reinterpret_cast<PFN_vkBeginCommandBuffer>(
+ vkBeginCommandBuffer = reinterpret_cast<PFN_vkBeginCommandBuffer>(
vkGetDeviceProcAddr(vk_device, "vkBeginCommandBuffer"));
- if (!vkBeginCommandBufferFn) {
+ if (!vkBeginCommandBuffer) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkBeginCommandBuffer";
return false;
}
- vkBindBufferMemoryFn = reinterpret_cast<PFN_vkBindBufferMemory>(
+ vkBindBufferMemory = reinterpret_cast<PFN_vkBindBufferMemory>(
vkGetDeviceProcAddr(vk_device, "vkBindBufferMemory"));
- if (!vkBindBufferMemoryFn) {
+ if (!vkBindBufferMemory) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkBindBufferMemory";
return false;
}
- vkBindImageMemoryFn = reinterpret_cast<PFN_vkBindImageMemory>(
+ vkBindBufferMemory2 = reinterpret_cast<PFN_vkBindBufferMemory2>(
+ vkGetDeviceProcAddr(vk_device, "vkBindBufferMemory2"));
+ if (!vkBindBufferMemory2) {
+ DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
+ << "vkBindBufferMemory2";
+ return false;
+ }
+
+ vkBindImageMemory = reinterpret_cast<PFN_vkBindImageMemory>(
vkGetDeviceProcAddr(vk_device, "vkBindImageMemory"));
- if (!vkBindImageMemoryFn) {
+ if (!vkBindImageMemory) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkBindImageMemory";
return false;
}
- vkCmdBeginRenderPassFn = reinterpret_cast<PFN_vkCmdBeginRenderPass>(
+ vkBindImageMemory2 = reinterpret_cast<PFN_vkBindImageMemory2>(
+ vkGetDeviceProcAddr(vk_device, "vkBindImageMemory2"));
+ if (!vkBindImageMemory2) {
+ DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
+ << "vkBindImageMemory2";
+ return false;
+ }
+
+ vkCmdBeginRenderPass = reinterpret_cast<PFN_vkCmdBeginRenderPass>(
vkGetDeviceProcAddr(vk_device, "vkCmdBeginRenderPass"));
- if (!vkCmdBeginRenderPassFn) {
+ if (!vkCmdBeginRenderPass) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCmdBeginRenderPass";
return false;
}
- vkCmdCopyBufferFn = reinterpret_cast<PFN_vkCmdCopyBuffer>(
+ vkCmdCopyBuffer = reinterpret_cast<PFN_vkCmdCopyBuffer>(
vkGetDeviceProcAddr(vk_device, "vkCmdCopyBuffer"));
- if (!vkCmdCopyBufferFn) {
+ if (!vkCmdCopyBuffer) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCmdCopyBuffer";
return false;
}
- vkCmdCopyBufferToImageFn = reinterpret_cast<PFN_vkCmdCopyBufferToImage>(
+ vkCmdCopyBufferToImage = reinterpret_cast<PFN_vkCmdCopyBufferToImage>(
vkGetDeviceProcAddr(vk_device, "vkCmdCopyBufferToImage"));
- if (!vkCmdCopyBufferToImageFn) {
+ if (!vkCmdCopyBufferToImage) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCmdCopyBufferToImage";
return false;
}
- vkCmdEndRenderPassFn = reinterpret_cast<PFN_vkCmdEndRenderPass>(
+ vkCmdEndRenderPass = reinterpret_cast<PFN_vkCmdEndRenderPass>(
vkGetDeviceProcAddr(vk_device, "vkCmdEndRenderPass"));
- if (!vkCmdEndRenderPassFn) {
+ if (!vkCmdEndRenderPass) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCmdEndRenderPass";
return false;
}
- vkCmdExecuteCommandsFn = reinterpret_cast<PFN_vkCmdExecuteCommands>(
+ vkCmdExecuteCommands = reinterpret_cast<PFN_vkCmdExecuteCommands>(
vkGetDeviceProcAddr(vk_device, "vkCmdExecuteCommands"));
- if (!vkCmdExecuteCommandsFn) {
+ if (!vkCmdExecuteCommands) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCmdExecuteCommands";
return false;
}
- vkCmdNextSubpassFn = reinterpret_cast<PFN_vkCmdNextSubpass>(
+ vkCmdNextSubpass = reinterpret_cast<PFN_vkCmdNextSubpass>(
vkGetDeviceProcAddr(vk_device, "vkCmdNextSubpass"));
- if (!vkCmdNextSubpassFn) {
+ if (!vkCmdNextSubpass) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCmdNextSubpass";
return false;
}
- vkCmdPipelineBarrierFn = reinterpret_cast<PFN_vkCmdPipelineBarrier>(
+ vkCmdPipelineBarrier = reinterpret_cast<PFN_vkCmdPipelineBarrier>(
vkGetDeviceProcAddr(vk_device, "vkCmdPipelineBarrier"));
- if (!vkCmdPipelineBarrierFn) {
+ if (!vkCmdPipelineBarrier) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCmdPipelineBarrier";
return false;
}
- vkCreateBufferFn = reinterpret_cast<PFN_vkCreateBuffer>(
+ vkCreateBuffer = reinterpret_cast<PFN_vkCreateBuffer>(
vkGetDeviceProcAddr(vk_device, "vkCreateBuffer"));
- if (!vkCreateBufferFn) {
+ if (!vkCreateBuffer) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateBuffer";
return false;
}
- vkCreateCommandPoolFn = reinterpret_cast<PFN_vkCreateCommandPool>(
+ vkCreateCommandPool = reinterpret_cast<PFN_vkCreateCommandPool>(
vkGetDeviceProcAddr(vk_device, "vkCreateCommandPool"));
- if (!vkCreateCommandPoolFn) {
+ if (!vkCreateCommandPool) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateCommandPool";
return false;
}
- vkCreateDescriptorPoolFn = reinterpret_cast<PFN_vkCreateDescriptorPool>(
+ vkCreateDescriptorPool = reinterpret_cast<PFN_vkCreateDescriptorPool>(
vkGetDeviceProcAddr(vk_device, "vkCreateDescriptorPool"));
- if (!vkCreateDescriptorPoolFn) {
+ if (!vkCreateDescriptorPool) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateDescriptorPool";
return false;
}
- vkCreateDescriptorSetLayoutFn =
+ vkCreateDescriptorSetLayout =
reinterpret_cast<PFN_vkCreateDescriptorSetLayout>(
vkGetDeviceProcAddr(vk_device, "vkCreateDescriptorSetLayout"));
- if (!vkCreateDescriptorSetLayoutFn) {
+ if (!vkCreateDescriptorSetLayout) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateDescriptorSetLayout";
return false;
}
- vkCreateFenceFn = reinterpret_cast<PFN_vkCreateFence>(
+ vkCreateFence = reinterpret_cast<PFN_vkCreateFence>(
vkGetDeviceProcAddr(vk_device, "vkCreateFence"));
- if (!vkCreateFenceFn) {
+ if (!vkCreateFence) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateFence";
return false;
}
- vkCreateFramebufferFn = reinterpret_cast<PFN_vkCreateFramebuffer>(
+ vkCreateFramebuffer = reinterpret_cast<PFN_vkCreateFramebuffer>(
vkGetDeviceProcAddr(vk_device, "vkCreateFramebuffer"));
- if (!vkCreateFramebufferFn) {
+ if (!vkCreateFramebuffer) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateFramebuffer";
return false;
}
- vkCreateImageFn = reinterpret_cast<PFN_vkCreateImage>(
+ vkCreateImage = reinterpret_cast<PFN_vkCreateImage>(
vkGetDeviceProcAddr(vk_device, "vkCreateImage"));
- if (!vkCreateImageFn) {
+ if (!vkCreateImage) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateImage";
return false;
}
- vkCreateImageViewFn = reinterpret_cast<PFN_vkCreateImageView>(
+ vkCreateImageView = reinterpret_cast<PFN_vkCreateImageView>(
vkGetDeviceProcAddr(vk_device, "vkCreateImageView"));
- if (!vkCreateImageViewFn) {
+ if (!vkCreateImageView) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateImageView";
return false;
}
- vkCreateRenderPassFn = reinterpret_cast<PFN_vkCreateRenderPass>(
+ vkCreateRenderPass = reinterpret_cast<PFN_vkCreateRenderPass>(
vkGetDeviceProcAddr(vk_device, "vkCreateRenderPass"));
- if (!vkCreateRenderPassFn) {
+ if (!vkCreateRenderPass) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateRenderPass";
return false;
}
- vkCreateSamplerFn = reinterpret_cast<PFN_vkCreateSampler>(
+ vkCreateSampler = reinterpret_cast<PFN_vkCreateSampler>(
vkGetDeviceProcAddr(vk_device, "vkCreateSampler"));
- if (!vkCreateSamplerFn) {
+ if (!vkCreateSampler) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateSampler";
return false;
}
- vkCreateSemaphoreFn = reinterpret_cast<PFN_vkCreateSemaphore>(
+ vkCreateSemaphore = reinterpret_cast<PFN_vkCreateSemaphore>(
vkGetDeviceProcAddr(vk_device, "vkCreateSemaphore"));
- if (!vkCreateSemaphoreFn) {
+ if (!vkCreateSemaphore) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateSemaphore";
return false;
}
- vkCreateShaderModuleFn = reinterpret_cast<PFN_vkCreateShaderModule>(
+ vkCreateShaderModule = reinterpret_cast<PFN_vkCreateShaderModule>(
vkGetDeviceProcAddr(vk_device, "vkCreateShaderModule"));
- if (!vkCreateShaderModuleFn) {
+ if (!vkCreateShaderModule) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateShaderModule";
return false;
}
- vkDestroyBufferFn = reinterpret_cast<PFN_vkDestroyBuffer>(
+ vkDestroyBuffer = reinterpret_cast<PFN_vkDestroyBuffer>(
vkGetDeviceProcAddr(vk_device, "vkDestroyBuffer"));
- if (!vkDestroyBufferFn) {
+ if (!vkDestroyBuffer) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyBuffer";
return false;
}
- vkDestroyCommandPoolFn = reinterpret_cast<PFN_vkDestroyCommandPool>(
+ vkDestroyCommandPool = reinterpret_cast<PFN_vkDestroyCommandPool>(
vkGetDeviceProcAddr(vk_device, "vkDestroyCommandPool"));
- if (!vkDestroyCommandPoolFn) {
+ if (!vkDestroyCommandPool) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyCommandPool";
return false;
}
- vkDestroyDescriptorPoolFn = reinterpret_cast<PFN_vkDestroyDescriptorPool>(
+ vkDestroyDescriptorPool = reinterpret_cast<PFN_vkDestroyDescriptorPool>(
vkGetDeviceProcAddr(vk_device, "vkDestroyDescriptorPool"));
- if (!vkDestroyDescriptorPoolFn) {
+ if (!vkDestroyDescriptorPool) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyDescriptorPool";
return false;
}
- vkDestroyDescriptorSetLayoutFn =
+ vkDestroyDescriptorSetLayout =
reinterpret_cast<PFN_vkDestroyDescriptorSetLayout>(
vkGetDeviceProcAddr(vk_device, "vkDestroyDescriptorSetLayout"));
- if (!vkDestroyDescriptorSetLayoutFn) {
+ if (!vkDestroyDescriptorSetLayout) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyDescriptorSetLayout";
return false;
}
- vkDestroyDeviceFn = reinterpret_cast<PFN_vkDestroyDevice>(
+ vkDestroyDevice = reinterpret_cast<PFN_vkDestroyDevice>(
vkGetDeviceProcAddr(vk_device, "vkDestroyDevice"));
- if (!vkDestroyDeviceFn) {
+ if (!vkDestroyDevice) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyDevice";
return false;
}
- vkDestroyFenceFn = reinterpret_cast<PFN_vkDestroyFence>(
+ vkDestroyFence = reinterpret_cast<PFN_vkDestroyFence>(
vkGetDeviceProcAddr(vk_device, "vkDestroyFence"));
- if (!vkDestroyFenceFn) {
+ if (!vkDestroyFence) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyFence";
return false;
}
- vkDestroyFramebufferFn = reinterpret_cast<PFN_vkDestroyFramebuffer>(
+ vkDestroyFramebuffer = reinterpret_cast<PFN_vkDestroyFramebuffer>(
vkGetDeviceProcAddr(vk_device, "vkDestroyFramebuffer"));
- if (!vkDestroyFramebufferFn) {
+ if (!vkDestroyFramebuffer) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyFramebuffer";
return false;
}
- vkDestroyImageFn = reinterpret_cast<PFN_vkDestroyImage>(
+ vkDestroyImage = reinterpret_cast<PFN_vkDestroyImage>(
vkGetDeviceProcAddr(vk_device, "vkDestroyImage"));
- if (!vkDestroyImageFn) {
+ if (!vkDestroyImage) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyImage";
return false;
}
- vkDestroyImageViewFn = reinterpret_cast<PFN_vkDestroyImageView>(
+ vkDestroyImageView = reinterpret_cast<PFN_vkDestroyImageView>(
vkGetDeviceProcAddr(vk_device, "vkDestroyImageView"));
- if (!vkDestroyImageViewFn) {
+ if (!vkDestroyImageView) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyImageView";
return false;
}
- vkDestroyRenderPassFn = reinterpret_cast<PFN_vkDestroyRenderPass>(
+ vkDestroyRenderPass = reinterpret_cast<PFN_vkDestroyRenderPass>(
vkGetDeviceProcAddr(vk_device, "vkDestroyRenderPass"));
- if (!vkDestroyRenderPassFn) {
+ if (!vkDestroyRenderPass) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyRenderPass";
return false;
}
- vkDestroySamplerFn = reinterpret_cast<PFN_vkDestroySampler>(
+ vkDestroySampler = reinterpret_cast<PFN_vkDestroySampler>(
vkGetDeviceProcAddr(vk_device, "vkDestroySampler"));
- if (!vkDestroySamplerFn) {
+ if (!vkDestroySampler) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroySampler";
return false;
}
- vkDestroySemaphoreFn = reinterpret_cast<PFN_vkDestroySemaphore>(
+ vkDestroySemaphore = reinterpret_cast<PFN_vkDestroySemaphore>(
vkGetDeviceProcAddr(vk_device, "vkDestroySemaphore"));
- if (!vkDestroySemaphoreFn) {
+ if (!vkDestroySemaphore) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroySemaphore";
return false;
}
- vkDestroyShaderModuleFn = reinterpret_cast<PFN_vkDestroyShaderModule>(
+ vkDestroyShaderModule = reinterpret_cast<PFN_vkDestroyShaderModule>(
vkGetDeviceProcAddr(vk_device, "vkDestroyShaderModule"));
- if (!vkDestroyShaderModuleFn) {
+ if (!vkDestroyShaderModule) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyShaderModule";
return false;
}
- vkDeviceWaitIdleFn = reinterpret_cast<PFN_vkDeviceWaitIdle>(
+ vkDeviceWaitIdle = reinterpret_cast<PFN_vkDeviceWaitIdle>(
vkGetDeviceProcAddr(vk_device, "vkDeviceWaitIdle"));
- if (!vkDeviceWaitIdleFn) {
+ if (!vkDeviceWaitIdle) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDeviceWaitIdle";
return false;
}
- vkFlushMappedMemoryRangesFn = reinterpret_cast<PFN_vkFlushMappedMemoryRanges>(
+ vkFlushMappedMemoryRanges = reinterpret_cast<PFN_vkFlushMappedMemoryRanges>(
vkGetDeviceProcAddr(vk_device, "vkFlushMappedMemoryRanges"));
- if (!vkFlushMappedMemoryRangesFn) {
+ if (!vkFlushMappedMemoryRanges) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkFlushMappedMemoryRanges";
return false;
}
- vkEndCommandBufferFn = reinterpret_cast<PFN_vkEndCommandBuffer>(
+ vkEndCommandBuffer = reinterpret_cast<PFN_vkEndCommandBuffer>(
vkGetDeviceProcAddr(vk_device, "vkEndCommandBuffer"));
- if (!vkEndCommandBufferFn) {
+ if (!vkEndCommandBuffer) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkEndCommandBuffer";
return false;
}
- vkFreeCommandBuffersFn = reinterpret_cast<PFN_vkFreeCommandBuffers>(
+ vkFreeCommandBuffers = reinterpret_cast<PFN_vkFreeCommandBuffers>(
vkGetDeviceProcAddr(vk_device, "vkFreeCommandBuffers"));
- if (!vkFreeCommandBuffersFn) {
+ if (!vkFreeCommandBuffers) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkFreeCommandBuffers";
return false;
}
- vkFreeDescriptorSetsFn = reinterpret_cast<PFN_vkFreeDescriptorSets>(
+ vkFreeDescriptorSets = reinterpret_cast<PFN_vkFreeDescriptorSets>(
vkGetDeviceProcAddr(vk_device, "vkFreeDescriptorSets"));
- if (!vkFreeDescriptorSetsFn) {
+ if (!vkFreeDescriptorSets) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkFreeDescriptorSets";
return false;
}
- vkFreeMemoryFn = reinterpret_cast<PFN_vkFreeMemory>(
+ vkFreeMemory = reinterpret_cast<PFN_vkFreeMemory>(
vkGetDeviceProcAddr(vk_device, "vkFreeMemory"));
- if (!vkFreeMemoryFn) {
+ if (!vkFreeMemory) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkFreeMemory";
return false;
}
- vkInvalidateMappedMemoryRangesFn =
+ vkInvalidateMappedMemoryRanges =
reinterpret_cast<PFN_vkInvalidateMappedMemoryRanges>(
vkGetDeviceProcAddr(vk_device, "vkInvalidateMappedMemoryRanges"));
- if (!vkInvalidateMappedMemoryRangesFn) {
+ if (!vkInvalidateMappedMemoryRanges) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkInvalidateMappedMemoryRanges";
return false;
}
- vkGetBufferMemoryRequirementsFn =
+ vkGetBufferMemoryRequirements =
reinterpret_cast<PFN_vkGetBufferMemoryRequirements>(
vkGetDeviceProcAddr(vk_device, "vkGetBufferMemoryRequirements"));
- if (!vkGetBufferMemoryRequirementsFn) {
+ if (!vkGetBufferMemoryRequirements) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetBufferMemoryRequirements";
return false;
}
- vkGetDeviceQueueFn = reinterpret_cast<PFN_vkGetDeviceQueue>(
+ vkGetBufferMemoryRequirements2 =
+ reinterpret_cast<PFN_vkGetBufferMemoryRequirements2>(
+ vkGetDeviceProcAddr(vk_device, "vkGetBufferMemoryRequirements2"));
+ if (!vkGetBufferMemoryRequirements2) {
+ DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
+ << "vkGetBufferMemoryRequirements2";
+ return false;
+ }
+
+ vkGetDeviceQueue = reinterpret_cast<PFN_vkGetDeviceQueue>(
vkGetDeviceProcAddr(vk_device, "vkGetDeviceQueue"));
- if (!vkGetDeviceQueueFn) {
+ if (!vkGetDeviceQueue) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetDeviceQueue";
return false;
}
- vkGetFenceStatusFn = reinterpret_cast<PFN_vkGetFenceStatus>(
+ vkGetDeviceQueue2 = reinterpret_cast<PFN_vkGetDeviceQueue2>(
+ vkGetDeviceProcAddr(vk_device, "vkGetDeviceQueue2"));
+ if (!vkGetDeviceQueue2) {
+ DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
+ << "vkGetDeviceQueue2";
+ return false;
+ }
+
+ vkGetFenceStatus = reinterpret_cast<PFN_vkGetFenceStatus>(
vkGetDeviceProcAddr(vk_device, "vkGetFenceStatus"));
- if (!vkGetFenceStatusFn) {
+ if (!vkGetFenceStatus) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetFenceStatus";
return false;
}
- vkGetImageMemoryRequirementsFn =
+ vkGetImageMemoryRequirements =
reinterpret_cast<PFN_vkGetImageMemoryRequirements>(
vkGetDeviceProcAddr(vk_device, "vkGetImageMemoryRequirements"));
- if (!vkGetImageMemoryRequirementsFn) {
+ if (!vkGetImageMemoryRequirements) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetImageMemoryRequirements";
return false;
}
- vkMapMemoryFn = reinterpret_cast<PFN_vkMapMemory>(
+ vkGetImageMemoryRequirements2 =
+ reinterpret_cast<PFN_vkGetImageMemoryRequirements2>(
+ vkGetDeviceProcAddr(vk_device, "vkGetImageMemoryRequirements2"));
+ if (!vkGetImageMemoryRequirements2) {
+ DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
+ << "vkGetImageMemoryRequirements2";
+ return false;
+ }
+
+ vkMapMemory = reinterpret_cast<PFN_vkMapMemory>(
vkGetDeviceProcAddr(vk_device, "vkMapMemory"));
- if (!vkMapMemoryFn) {
+ if (!vkMapMemory) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkMapMemory";
return false;
}
- vkQueueSubmitFn = reinterpret_cast<PFN_vkQueueSubmit>(
+ vkQueueSubmit = reinterpret_cast<PFN_vkQueueSubmit>(
vkGetDeviceProcAddr(vk_device, "vkQueueSubmit"));
- if (!vkQueueSubmitFn) {
+ if (!vkQueueSubmit) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkQueueSubmit";
return false;
}
- vkQueueWaitIdleFn = reinterpret_cast<PFN_vkQueueWaitIdle>(
+ vkQueueWaitIdle = reinterpret_cast<PFN_vkQueueWaitIdle>(
vkGetDeviceProcAddr(vk_device, "vkQueueWaitIdle"));
- if (!vkQueueWaitIdleFn) {
+ if (!vkQueueWaitIdle) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkQueueWaitIdle";
return false;
}
- vkResetCommandBufferFn = reinterpret_cast<PFN_vkResetCommandBuffer>(
+ vkResetCommandBuffer = reinterpret_cast<PFN_vkResetCommandBuffer>(
vkGetDeviceProcAddr(vk_device, "vkResetCommandBuffer"));
- if (!vkResetCommandBufferFn) {
+ if (!vkResetCommandBuffer) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkResetCommandBuffer";
return false;
}
- vkResetFencesFn = reinterpret_cast<PFN_vkResetFences>(
+ vkResetFences = reinterpret_cast<PFN_vkResetFences>(
vkGetDeviceProcAddr(vk_device, "vkResetFences"));
- if (!vkResetFencesFn) {
+ if (!vkResetFences) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkResetFences";
return false;
}
- vkUnmapMemoryFn = reinterpret_cast<PFN_vkUnmapMemory>(
+ vkUnmapMemory = reinterpret_cast<PFN_vkUnmapMemory>(
vkGetDeviceProcAddr(vk_device, "vkUnmapMemory"));
- if (!vkUnmapMemoryFn) {
+ if (!vkUnmapMemory) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkUnmapMemory";
return false;
}
- vkUpdateDescriptorSetsFn = reinterpret_cast<PFN_vkUpdateDescriptorSets>(
+ vkUpdateDescriptorSets = reinterpret_cast<PFN_vkUpdateDescriptorSets>(
vkGetDeviceProcAddr(vk_device, "vkUpdateDescriptorSets"));
- if (!vkUpdateDescriptorSetsFn) {
+ if (!vkUpdateDescriptorSets) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkUpdateDescriptorSets";
return false;
}
- vkWaitForFencesFn = reinterpret_cast<PFN_vkWaitForFences>(
+ vkWaitForFences = reinterpret_cast<PFN_vkWaitForFences>(
vkGetDeviceProcAddr(vk_device, "vkWaitForFences"));
- if (!vkWaitForFencesFn) {
+ if (!vkWaitForFences) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkWaitForFences";
return false;
}
- if (api_version >= VK_API_VERSION_1_1) {
- vkGetDeviceQueue2Fn = reinterpret_cast<PFN_vkGetDeviceQueue2>(
- vkGetDeviceProcAddr(vk_device, "vkGetDeviceQueue2"));
- if (!vkGetDeviceQueue2Fn) {
- DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
- << "vkGetDeviceQueue2";
- return false;
- }
-
- vkGetBufferMemoryRequirements2Fn =
- reinterpret_cast<PFN_vkGetBufferMemoryRequirements2>(
- vkGetDeviceProcAddr(vk_device, "vkGetBufferMemoryRequirements2"));
- if (!vkGetBufferMemoryRequirements2Fn) {
- DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
- << "vkGetBufferMemoryRequirements2";
- return false;
- }
-
- vkGetImageMemoryRequirements2Fn =
- reinterpret_cast<PFN_vkGetImageMemoryRequirements2>(
- vkGetDeviceProcAddr(vk_device, "vkGetImageMemoryRequirements2"));
- if (!vkGetImageMemoryRequirements2Fn) {
- DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
- << "vkGetImageMemoryRequirements2";
- return false;
- }
- }
-
#if defined(OS_ANDROID)
if (gfx::HasExtension(
enabled_extensions,
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) {
- vkGetAndroidHardwareBufferPropertiesANDROIDFn =
+ vkGetAndroidHardwareBufferPropertiesANDROID =
reinterpret_cast<PFN_vkGetAndroidHardwareBufferPropertiesANDROID>(
vkGetDeviceProcAddr(vk_device,
"vkGetAndroidHardwareBufferPropertiesANDROID"));
- if (!vkGetAndroidHardwareBufferPropertiesANDROIDFn) {
+ if (!vkGetAndroidHardwareBufferPropertiesANDROID) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetAndroidHardwareBufferPropertiesANDROID";
return false;
@@ -865,17 +865,17 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
#if defined(OS_LINUX) || defined(OS_ANDROID)
if (gfx::HasExtension(enabled_extensions,
VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME)) {
- vkGetSemaphoreFdKHRFn = reinterpret_cast<PFN_vkGetSemaphoreFdKHR>(
+ vkGetSemaphoreFdKHR = reinterpret_cast<PFN_vkGetSemaphoreFdKHR>(
vkGetDeviceProcAddr(vk_device, "vkGetSemaphoreFdKHR"));
- if (!vkGetSemaphoreFdKHRFn) {
+ if (!vkGetSemaphoreFdKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetSemaphoreFdKHR";
return false;
}
- vkImportSemaphoreFdKHRFn = reinterpret_cast<PFN_vkImportSemaphoreFdKHR>(
+ vkImportSemaphoreFdKHR = reinterpret_cast<PFN_vkImportSemaphoreFdKHR>(
vkGetDeviceProcAddr(vk_device, "vkImportSemaphoreFdKHR"));
- if (!vkImportSemaphoreFdKHRFn) {
+ if (!vkImportSemaphoreFdKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkImportSemaphoreFdKHR";
return false;
@@ -886,19 +886,19 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
#if defined(OS_WIN)
if (gfx::HasExtension(enabled_extensions,
VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME)) {
- vkGetSemaphoreWin32HandleKHRFn =
+ vkGetSemaphoreWin32HandleKHR =
reinterpret_cast<PFN_vkGetSemaphoreWin32HandleKHR>(
vkGetDeviceProcAddr(vk_device, "vkGetSemaphoreWin32HandleKHR"));
- if (!vkGetSemaphoreWin32HandleKHRFn) {
+ if (!vkGetSemaphoreWin32HandleKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetSemaphoreWin32HandleKHR";
return false;
}
- vkImportSemaphoreWin32HandleKHRFn =
+ vkImportSemaphoreWin32HandleKHR =
reinterpret_cast<PFN_vkImportSemaphoreWin32HandleKHR>(
vkGetDeviceProcAddr(vk_device, "vkImportSemaphoreWin32HandleKHR"));
- if (!vkImportSemaphoreWin32HandleKHRFn) {
+ if (!vkImportSemaphoreWin32HandleKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkImportSemaphoreWin32HandleKHR";
return false;
@@ -909,18 +909,18 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
#if defined(OS_LINUX) || defined(OS_ANDROID)
if (gfx::HasExtension(enabled_extensions,
VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME)) {
- vkGetMemoryFdKHRFn = reinterpret_cast<PFN_vkGetMemoryFdKHR>(
+ vkGetMemoryFdKHR = reinterpret_cast<PFN_vkGetMemoryFdKHR>(
vkGetDeviceProcAddr(vk_device, "vkGetMemoryFdKHR"));
- if (!vkGetMemoryFdKHRFn) {
+ if (!vkGetMemoryFdKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetMemoryFdKHR";
return false;
}
- vkGetMemoryFdPropertiesKHRFn =
+ vkGetMemoryFdPropertiesKHR =
reinterpret_cast<PFN_vkGetMemoryFdPropertiesKHR>(
vkGetDeviceProcAddr(vk_device, "vkGetMemoryFdPropertiesKHR"));
- if (!vkGetMemoryFdPropertiesKHRFn) {
+ if (!vkGetMemoryFdPropertiesKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetMemoryFdPropertiesKHR";
return false;
@@ -931,20 +931,19 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
#if defined(OS_WIN)
if (gfx::HasExtension(enabled_extensions,
VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME)) {
- vkGetMemoryWin32HandleKHRFn =
- reinterpret_cast<PFN_vkGetMemoryWin32HandleKHR>(
- vkGetDeviceProcAddr(vk_device, "vkGetMemoryWin32HandleKHR"));
- if (!vkGetMemoryWin32HandleKHRFn) {
+ vkGetMemoryWin32HandleKHR = reinterpret_cast<PFN_vkGetMemoryWin32HandleKHR>(
+ vkGetDeviceProcAddr(vk_device, "vkGetMemoryWin32HandleKHR"));
+ if (!vkGetMemoryWin32HandleKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetMemoryWin32HandleKHR";
return false;
}
- vkGetMemoryWin32HandlePropertiesKHRFn =
+ vkGetMemoryWin32HandlePropertiesKHR =
reinterpret_cast<PFN_vkGetMemoryWin32HandlePropertiesKHR>(
vkGetDeviceProcAddr(vk_device,
"vkGetMemoryWin32HandlePropertiesKHR"));
- if (!vkGetMemoryWin32HandlePropertiesKHRFn) {
+ if (!vkGetMemoryWin32HandlePropertiesKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetMemoryWin32HandlePropertiesKHR";
return false;
@@ -955,21 +954,21 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
#if defined(OS_FUCHSIA)
if (gfx::HasExtension(enabled_extensions,
VK_FUCHSIA_EXTERNAL_SEMAPHORE_EXTENSION_NAME)) {
- vkImportSemaphoreZirconHandleFUCHSIAFn =
+ vkImportSemaphoreZirconHandleFUCHSIA =
reinterpret_cast<PFN_vkImportSemaphoreZirconHandleFUCHSIA>(
vkGetDeviceProcAddr(vk_device,
"vkImportSemaphoreZirconHandleFUCHSIA"));
- if (!vkImportSemaphoreZirconHandleFUCHSIAFn) {
+ if (!vkImportSemaphoreZirconHandleFUCHSIA) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkImportSemaphoreZirconHandleFUCHSIA";
return false;
}
- vkGetSemaphoreZirconHandleFUCHSIAFn =
+ vkGetSemaphoreZirconHandleFUCHSIA =
reinterpret_cast<PFN_vkGetSemaphoreZirconHandleFUCHSIA>(
vkGetDeviceProcAddr(vk_device,
"vkGetSemaphoreZirconHandleFUCHSIA"));
- if (!vkGetSemaphoreZirconHandleFUCHSIAFn) {
+ if (!vkGetSemaphoreZirconHandleFUCHSIA) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetSemaphoreZirconHandleFUCHSIA";
return false;
@@ -980,10 +979,10 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
#if defined(OS_FUCHSIA)
if (gfx::HasExtension(enabled_extensions,
VK_FUCHSIA_EXTERNAL_MEMORY_EXTENSION_NAME)) {
- vkGetMemoryZirconHandleFUCHSIAFn =
+ vkGetMemoryZirconHandleFUCHSIA =
reinterpret_cast<PFN_vkGetMemoryZirconHandleFUCHSIA>(
vkGetDeviceProcAddr(vk_device, "vkGetMemoryZirconHandleFUCHSIA"));
- if (!vkGetMemoryZirconHandleFUCHSIAFn) {
+ if (!vkGetMemoryZirconHandleFUCHSIA) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetMemoryZirconHandleFUCHSIA";
return false;
@@ -994,39 +993,39 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
#if defined(OS_FUCHSIA)
if (gfx::HasExtension(enabled_extensions,
VK_FUCHSIA_BUFFER_COLLECTION_EXTENSION_NAME)) {
- vkCreateBufferCollectionFUCHSIAFn =
+ vkCreateBufferCollectionFUCHSIA =
reinterpret_cast<PFN_vkCreateBufferCollectionFUCHSIA>(
vkGetDeviceProcAddr(vk_device, "vkCreateBufferCollectionFUCHSIA"));
- if (!vkCreateBufferCollectionFUCHSIAFn) {
+ if (!vkCreateBufferCollectionFUCHSIA) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateBufferCollectionFUCHSIA";
return false;
}
- vkSetBufferCollectionConstraintsFUCHSIAFn =
+ vkSetBufferCollectionConstraintsFUCHSIA =
reinterpret_cast<PFN_vkSetBufferCollectionConstraintsFUCHSIA>(
vkGetDeviceProcAddr(vk_device,
"vkSetBufferCollectionConstraintsFUCHSIA"));
- if (!vkSetBufferCollectionConstraintsFUCHSIAFn) {
+ if (!vkSetBufferCollectionConstraintsFUCHSIA) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkSetBufferCollectionConstraintsFUCHSIA";
return false;
}
- vkGetBufferCollectionPropertiesFUCHSIAFn =
+ vkGetBufferCollectionPropertiesFUCHSIA =
reinterpret_cast<PFN_vkGetBufferCollectionPropertiesFUCHSIA>(
vkGetDeviceProcAddr(vk_device,
"vkGetBufferCollectionPropertiesFUCHSIA"));
- if (!vkGetBufferCollectionPropertiesFUCHSIAFn) {
+ if (!vkGetBufferCollectionPropertiesFUCHSIA) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetBufferCollectionPropertiesFUCHSIA";
return false;
}
- vkDestroyBufferCollectionFUCHSIAFn =
+ vkDestroyBufferCollectionFUCHSIA =
reinterpret_cast<PFN_vkDestroyBufferCollectionFUCHSIA>(
vkGetDeviceProcAddr(vk_device, "vkDestroyBufferCollectionFUCHSIA"));
- if (!vkDestroyBufferCollectionFUCHSIAFn) {
+ if (!vkDestroyBufferCollectionFUCHSIA) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyBufferCollectionFUCHSIA";
return false;
@@ -1035,41 +1034,41 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
#endif // defined(OS_FUCHSIA)
if (gfx::HasExtension(enabled_extensions, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
- vkAcquireNextImageKHRFn = reinterpret_cast<PFN_vkAcquireNextImageKHR>(
+ vkAcquireNextImageKHR = reinterpret_cast<PFN_vkAcquireNextImageKHR>(
vkGetDeviceProcAddr(vk_device, "vkAcquireNextImageKHR"));
- if (!vkAcquireNextImageKHRFn) {
+ if (!vkAcquireNextImageKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkAcquireNextImageKHR";
return false;
}
- vkCreateSwapchainKHRFn = reinterpret_cast<PFN_vkCreateSwapchainKHR>(
+ vkCreateSwapchainKHR = reinterpret_cast<PFN_vkCreateSwapchainKHR>(
vkGetDeviceProcAddr(vk_device, "vkCreateSwapchainKHR"));
- if (!vkCreateSwapchainKHRFn) {
+ if (!vkCreateSwapchainKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateSwapchainKHR";
return false;
}
- vkDestroySwapchainKHRFn = reinterpret_cast<PFN_vkDestroySwapchainKHR>(
+ vkDestroySwapchainKHR = reinterpret_cast<PFN_vkDestroySwapchainKHR>(
vkGetDeviceProcAddr(vk_device, "vkDestroySwapchainKHR"));
- if (!vkDestroySwapchainKHRFn) {
+ if (!vkDestroySwapchainKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroySwapchainKHR";
return false;
}
- vkGetSwapchainImagesKHRFn = reinterpret_cast<PFN_vkGetSwapchainImagesKHR>(
+ vkGetSwapchainImagesKHR = reinterpret_cast<PFN_vkGetSwapchainImagesKHR>(
vkGetDeviceProcAddr(vk_device, "vkGetSwapchainImagesKHR"));
- if (!vkGetSwapchainImagesKHRFn) {
+ if (!vkGetSwapchainImagesKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetSwapchainImagesKHR";
return false;
}
- vkQueuePresentKHRFn = reinterpret_cast<PFN_vkQueuePresentKHR>(
+ vkQueuePresentKHR = reinterpret_cast<PFN_vkQueuePresentKHR>(
vkGetDeviceProcAddr(vk_device, "vkQueuePresentKHR"));
- if (!vkQueuePresentKHRFn) {
+ if (!vkQueuePresentKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkQueuePresentKHR";
return false;
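Before the header diff below, a minimal self-contained sketch of the binding pattern the generated .cc code above relies on: each entrypoint lives in a small callable wrapper that is falsy until bound and forwards calls, which is what lets the members drop the "Fn" suffix and keep the plain Vulkan spelling. This is a simplified stand-in, not the Chromium implementation; FakeGetProcAddr, Bind(), and the toy PFN type are invented for illustration.

#include <cstdio>
#include <string>
#include <unordered_map>

// Stand-in for PFN_vkVoidFunction / vkGetDeviceProcAddr (hypothetical loader).
using VoidFn = void (*)();
VoidFn FakeGetProcAddr(const std::string& name);

// Simplified wrapper: stores one entrypoint, is falsy until bound, and
// forwards calls so call sites can keep the bare "vkFoo" spelling.
template <typename Fn>
class VulkanFunction {
 public:
  explicit operator bool() const { return fn_ != nullptr; }

  template <typename... Args>
  auto operator()(Args... args) const {
    return fn_(args...);
  }

  // Mirrors the generated DLOG-and-return-false pattern above.
  bool Bind(const std::string& name) {
    fn_ = reinterpret_cast<Fn>(FakeGetProcAddr(name));
    if (!fn_) {
      std::fprintf(stderr, "Failed to bind vulkan entrypoint: %s\n",
                   name.c_str());
      return false;
    }
    return true;
  }

 private:
  Fn fn_ = nullptr;
};

// Fake entrypoint standing in for a real device function.
using PFN_vkDeviceWaitIdle = int (*)(int /*device*/);
int FakeDeviceWaitIdle(int /*device*/) { return 0; }  // VK_SUCCESS

VoidFn FakeGetProcAddr(const std::string& name) {
  static const std::unordered_map<std::string, VoidFn> kTable = {
      {"vkDeviceWaitIdle", reinterpret_cast<VoidFn>(&FakeDeviceWaitIdle)},
  };
  auto it = kTable.find(name);
  return it == kTable.end() ? nullptr : it->second;
}

int main() {
  // The member keeps the bare Vulkan name (no "Fn" suffix), so a call reads
  // exactly like the C API: vkDeviceWaitIdle(device).
  VulkanFunction<PFN_vkDeviceWaitIdle> vkDeviceWaitIdle;
  if (!vkDeviceWaitIdle.Bind("vkDeviceWaitIdle"))
    return 1;
  return vkDeviceWaitIdle(/*device=*/0);
}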
diff --git a/chromium/gpu/vulkan/vulkan_function_pointers.h b/chromium/gpu/vulkan/vulkan_function_pointers.h
index cd696ce9963..9d9682c2976 100644
--- a/chromium/gpu/vulkan/vulkan_function_pointers.h
+++ b/chromium/gpu/vulkan/vulkan_function_pointers.h
@@ -44,6 +44,8 @@ namespace gpu {
struct VulkanFunctionPointers;
+constexpr uint32_t kVulkanRequiredApiVersion = VK_API_VERSION_1_1;
+
COMPONENT_EXPORT(VULKAN) VulkanFunctionPointers* GetVulkanFunctionPointers();
struct COMPONENT_EXPORT(VULKAN) VulkanFunctionPointers {
@@ -72,10 +74,10 @@ struct COMPONENT_EXPORT(VULKAN) VulkanFunctionPointers {
public:
using Fn = R(VKAPI_PTR*)(Args...);
- explicit operator bool() { return !!fn_; }
+ explicit operator bool() const { return !!fn_; }
NO_SANITIZE("cfi-icall")
- R operator()(Args... args) { return fn_(args...); }
+ R operator()(Args... args) const { return fn_(args...); }
Fn get() const { return fn_; }
@@ -91,201 +93,197 @@ struct COMPONENT_EXPORT(VULKAN) VulkanFunctionPointers {
};
// Unassociated functions
- VulkanFunction<PFN_vkEnumerateInstanceVersion> vkEnumerateInstanceVersionFn;
- VulkanFunction<PFN_vkGetInstanceProcAddr> vkGetInstanceProcAddrFn;
+ VulkanFunction<PFN_vkGetInstanceProcAddr> vkGetInstanceProcAddr;
- VulkanFunction<PFN_vkCreateInstance> vkCreateInstanceFn;
+ VulkanFunction<PFN_vkEnumerateInstanceVersion> vkEnumerateInstanceVersion;
+ VulkanFunction<PFN_vkCreateInstance> vkCreateInstance;
VulkanFunction<PFN_vkEnumerateInstanceExtensionProperties>
- vkEnumerateInstanceExtensionPropertiesFn;
+ vkEnumerateInstanceExtensionProperties;
VulkanFunction<PFN_vkEnumerateInstanceLayerProperties>
- vkEnumerateInstanceLayerPropertiesFn;
+ vkEnumerateInstanceLayerProperties;
// Instance functions
- VulkanFunction<PFN_vkCreateDevice> vkCreateDeviceFn;
- VulkanFunction<PFN_vkDestroyInstance> vkDestroyInstanceFn;
+ VulkanFunction<PFN_vkCreateDevice> vkCreateDevice;
+ VulkanFunction<PFN_vkDestroyInstance> vkDestroyInstance;
VulkanFunction<PFN_vkEnumerateDeviceExtensionProperties>
- vkEnumerateDeviceExtensionPropertiesFn;
+ vkEnumerateDeviceExtensionProperties;
VulkanFunction<PFN_vkEnumerateDeviceLayerProperties>
- vkEnumerateDeviceLayerPropertiesFn;
- VulkanFunction<PFN_vkEnumeratePhysicalDevices> vkEnumeratePhysicalDevicesFn;
- VulkanFunction<PFN_vkGetDeviceProcAddr> vkGetDeviceProcAddrFn;
- VulkanFunction<PFN_vkGetPhysicalDeviceFeatures> vkGetPhysicalDeviceFeaturesFn;
+ vkEnumerateDeviceLayerProperties;
+ VulkanFunction<PFN_vkEnumeratePhysicalDevices> vkEnumeratePhysicalDevices;
+ VulkanFunction<PFN_vkGetDeviceProcAddr> vkGetDeviceProcAddr;
+ VulkanFunction<PFN_vkGetPhysicalDeviceFeatures2> vkGetPhysicalDeviceFeatures2;
VulkanFunction<PFN_vkGetPhysicalDeviceFormatProperties>
- vkGetPhysicalDeviceFormatPropertiesFn;
+ vkGetPhysicalDeviceFormatProperties;
+ VulkanFunction<PFN_vkGetPhysicalDeviceImageFormatProperties2>
+ vkGetPhysicalDeviceImageFormatProperties2;
VulkanFunction<PFN_vkGetPhysicalDeviceMemoryProperties>
- vkGetPhysicalDeviceMemoryPropertiesFn;
+ vkGetPhysicalDeviceMemoryProperties;
+ VulkanFunction<PFN_vkGetPhysicalDeviceMemoryProperties2>
+ vkGetPhysicalDeviceMemoryProperties2;
VulkanFunction<PFN_vkGetPhysicalDeviceProperties>
- vkGetPhysicalDevicePropertiesFn;
+ vkGetPhysicalDeviceProperties;
VulkanFunction<PFN_vkGetPhysicalDeviceQueueFamilyProperties>
- vkGetPhysicalDeviceQueueFamilyPropertiesFn;
+ vkGetPhysicalDeviceQueueFamilyProperties;
#if DCHECK_IS_ON()
VulkanFunction<PFN_vkCreateDebugReportCallbackEXT>
- vkCreateDebugReportCallbackEXTFn;
+ vkCreateDebugReportCallbackEXT;
VulkanFunction<PFN_vkDestroyDebugReportCallbackEXT>
- vkDestroyDebugReportCallbackEXTFn;
+ vkDestroyDebugReportCallbackEXT;
#endif // DCHECK_IS_ON()
- VulkanFunction<PFN_vkDestroySurfaceKHR> vkDestroySurfaceKHRFn;
+ VulkanFunction<PFN_vkDestroySurfaceKHR> vkDestroySurfaceKHR;
VulkanFunction<PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR>
- vkGetPhysicalDeviceSurfaceCapabilitiesKHRFn;
+ vkGetPhysicalDeviceSurfaceCapabilitiesKHR;
VulkanFunction<PFN_vkGetPhysicalDeviceSurfaceFormatsKHR>
- vkGetPhysicalDeviceSurfaceFormatsKHRFn;
+ vkGetPhysicalDeviceSurfaceFormatsKHR;
VulkanFunction<PFN_vkGetPhysicalDeviceSurfaceSupportKHR>
- vkGetPhysicalDeviceSurfaceSupportKHRFn;
+ vkGetPhysicalDeviceSurfaceSupportKHR;
#if defined(USE_VULKAN_XLIB)
- VulkanFunction<PFN_vkCreateXlibSurfaceKHR> vkCreateXlibSurfaceKHRFn;
+ VulkanFunction<PFN_vkCreateXlibSurfaceKHR> vkCreateXlibSurfaceKHR;
VulkanFunction<PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR>
- vkGetPhysicalDeviceXlibPresentationSupportKHRFn;
+ vkGetPhysicalDeviceXlibPresentationSupportKHR;
#endif // defined(USE_VULKAN_XLIB)
#if defined(OS_WIN)
- VulkanFunction<PFN_vkCreateWin32SurfaceKHR> vkCreateWin32SurfaceKHRFn;
+ VulkanFunction<PFN_vkCreateWin32SurfaceKHR> vkCreateWin32SurfaceKHR;
VulkanFunction<PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR>
- vkGetPhysicalDeviceWin32PresentationSupportKHRFn;
+ vkGetPhysicalDeviceWin32PresentationSupportKHR;
#endif // defined(OS_WIN)
#if defined(OS_ANDROID)
- VulkanFunction<PFN_vkCreateAndroidSurfaceKHR> vkCreateAndroidSurfaceKHRFn;
+ VulkanFunction<PFN_vkCreateAndroidSurfaceKHR> vkCreateAndroidSurfaceKHR;
#endif // defined(OS_ANDROID)
#if defined(OS_FUCHSIA)
VulkanFunction<PFN_vkCreateImagePipeSurfaceFUCHSIA>
- vkCreateImagePipeSurfaceFUCHSIAFn;
+ vkCreateImagePipeSurfaceFUCHSIA;
#endif // defined(OS_FUCHSIA)
- VulkanFunction<PFN_vkGetPhysicalDeviceImageFormatProperties2>
- vkGetPhysicalDeviceImageFormatProperties2Fn;
-
- VulkanFunction<PFN_vkGetPhysicalDeviceFeatures2>
- vkGetPhysicalDeviceFeatures2Fn;
-
// Device functions
- VulkanFunction<PFN_vkAllocateCommandBuffers> vkAllocateCommandBuffersFn;
- VulkanFunction<PFN_vkAllocateDescriptorSets> vkAllocateDescriptorSetsFn;
- VulkanFunction<PFN_vkAllocateMemory> vkAllocateMemoryFn;
- VulkanFunction<PFN_vkBeginCommandBuffer> vkBeginCommandBufferFn;
- VulkanFunction<PFN_vkBindBufferMemory> vkBindBufferMemoryFn;
- VulkanFunction<PFN_vkBindImageMemory> vkBindImageMemoryFn;
- VulkanFunction<PFN_vkCmdBeginRenderPass> vkCmdBeginRenderPassFn;
- VulkanFunction<PFN_vkCmdCopyBuffer> vkCmdCopyBufferFn;
- VulkanFunction<PFN_vkCmdCopyBufferToImage> vkCmdCopyBufferToImageFn;
- VulkanFunction<PFN_vkCmdEndRenderPass> vkCmdEndRenderPassFn;
- VulkanFunction<PFN_vkCmdExecuteCommands> vkCmdExecuteCommandsFn;
- VulkanFunction<PFN_vkCmdNextSubpass> vkCmdNextSubpassFn;
- VulkanFunction<PFN_vkCmdPipelineBarrier> vkCmdPipelineBarrierFn;
- VulkanFunction<PFN_vkCreateBuffer> vkCreateBufferFn;
- VulkanFunction<PFN_vkCreateCommandPool> vkCreateCommandPoolFn;
- VulkanFunction<PFN_vkCreateDescriptorPool> vkCreateDescriptorPoolFn;
- VulkanFunction<PFN_vkCreateDescriptorSetLayout> vkCreateDescriptorSetLayoutFn;
- VulkanFunction<PFN_vkCreateFence> vkCreateFenceFn;
- VulkanFunction<PFN_vkCreateFramebuffer> vkCreateFramebufferFn;
- VulkanFunction<PFN_vkCreateImage> vkCreateImageFn;
- VulkanFunction<PFN_vkCreateImageView> vkCreateImageViewFn;
- VulkanFunction<PFN_vkCreateRenderPass> vkCreateRenderPassFn;
- VulkanFunction<PFN_vkCreateSampler> vkCreateSamplerFn;
- VulkanFunction<PFN_vkCreateSemaphore> vkCreateSemaphoreFn;
- VulkanFunction<PFN_vkCreateShaderModule> vkCreateShaderModuleFn;
- VulkanFunction<PFN_vkDestroyBuffer> vkDestroyBufferFn;
- VulkanFunction<PFN_vkDestroyCommandPool> vkDestroyCommandPoolFn;
- VulkanFunction<PFN_vkDestroyDescriptorPool> vkDestroyDescriptorPoolFn;
- VulkanFunction<PFN_vkDestroyDescriptorSetLayout>
- vkDestroyDescriptorSetLayoutFn;
- VulkanFunction<PFN_vkDestroyDevice> vkDestroyDeviceFn;
- VulkanFunction<PFN_vkDestroyFence> vkDestroyFenceFn;
- VulkanFunction<PFN_vkDestroyFramebuffer> vkDestroyFramebufferFn;
- VulkanFunction<PFN_vkDestroyImage> vkDestroyImageFn;
- VulkanFunction<PFN_vkDestroyImageView> vkDestroyImageViewFn;
- VulkanFunction<PFN_vkDestroyRenderPass> vkDestroyRenderPassFn;
- VulkanFunction<PFN_vkDestroySampler> vkDestroySamplerFn;
- VulkanFunction<PFN_vkDestroySemaphore> vkDestroySemaphoreFn;
- VulkanFunction<PFN_vkDestroyShaderModule> vkDestroyShaderModuleFn;
- VulkanFunction<PFN_vkDeviceWaitIdle> vkDeviceWaitIdleFn;
- VulkanFunction<PFN_vkFlushMappedMemoryRanges> vkFlushMappedMemoryRangesFn;
- VulkanFunction<PFN_vkEndCommandBuffer> vkEndCommandBufferFn;
- VulkanFunction<PFN_vkFreeCommandBuffers> vkFreeCommandBuffersFn;
- VulkanFunction<PFN_vkFreeDescriptorSets> vkFreeDescriptorSetsFn;
- VulkanFunction<PFN_vkFreeMemory> vkFreeMemoryFn;
+ VulkanFunction<PFN_vkAllocateCommandBuffers> vkAllocateCommandBuffers;
+ VulkanFunction<PFN_vkAllocateDescriptorSets> vkAllocateDescriptorSets;
+ VulkanFunction<PFN_vkAllocateMemory> vkAllocateMemory;
+ VulkanFunction<PFN_vkBeginCommandBuffer> vkBeginCommandBuffer;
+ VulkanFunction<PFN_vkBindBufferMemory> vkBindBufferMemory;
+ VulkanFunction<PFN_vkBindBufferMemory2> vkBindBufferMemory2;
+ VulkanFunction<PFN_vkBindImageMemory> vkBindImageMemory;
+ VulkanFunction<PFN_vkBindImageMemory2> vkBindImageMemory2;
+ VulkanFunction<PFN_vkCmdBeginRenderPass> vkCmdBeginRenderPass;
+ VulkanFunction<PFN_vkCmdCopyBuffer> vkCmdCopyBuffer;
+ VulkanFunction<PFN_vkCmdCopyBufferToImage> vkCmdCopyBufferToImage;
+ VulkanFunction<PFN_vkCmdEndRenderPass> vkCmdEndRenderPass;
+ VulkanFunction<PFN_vkCmdExecuteCommands> vkCmdExecuteCommands;
+ VulkanFunction<PFN_vkCmdNextSubpass> vkCmdNextSubpass;
+ VulkanFunction<PFN_vkCmdPipelineBarrier> vkCmdPipelineBarrier;
+ VulkanFunction<PFN_vkCreateBuffer> vkCreateBuffer;
+ VulkanFunction<PFN_vkCreateCommandPool> vkCreateCommandPool;
+ VulkanFunction<PFN_vkCreateDescriptorPool> vkCreateDescriptorPool;
+ VulkanFunction<PFN_vkCreateDescriptorSetLayout> vkCreateDescriptorSetLayout;
+ VulkanFunction<PFN_vkCreateFence> vkCreateFence;
+ VulkanFunction<PFN_vkCreateFramebuffer> vkCreateFramebuffer;
+ VulkanFunction<PFN_vkCreateImage> vkCreateImage;
+ VulkanFunction<PFN_vkCreateImageView> vkCreateImageView;
+ VulkanFunction<PFN_vkCreateRenderPass> vkCreateRenderPass;
+ VulkanFunction<PFN_vkCreateSampler> vkCreateSampler;
+ VulkanFunction<PFN_vkCreateSemaphore> vkCreateSemaphore;
+ VulkanFunction<PFN_vkCreateShaderModule> vkCreateShaderModule;
+ VulkanFunction<PFN_vkDestroyBuffer> vkDestroyBuffer;
+ VulkanFunction<PFN_vkDestroyCommandPool> vkDestroyCommandPool;
+ VulkanFunction<PFN_vkDestroyDescriptorPool> vkDestroyDescriptorPool;
+ VulkanFunction<PFN_vkDestroyDescriptorSetLayout> vkDestroyDescriptorSetLayout;
+ VulkanFunction<PFN_vkDestroyDevice> vkDestroyDevice;
+ VulkanFunction<PFN_vkDestroyFence> vkDestroyFence;
+ VulkanFunction<PFN_vkDestroyFramebuffer> vkDestroyFramebuffer;
+ VulkanFunction<PFN_vkDestroyImage> vkDestroyImage;
+ VulkanFunction<PFN_vkDestroyImageView> vkDestroyImageView;
+ VulkanFunction<PFN_vkDestroyRenderPass> vkDestroyRenderPass;
+ VulkanFunction<PFN_vkDestroySampler> vkDestroySampler;
+ VulkanFunction<PFN_vkDestroySemaphore> vkDestroySemaphore;
+ VulkanFunction<PFN_vkDestroyShaderModule> vkDestroyShaderModule;
+ VulkanFunction<PFN_vkDeviceWaitIdle> vkDeviceWaitIdle;
+ VulkanFunction<PFN_vkFlushMappedMemoryRanges> vkFlushMappedMemoryRanges;
+ VulkanFunction<PFN_vkEndCommandBuffer> vkEndCommandBuffer;
+ VulkanFunction<PFN_vkFreeCommandBuffers> vkFreeCommandBuffers;
+ VulkanFunction<PFN_vkFreeDescriptorSets> vkFreeDescriptorSets;
+ VulkanFunction<PFN_vkFreeMemory> vkFreeMemory;
VulkanFunction<PFN_vkInvalidateMappedMemoryRanges>
- vkInvalidateMappedMemoryRangesFn;
+ vkInvalidateMappedMemoryRanges;
VulkanFunction<PFN_vkGetBufferMemoryRequirements>
- vkGetBufferMemoryRequirementsFn;
- VulkanFunction<PFN_vkGetDeviceQueue> vkGetDeviceQueueFn;
- VulkanFunction<PFN_vkGetFenceStatus> vkGetFenceStatusFn;
- VulkanFunction<PFN_vkGetImageMemoryRequirements>
- vkGetImageMemoryRequirementsFn;
- VulkanFunction<PFN_vkMapMemory> vkMapMemoryFn;
- VulkanFunction<PFN_vkQueueSubmit> vkQueueSubmitFn;
- VulkanFunction<PFN_vkQueueWaitIdle> vkQueueWaitIdleFn;
- VulkanFunction<PFN_vkResetCommandBuffer> vkResetCommandBufferFn;
- VulkanFunction<PFN_vkResetFences> vkResetFencesFn;
- VulkanFunction<PFN_vkUnmapMemory> vkUnmapMemoryFn;
- VulkanFunction<PFN_vkUpdateDescriptorSets> vkUpdateDescriptorSetsFn;
- VulkanFunction<PFN_vkWaitForFences> vkWaitForFencesFn;
-
- VulkanFunction<PFN_vkGetDeviceQueue2> vkGetDeviceQueue2Fn;
+ vkGetBufferMemoryRequirements;
VulkanFunction<PFN_vkGetBufferMemoryRequirements2>
- vkGetBufferMemoryRequirements2Fn;
+ vkGetBufferMemoryRequirements2;
+ VulkanFunction<PFN_vkGetDeviceQueue> vkGetDeviceQueue;
+ VulkanFunction<PFN_vkGetDeviceQueue2> vkGetDeviceQueue2;
+ VulkanFunction<PFN_vkGetFenceStatus> vkGetFenceStatus;
+ VulkanFunction<PFN_vkGetImageMemoryRequirements> vkGetImageMemoryRequirements;
VulkanFunction<PFN_vkGetImageMemoryRequirements2>
- vkGetImageMemoryRequirements2Fn;
+ vkGetImageMemoryRequirements2;
+ VulkanFunction<PFN_vkMapMemory> vkMapMemory;
+ VulkanFunction<PFN_vkQueueSubmit> vkQueueSubmit;
+ VulkanFunction<PFN_vkQueueWaitIdle> vkQueueWaitIdle;
+ VulkanFunction<PFN_vkResetCommandBuffer> vkResetCommandBuffer;
+ VulkanFunction<PFN_vkResetFences> vkResetFences;
+ VulkanFunction<PFN_vkUnmapMemory> vkUnmapMemory;
+ VulkanFunction<PFN_vkUpdateDescriptorSets> vkUpdateDescriptorSets;
+ VulkanFunction<PFN_vkWaitForFences> vkWaitForFences;
#if defined(OS_ANDROID)
VulkanFunction<PFN_vkGetAndroidHardwareBufferPropertiesANDROID>
- vkGetAndroidHardwareBufferPropertiesANDROIDFn;
+ vkGetAndroidHardwareBufferPropertiesANDROID;
#endif // defined(OS_ANDROID)
#if defined(OS_LINUX) || defined(OS_ANDROID)
- VulkanFunction<PFN_vkGetSemaphoreFdKHR> vkGetSemaphoreFdKHRFn;
- VulkanFunction<PFN_vkImportSemaphoreFdKHR> vkImportSemaphoreFdKHRFn;
+ VulkanFunction<PFN_vkGetSemaphoreFdKHR> vkGetSemaphoreFdKHR;
+ VulkanFunction<PFN_vkImportSemaphoreFdKHR> vkImportSemaphoreFdKHR;
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
#if defined(OS_WIN)
- VulkanFunction<PFN_vkGetSemaphoreWin32HandleKHR>
- vkGetSemaphoreWin32HandleKHRFn;
+ VulkanFunction<PFN_vkGetSemaphoreWin32HandleKHR> vkGetSemaphoreWin32HandleKHR;
VulkanFunction<PFN_vkImportSemaphoreWin32HandleKHR>
- vkImportSemaphoreWin32HandleKHRFn;
+ vkImportSemaphoreWin32HandleKHR;
#endif // defined(OS_WIN)
#if defined(OS_LINUX) || defined(OS_ANDROID)
- VulkanFunction<PFN_vkGetMemoryFdKHR> vkGetMemoryFdKHRFn;
- VulkanFunction<PFN_vkGetMemoryFdPropertiesKHR> vkGetMemoryFdPropertiesKHRFn;
+ VulkanFunction<PFN_vkGetMemoryFdKHR> vkGetMemoryFdKHR;
+ VulkanFunction<PFN_vkGetMemoryFdPropertiesKHR> vkGetMemoryFdPropertiesKHR;
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
#if defined(OS_WIN)
- VulkanFunction<PFN_vkGetMemoryWin32HandleKHR> vkGetMemoryWin32HandleKHRFn;
+ VulkanFunction<PFN_vkGetMemoryWin32HandleKHR> vkGetMemoryWin32HandleKHR;
VulkanFunction<PFN_vkGetMemoryWin32HandlePropertiesKHR>
- vkGetMemoryWin32HandlePropertiesKHRFn;
+ vkGetMemoryWin32HandlePropertiesKHR;
#endif // defined(OS_WIN)
#if defined(OS_FUCHSIA)
VulkanFunction<PFN_vkImportSemaphoreZirconHandleFUCHSIA>
- vkImportSemaphoreZirconHandleFUCHSIAFn;
+ vkImportSemaphoreZirconHandleFUCHSIA;
VulkanFunction<PFN_vkGetSemaphoreZirconHandleFUCHSIA>
- vkGetSemaphoreZirconHandleFUCHSIAFn;
+ vkGetSemaphoreZirconHandleFUCHSIA;
#endif // defined(OS_FUCHSIA)
#if defined(OS_FUCHSIA)
VulkanFunction<PFN_vkGetMemoryZirconHandleFUCHSIA>
- vkGetMemoryZirconHandleFUCHSIAFn;
+ vkGetMemoryZirconHandleFUCHSIA;
#endif // defined(OS_FUCHSIA)
#if defined(OS_FUCHSIA)
VulkanFunction<PFN_vkCreateBufferCollectionFUCHSIA>
- vkCreateBufferCollectionFUCHSIAFn;
+ vkCreateBufferCollectionFUCHSIA;
VulkanFunction<PFN_vkSetBufferCollectionConstraintsFUCHSIA>
- vkSetBufferCollectionConstraintsFUCHSIAFn;
+ vkSetBufferCollectionConstraintsFUCHSIA;
VulkanFunction<PFN_vkGetBufferCollectionPropertiesFUCHSIA>
- vkGetBufferCollectionPropertiesFUCHSIAFn;
+ vkGetBufferCollectionPropertiesFUCHSIA;
VulkanFunction<PFN_vkDestroyBufferCollectionFUCHSIA>
- vkDestroyBufferCollectionFUCHSIAFn;
+ vkDestroyBufferCollectionFUCHSIA;
#endif // defined(OS_FUCHSIA)
- VulkanFunction<PFN_vkAcquireNextImageKHR> vkAcquireNextImageKHRFn;
- VulkanFunction<PFN_vkCreateSwapchainKHR> vkCreateSwapchainKHRFn;
- VulkanFunction<PFN_vkDestroySwapchainKHR> vkDestroySwapchainKHRFn;
- VulkanFunction<PFN_vkGetSwapchainImagesKHR> vkGetSwapchainImagesKHRFn;
- VulkanFunction<PFN_vkQueuePresentKHR> vkQueuePresentKHRFn;
+ VulkanFunction<PFN_vkAcquireNextImageKHR> vkAcquireNextImageKHR;
+ VulkanFunction<PFN_vkCreateSwapchainKHR> vkCreateSwapchainKHR;
+ VulkanFunction<PFN_vkDestroySwapchainKHR> vkDestroySwapchainKHR;
+ VulkanFunction<PFN_vkGetSwapchainImagesKHR> vkGetSwapchainImagesKHR;
+ VulkanFunction<PFN_vkQueuePresentKHR> vkQueuePresentKHR;
};
} // namespace gpu
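The ALWAYS_INLINE wrappers in the hunks below keep the bare C-API names even though the struct members now use the same names. A minimal sketch of why that works, with toy types standing in for the real VulkanFunction<> members and Vulkan handles: member lookup through "->" resolves to the struct field, so the global trampoline forwards rather than recursing.

#include <cstdio>

// Toy stand-ins; the real code uses VulkanFunction<> members and VkDevice.
static int ToyWaitIdle(int /*device*/) { return 0; }  // VK_SUCCESS

struct FnTable {
  int (*vkDeviceWaitIdle)(int) = &ToyWaitIdle;  // member keeps the C-API name
};

static FnTable* GetTable() {  // stand-in for gpu::GetVulkanFunctionPointers()
  static FnTable table;
  return &table;
}

// Global trampoline with the same bare name as the member. Inside, the "->"
// lookup finds the member, so the call forwards instead of recursing.
inline int vkDeviceWaitIdle(int device) {
  return GetTable()->vkDeviceWaitIdle(device);
}

int main() {
  std::printf("result=%d\n", vkDeviceWaitIdle(/*device=*/0));
  return 0;
}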
@@ -293,18 +291,18 @@ struct COMPONENT_EXPORT(VULKAN) VulkanFunctionPointers {
// Unassociated functions
ALWAYS_INLINE PFN_vkVoidFunction vkGetInstanceProcAddr(VkInstance instance,
const char* pName) {
- return gpu::GetVulkanFunctionPointers()->vkGetInstanceProcAddrFn(instance,
- pName);
+ return gpu::GetVulkanFunctionPointers()->vkGetInstanceProcAddr(instance,
+ pName);
}
+
ALWAYS_INLINE VkResult vkEnumerateInstanceVersion(uint32_t* pApiVersion) {
- return gpu::GetVulkanFunctionPointers()->vkEnumerateInstanceVersionFn(
+ return gpu::GetVulkanFunctionPointers()->vkEnumerateInstanceVersion(
pApiVersion);
}
-
ALWAYS_INLINE VkResult vkCreateInstance(const VkInstanceCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkInstance* pInstance) {
- return gpu::GetVulkanFunctionPointers()->vkCreateInstanceFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateInstance(
pCreateInfo, pAllocator, pInstance);
}
ALWAYS_INLINE VkResult
@@ -312,13 +310,13 @@ vkEnumerateInstanceExtensionProperties(const char* pLayerName,
uint32_t* pPropertyCount,
VkExtensionProperties* pProperties) {
return gpu::GetVulkanFunctionPointers()
- ->vkEnumerateInstanceExtensionPropertiesFn(pLayerName, pPropertyCount,
- pProperties);
+ ->vkEnumerateInstanceExtensionProperties(pLayerName, pPropertyCount,
+ pProperties);
}
ALWAYS_INLINE VkResult
vkEnumerateInstanceLayerProperties(uint32_t* pPropertyCount,
VkLayerProperties* pProperties) {
- return gpu::GetVulkanFunctionPointers()->vkEnumerateInstanceLayerPropertiesFn(
+ return gpu::GetVulkanFunctionPointers()->vkEnumerateInstanceLayerProperties(
pPropertyCount, pProperties);
}
@@ -327,66 +325,77 @@ ALWAYS_INLINE VkResult vkCreateDevice(VkPhysicalDevice physicalDevice,
const VkDeviceCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkDevice* pDevice) {
- return gpu::GetVulkanFunctionPointers()->vkCreateDeviceFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateDevice(
physicalDevice, pCreateInfo, pAllocator, pDevice);
}
ALWAYS_INLINE void vkDestroyInstance(VkInstance instance,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyInstanceFn(instance,
- pAllocator);
+ return gpu::GetVulkanFunctionPointers()->vkDestroyInstance(instance,
+ pAllocator);
}
ALWAYS_INLINE VkResult
vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
const char* pLayerName,
uint32_t* pPropertyCount,
VkExtensionProperties* pProperties) {
- return gpu::GetVulkanFunctionPointers()
- ->vkEnumerateDeviceExtensionPropertiesFn(physicalDevice, pLayerName,
- pPropertyCount, pProperties);
+ return gpu::GetVulkanFunctionPointers()->vkEnumerateDeviceExtensionProperties(
+ physicalDevice, pLayerName, pPropertyCount, pProperties);
}
ALWAYS_INLINE VkResult
vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
uint32_t* pPropertyCount,
VkLayerProperties* pProperties) {
- return gpu::GetVulkanFunctionPointers()->vkEnumerateDeviceLayerPropertiesFn(
+ return gpu::GetVulkanFunctionPointers()->vkEnumerateDeviceLayerProperties(
physicalDevice, pPropertyCount, pProperties);
}
ALWAYS_INLINE VkResult
vkEnumeratePhysicalDevices(VkInstance instance,
uint32_t* pPhysicalDeviceCount,
VkPhysicalDevice* pPhysicalDevices) {
- return gpu::GetVulkanFunctionPointers()->vkEnumeratePhysicalDevicesFn(
+ return gpu::GetVulkanFunctionPointers()->vkEnumeratePhysicalDevices(
instance, pPhysicalDeviceCount, pPhysicalDevices);
}
ALWAYS_INLINE PFN_vkVoidFunction vkGetDeviceProcAddr(VkDevice device,
const char* pName) {
- return gpu::GetVulkanFunctionPointers()->vkGetDeviceProcAddrFn(device, pName);
+ return gpu::GetVulkanFunctionPointers()->vkGetDeviceProcAddr(device, pName);
}
-ALWAYS_INLINE void vkGetPhysicalDeviceFeatures(
+ALWAYS_INLINE void vkGetPhysicalDeviceFeatures2(
VkPhysicalDevice physicalDevice,
- VkPhysicalDeviceFeatures* pFeatures) {
- return gpu::GetVulkanFunctionPointers()->vkGetPhysicalDeviceFeaturesFn(
+ VkPhysicalDeviceFeatures2* pFeatures) {
+ return gpu::GetVulkanFunctionPointers()->vkGetPhysicalDeviceFeatures2(
physicalDevice, pFeatures);
}
ALWAYS_INLINE void vkGetPhysicalDeviceFormatProperties(
VkPhysicalDevice physicalDevice,
VkFormat format,
VkFormatProperties* pFormatProperties) {
+ return gpu::GetVulkanFunctionPointers()->vkGetPhysicalDeviceFormatProperties(
+ physicalDevice, format, pFormatProperties);
+}
+ALWAYS_INLINE VkResult vkGetPhysicalDeviceImageFormatProperties2(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
+ VkImageFormatProperties2* pImageFormatProperties) {
return gpu::GetVulkanFunctionPointers()
- ->vkGetPhysicalDeviceFormatPropertiesFn(physicalDevice, format,
- pFormatProperties);
+ ->vkGetPhysicalDeviceImageFormatProperties2(
+ physicalDevice, pImageFormatInfo, pImageFormatProperties);
}
ALWAYS_INLINE void vkGetPhysicalDeviceMemoryProperties(
VkPhysicalDevice physicalDevice,
VkPhysicalDeviceMemoryProperties* pMemoryProperties) {
- return gpu::GetVulkanFunctionPointers()
- ->vkGetPhysicalDeviceMemoryPropertiesFn(physicalDevice,
- pMemoryProperties);
+ return gpu::GetVulkanFunctionPointers()->vkGetPhysicalDeviceMemoryProperties(
+ physicalDevice, pMemoryProperties);
+}
+ALWAYS_INLINE void vkGetPhysicalDeviceMemoryProperties2(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceMemoryProperties2* pMemoryProperties) {
+ return gpu::GetVulkanFunctionPointers()->vkGetPhysicalDeviceMemoryProperties2(
+ physicalDevice, pMemoryProperties);
}
ALWAYS_INLINE void vkGetPhysicalDeviceProperties(
VkPhysicalDevice physicalDevice,
VkPhysicalDeviceProperties* pProperties) {
- return gpu::GetVulkanFunctionPointers()->vkGetPhysicalDevicePropertiesFn(
+ return gpu::GetVulkanFunctionPointers()->vkGetPhysicalDeviceProperties(
physicalDevice, pProperties);
}
ALWAYS_INLINE void vkGetPhysicalDeviceQueueFamilyProperties(
@@ -394,7 +403,7 @@ ALWAYS_INLINE void vkGetPhysicalDeviceQueueFamilyProperties(
uint32_t* pQueueFamilyPropertyCount,
VkQueueFamilyProperties* pQueueFamilyProperties) {
return gpu::GetVulkanFunctionPointers()
- ->vkGetPhysicalDeviceQueueFamilyPropertiesFn(
+ ->vkGetPhysicalDeviceQueueFamilyProperties(
physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties);
}
@@ -404,14 +413,14 @@ ALWAYS_INLINE VkResult vkCreateDebugReportCallbackEXT(
const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkDebugReportCallbackEXT* pCallback) {
- return gpu::GetVulkanFunctionPointers()->vkCreateDebugReportCallbackEXTFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateDebugReportCallbackEXT(
instance, pCreateInfo, pAllocator, pCallback);
}
ALWAYS_INLINE void vkDestroyDebugReportCallbackEXT(
VkInstance instance,
VkDebugReportCallbackEXT callback,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyDebugReportCallbackEXTFn(
+ return gpu::GetVulkanFunctionPointers()->vkDestroyDebugReportCallbackEXT(
instance, callback, pAllocator);
}
#endif // DCHECK_IS_ON()
@@ -420,7 +429,7 @@ ALWAYS_INLINE void vkDestroySurfaceKHR(
VkInstance instance,
VkSurfaceKHR surface,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroySurfaceKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkDestroySurfaceKHR(
instance, surface, pAllocator);
}
ALWAYS_INLINE VkResult vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
@@ -428,26 +437,24 @@ ALWAYS_INLINE VkResult vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
VkSurfaceKHR surface,
VkSurfaceCapabilitiesKHR* pSurfaceCapabilities) {
return gpu::GetVulkanFunctionPointers()
- ->vkGetPhysicalDeviceSurfaceCapabilitiesKHRFn(physicalDevice, surface,
- pSurfaceCapabilities);
+ ->vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface,
+ pSurfaceCapabilities);
}
ALWAYS_INLINE VkResult
vkGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
uint32_t* pSurfaceFormatCount,
VkSurfaceFormatKHR* pSurfaceFormats) {
- return gpu::GetVulkanFunctionPointers()
- ->vkGetPhysicalDeviceSurfaceFormatsKHRFn(
- physicalDevice, surface, pSurfaceFormatCount, pSurfaceFormats);
+ return gpu::GetVulkanFunctionPointers()->vkGetPhysicalDeviceSurfaceFormatsKHR(
+ physicalDevice, surface, pSurfaceFormatCount, pSurfaceFormats);
}
ALWAYS_INLINE VkResult
vkGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
VkSurfaceKHR surface,
VkBool32* pSupported) {
- return gpu::GetVulkanFunctionPointers()
- ->vkGetPhysicalDeviceSurfaceSupportKHRFn(physicalDevice, queueFamilyIndex,
- surface, pSupported);
+ return gpu::GetVulkanFunctionPointers()->vkGetPhysicalDeviceSurfaceSupportKHR(
+ physicalDevice, queueFamilyIndex, surface, pSupported);
}
#if defined(USE_VULKAN_XLIB)
@@ -456,7 +463,7 @@ vkCreateXlibSurfaceKHR(VkInstance instance,
const VkXlibSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface) {
- return gpu::GetVulkanFunctionPointers()->vkCreateXlibSurfaceKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateXlibSurfaceKHR(
instance, pCreateInfo, pAllocator, pSurface);
}
ALWAYS_INLINE VkBool32
@@ -465,7 +472,7 @@ vkGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
Display* dpy,
VisualID visualID) {
return gpu::GetVulkanFunctionPointers()
- ->vkGetPhysicalDeviceXlibPresentationSupportKHRFn(
+ ->vkGetPhysicalDeviceXlibPresentationSupportKHR(
physicalDevice, queueFamilyIndex, dpy, visualID);
}
#endif // defined(USE_VULKAN_XLIB)
@@ -476,15 +483,15 @@ vkCreateWin32SurfaceKHR(VkInstance instance,
const VkWin32SurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface) {
- return gpu::GetVulkanFunctionPointers()->vkCreateWin32SurfaceKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateWin32SurfaceKHR(
instance, pCreateInfo, pAllocator, pSurface);
}
ALWAYS_INLINE VkBool32
vkGetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex) {
return gpu::GetVulkanFunctionPointers()
- ->vkGetPhysicalDeviceWin32PresentationSupportKHRFn(physicalDevice,
- queueFamilyIndex);
+ ->vkGetPhysicalDeviceWin32PresentationSupportKHR(physicalDevice,
+ queueFamilyIndex);
}
#endif // defined(OS_WIN)
@@ -494,7 +501,7 @@ vkCreateAndroidSurfaceKHR(VkInstance instance,
const VkAndroidSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface) {
- return gpu::GetVulkanFunctionPointers()->vkCreateAndroidSurfaceKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateAndroidSurfaceKHR(
instance, pCreateInfo, pAllocator, pSurface);
}
#endif // defined(OS_ANDROID)
@@ -505,40 +512,24 @@ ALWAYS_INLINE VkResult vkCreateImagePipeSurfaceFUCHSIA(
const VkImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface) {
- return gpu::GetVulkanFunctionPointers()->vkCreateImagePipeSurfaceFUCHSIAFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateImagePipeSurfaceFUCHSIA(
instance, pCreateInfo, pAllocator, pSurface);
}
#endif // defined(OS_FUCHSIA)
-ALWAYS_INLINE VkResult vkGetPhysicalDeviceImageFormatProperties2(
- VkPhysicalDevice physicalDevice,
- const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
- VkImageFormatProperties2* pImageFormatProperties) {
- return gpu::GetVulkanFunctionPointers()
- ->vkGetPhysicalDeviceImageFormatProperties2Fn(
- physicalDevice, pImageFormatInfo, pImageFormatProperties);
-}
-
-ALWAYS_INLINE void vkGetPhysicalDeviceFeatures2(
- VkPhysicalDevice physicalDevice,
- VkPhysicalDeviceFeatures2* pFeatures) {
- return gpu::GetVulkanFunctionPointers()->vkGetPhysicalDeviceFeatures2Fn(
- physicalDevice, pFeatures);
-}
-
// Device functions
ALWAYS_INLINE VkResult
vkAllocateCommandBuffers(VkDevice device,
const VkCommandBufferAllocateInfo* pAllocateInfo,
VkCommandBuffer* pCommandBuffers) {
- return gpu::GetVulkanFunctionPointers()->vkAllocateCommandBuffersFn(
+ return gpu::GetVulkanFunctionPointers()->vkAllocateCommandBuffers(
device, pAllocateInfo, pCommandBuffers);
}
ALWAYS_INLINE VkResult
vkAllocateDescriptorSets(VkDevice device,
const VkDescriptorSetAllocateInfo* pAllocateInfo,
VkDescriptorSet* pDescriptorSets) {
- return gpu::GetVulkanFunctionPointers()->vkAllocateDescriptorSetsFn(
+ return gpu::GetVulkanFunctionPointers()->vkAllocateDescriptorSets(
device, pAllocateInfo, pDescriptorSets);
}
ALWAYS_INLINE VkResult
@@ -546,34 +537,48 @@ vkAllocateMemory(VkDevice device,
const VkMemoryAllocateInfo* pAllocateInfo,
const VkAllocationCallbacks* pAllocator,
VkDeviceMemory* pMemory) {
- return gpu::GetVulkanFunctionPointers()->vkAllocateMemoryFn(
+ return gpu::GetVulkanFunctionPointers()->vkAllocateMemory(
device, pAllocateInfo, pAllocator, pMemory);
}
ALWAYS_INLINE VkResult
vkBeginCommandBuffer(VkCommandBuffer commandBuffer,
const VkCommandBufferBeginInfo* pBeginInfo) {
- return gpu::GetVulkanFunctionPointers()->vkBeginCommandBufferFn(commandBuffer,
- pBeginInfo);
+ return gpu::GetVulkanFunctionPointers()->vkBeginCommandBuffer(commandBuffer,
+ pBeginInfo);
}
ALWAYS_INLINE VkResult vkBindBufferMemory(VkDevice device,
VkBuffer buffer,
VkDeviceMemory memory,
VkDeviceSize memoryOffset) {
- return gpu::GetVulkanFunctionPointers()->vkBindBufferMemoryFn(
+ return gpu::GetVulkanFunctionPointers()->vkBindBufferMemory(
device, buffer, memory, memoryOffset);
}
+ALWAYS_INLINE VkResult
+vkBindBufferMemory2(VkDevice device,
+ uint32_t bindInfoCount,
+ const VkBindBufferMemoryInfo* pBindInfos) {
+ return gpu::GetVulkanFunctionPointers()->vkBindBufferMemory2(
+ device, bindInfoCount, pBindInfos);
+}
ALWAYS_INLINE VkResult vkBindImageMemory(VkDevice device,
VkImage image,
VkDeviceMemory memory,
VkDeviceSize memoryOffset) {
- return gpu::GetVulkanFunctionPointers()->vkBindImageMemoryFn(
+ return gpu::GetVulkanFunctionPointers()->vkBindImageMemory(
device, image, memory, memoryOffset);
}
+ALWAYS_INLINE VkResult
+vkBindImageMemory2(VkDevice device,
+ uint32_t bindInfoCount,
+ const VkBindImageMemoryInfo* pBindInfos) {
+ return gpu::GetVulkanFunctionPointers()->vkBindImageMemory2(
+ device, bindInfoCount, pBindInfos);
+}
ALWAYS_INLINE void vkCmdBeginRenderPass(
VkCommandBuffer commandBuffer,
const VkRenderPassBeginInfo* pRenderPassBegin,
VkSubpassContents contents) {
- return gpu::GetVulkanFunctionPointers()->vkCmdBeginRenderPassFn(
+ return gpu::GetVulkanFunctionPointers()->vkCmdBeginRenderPass(
commandBuffer, pRenderPassBegin, contents);
}
ALWAYS_INLINE void vkCmdCopyBuffer(VkCommandBuffer commandBuffer,
@@ -581,7 +586,7 @@ ALWAYS_INLINE void vkCmdCopyBuffer(VkCommandBuffer commandBuffer,
VkBuffer dstBuffer,
uint32_t regionCount,
const VkBufferCopy* pRegions) {
- return gpu::GetVulkanFunctionPointers()->vkCmdCopyBufferFn(
+ return gpu::GetVulkanFunctionPointers()->vkCmdCopyBuffer(
commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
}
ALWAYS_INLINE void vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer,
@@ -590,24 +595,24 @@ ALWAYS_INLINE void vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer,
VkImageLayout dstImageLayout,
uint32_t regionCount,
const VkBufferImageCopy* pRegions) {
- return gpu::GetVulkanFunctionPointers()->vkCmdCopyBufferToImageFn(
+ return gpu::GetVulkanFunctionPointers()->vkCmdCopyBufferToImage(
commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
pRegions);
}
ALWAYS_INLINE void vkCmdEndRenderPass(VkCommandBuffer commandBuffer) {
- return gpu::GetVulkanFunctionPointers()->vkCmdEndRenderPassFn(commandBuffer);
+ return gpu::GetVulkanFunctionPointers()->vkCmdEndRenderPass(commandBuffer);
}
ALWAYS_INLINE void vkCmdExecuteCommands(
VkCommandBuffer commandBuffer,
uint32_t commandBufferCount,
const VkCommandBuffer* pCommandBuffers) {
- return gpu::GetVulkanFunctionPointers()->vkCmdExecuteCommandsFn(
+ return gpu::GetVulkanFunctionPointers()->vkCmdExecuteCommands(
commandBuffer, commandBufferCount, pCommandBuffers);
}
ALWAYS_INLINE void vkCmdNextSubpass(VkCommandBuffer commandBuffer,
VkSubpassContents contents) {
- return gpu::GetVulkanFunctionPointers()->vkCmdNextSubpassFn(commandBuffer,
- contents);
+ return gpu::GetVulkanFunctionPointers()->vkCmdNextSubpass(commandBuffer,
+ contents);
}
ALWAYS_INLINE void vkCmdPipelineBarrier(
VkCommandBuffer commandBuffer,
@@ -620,7 +625,7 @@ ALWAYS_INLINE void vkCmdPipelineBarrier(
const VkBufferMemoryBarrier* pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier* pImageMemoryBarriers) {
- return gpu::GetVulkanFunctionPointers()->vkCmdPipelineBarrierFn(
+ return gpu::GetVulkanFunctionPointers()->vkCmdPipelineBarrier(
commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
@@ -629,15 +634,15 @@ ALWAYS_INLINE VkResult vkCreateBuffer(VkDevice device,
const VkBufferCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkBuffer* pBuffer) {
- return gpu::GetVulkanFunctionPointers()->vkCreateBufferFn(
- device, pCreateInfo, pAllocator, pBuffer);
+ return gpu::GetVulkanFunctionPointers()->vkCreateBuffer(device, pCreateInfo,
+ pAllocator, pBuffer);
}
ALWAYS_INLINE VkResult
vkCreateCommandPool(VkDevice device,
const VkCommandPoolCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkCommandPool* pCommandPool) {
- return gpu::GetVulkanFunctionPointers()->vkCreateCommandPoolFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateCommandPool(
device, pCreateInfo, pAllocator, pCommandPool);
}
ALWAYS_INLINE VkResult
@@ -645,7 +650,7 @@ vkCreateDescriptorPool(VkDevice device,
const VkDescriptorPoolCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkDescriptorPool* pDescriptorPool) {
- return gpu::GetVulkanFunctionPointers()->vkCreateDescriptorPoolFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateDescriptorPool(
device, pCreateInfo, pAllocator, pDescriptorPool);
}
ALWAYS_INLINE VkResult
@@ -653,37 +658,37 @@ vkCreateDescriptorSetLayout(VkDevice device,
const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkDescriptorSetLayout* pSetLayout) {
- return gpu::GetVulkanFunctionPointers()->vkCreateDescriptorSetLayoutFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateDescriptorSetLayout(
device, pCreateInfo, pAllocator, pSetLayout);
}
ALWAYS_INLINE VkResult vkCreateFence(VkDevice device,
const VkFenceCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkFence* pFence) {
- return gpu::GetVulkanFunctionPointers()->vkCreateFenceFn(device, pCreateInfo,
- pAllocator, pFence);
+ return gpu::GetVulkanFunctionPointers()->vkCreateFence(device, pCreateInfo,
+ pAllocator, pFence);
}
ALWAYS_INLINE VkResult
vkCreateFramebuffer(VkDevice device,
const VkFramebufferCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkFramebuffer* pFramebuffer) {
- return gpu::GetVulkanFunctionPointers()->vkCreateFramebufferFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateFramebuffer(
device, pCreateInfo, pAllocator, pFramebuffer);
}
ALWAYS_INLINE VkResult vkCreateImage(VkDevice device,
const VkImageCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkImage* pImage) {
- return gpu::GetVulkanFunctionPointers()->vkCreateImageFn(device, pCreateInfo,
- pAllocator, pImage);
+ return gpu::GetVulkanFunctionPointers()->vkCreateImage(device, pCreateInfo,
+ pAllocator, pImage);
}
ALWAYS_INLINE VkResult
vkCreateImageView(VkDevice device,
const VkImageViewCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkImageView* pView) {
- return gpu::GetVulkanFunctionPointers()->vkCreateImageViewFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateImageView(
device, pCreateInfo, pAllocator, pView);
}
ALWAYS_INLINE VkResult
@@ -691,14 +696,14 @@ vkCreateRenderPass(VkDevice device,
const VkRenderPassCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkRenderPass* pRenderPass) {
- return gpu::GetVulkanFunctionPointers()->vkCreateRenderPassFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateRenderPass(
device, pCreateInfo, pAllocator, pRenderPass);
}
ALWAYS_INLINE VkResult vkCreateSampler(VkDevice device,
const VkSamplerCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSampler* pSampler) {
- return gpu::GetVulkanFunctionPointers()->vkCreateSamplerFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateSampler(
device, pCreateInfo, pAllocator, pSampler);
}
ALWAYS_INLINE VkResult
@@ -706,7 +711,7 @@ vkCreateSemaphore(VkDevice device,
const VkSemaphoreCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSemaphore* pSemaphore) {
- return gpu::GetVulkanFunctionPointers()->vkCreateSemaphoreFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateSemaphore(
device, pCreateInfo, pAllocator, pSemaphore);
}
ALWAYS_INLINE VkResult
@@ -714,111 +719,110 @@ vkCreateShaderModule(VkDevice device,
const VkShaderModuleCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkShaderModule* pShaderModule) {
- return gpu::GetVulkanFunctionPointers()->vkCreateShaderModuleFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateShaderModule(
device, pCreateInfo, pAllocator, pShaderModule);
}
ALWAYS_INLINE void vkDestroyBuffer(VkDevice device,
VkBuffer buffer,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyBufferFn(device, buffer,
- pAllocator);
+ return gpu::GetVulkanFunctionPointers()->vkDestroyBuffer(device, buffer,
+ pAllocator);
}
ALWAYS_INLINE void vkDestroyCommandPool(
VkDevice device,
VkCommandPool commandPool,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyCommandPoolFn(
+ return gpu::GetVulkanFunctionPointers()->vkDestroyCommandPool(
device, commandPool, pAllocator);
}
ALWAYS_INLINE void vkDestroyDescriptorPool(
VkDevice device,
VkDescriptorPool descriptorPool,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyDescriptorPoolFn(
+ return gpu::GetVulkanFunctionPointers()->vkDestroyDescriptorPool(
device, descriptorPool, pAllocator);
}
ALWAYS_INLINE void vkDestroyDescriptorSetLayout(
VkDevice device,
VkDescriptorSetLayout descriptorSetLayout,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyDescriptorSetLayoutFn(
+ return gpu::GetVulkanFunctionPointers()->vkDestroyDescriptorSetLayout(
device, descriptorSetLayout, pAllocator);
}
ALWAYS_INLINE void vkDestroyDevice(VkDevice device,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyDeviceFn(device,
- pAllocator);
+ return gpu::GetVulkanFunctionPointers()->vkDestroyDevice(device, pAllocator);
}
ALWAYS_INLINE void vkDestroyFence(VkDevice device,
VkFence fence,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyFenceFn(device, fence,
- pAllocator);
+ return gpu::GetVulkanFunctionPointers()->vkDestroyFence(device, fence,
+ pAllocator);
}
ALWAYS_INLINE void vkDestroyFramebuffer(
VkDevice device,
VkFramebuffer framebuffer,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyFramebufferFn(
+ return gpu::GetVulkanFunctionPointers()->vkDestroyFramebuffer(
device, framebuffer, pAllocator);
}
ALWAYS_INLINE void vkDestroyImage(VkDevice device,
VkImage image,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyImageFn(device, image,
- pAllocator);
+ return gpu::GetVulkanFunctionPointers()->vkDestroyImage(device, image,
+ pAllocator);
}
ALWAYS_INLINE void vkDestroyImageView(VkDevice device,
VkImageView imageView,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyImageViewFn(
- device, imageView, pAllocator);
+ return gpu::GetVulkanFunctionPointers()->vkDestroyImageView(device, imageView,
+ pAllocator);
}
ALWAYS_INLINE void vkDestroyRenderPass(
VkDevice device,
VkRenderPass renderPass,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyRenderPassFn(
+ return gpu::GetVulkanFunctionPointers()->vkDestroyRenderPass(
device, renderPass, pAllocator);
}
ALWAYS_INLINE void vkDestroySampler(VkDevice device,
VkSampler sampler,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroySamplerFn(device, sampler,
- pAllocator);
+ return gpu::GetVulkanFunctionPointers()->vkDestroySampler(device, sampler,
+ pAllocator);
}
ALWAYS_INLINE void vkDestroySemaphore(VkDevice device,
VkSemaphore semaphore,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroySemaphoreFn(
- device, semaphore, pAllocator);
+ return gpu::GetVulkanFunctionPointers()->vkDestroySemaphore(device, semaphore,
+ pAllocator);
}
ALWAYS_INLINE void vkDestroyShaderModule(
VkDevice device,
VkShaderModule shaderModule,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyShaderModuleFn(
+ return gpu::GetVulkanFunctionPointers()->vkDestroyShaderModule(
device, shaderModule, pAllocator);
}
ALWAYS_INLINE VkResult vkDeviceWaitIdle(VkDevice device) {
- return gpu::GetVulkanFunctionPointers()->vkDeviceWaitIdleFn(device);
+ return gpu::GetVulkanFunctionPointers()->vkDeviceWaitIdle(device);
}
ALWAYS_INLINE VkResult
vkFlushMappedMemoryRanges(VkDevice device,
uint32_t memoryRangeCount,
const VkMappedMemoryRange* pMemoryRanges) {
- return gpu::GetVulkanFunctionPointers()->vkFlushMappedMemoryRangesFn(
+ return gpu::GetVulkanFunctionPointers()->vkFlushMappedMemoryRanges(
device, memoryRangeCount, pMemoryRanges);
}
ALWAYS_INLINE VkResult vkEndCommandBuffer(VkCommandBuffer commandBuffer) {
- return gpu::GetVulkanFunctionPointers()->vkEndCommandBufferFn(commandBuffer);
+ return gpu::GetVulkanFunctionPointers()->vkEndCommandBuffer(commandBuffer);
}
ALWAYS_INLINE void vkFreeCommandBuffers(
VkDevice device,
VkCommandPool commandPool,
uint32_t commandBufferCount,
const VkCommandBuffer* pCommandBuffers) {
- return gpu::GetVulkanFunctionPointers()->vkFreeCommandBuffersFn(
+ return gpu::GetVulkanFunctionPointers()->vkFreeCommandBuffers(
device, commandPool, commandBufferCount, pCommandBuffers);
}
ALWAYS_INLINE VkResult
@@ -826,78 +830,98 @@ vkFreeDescriptorSets(VkDevice device,
VkDescriptorPool descriptorPool,
uint32_t descriptorSetCount,
const VkDescriptorSet* pDescriptorSets) {
- return gpu::GetVulkanFunctionPointers()->vkFreeDescriptorSetsFn(
+ return gpu::GetVulkanFunctionPointers()->vkFreeDescriptorSets(
device, descriptorPool, descriptorSetCount, pDescriptorSets);
}
ALWAYS_INLINE void vkFreeMemory(VkDevice device,
VkDeviceMemory memory,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkFreeMemoryFn(device, memory,
- pAllocator);
+ return gpu::GetVulkanFunctionPointers()->vkFreeMemory(device, memory,
+ pAllocator);
}
ALWAYS_INLINE VkResult
vkInvalidateMappedMemoryRanges(VkDevice device,
uint32_t memoryRangeCount,
const VkMappedMemoryRange* pMemoryRanges) {
- return gpu::GetVulkanFunctionPointers()->vkInvalidateMappedMemoryRangesFn(
+ return gpu::GetVulkanFunctionPointers()->vkInvalidateMappedMemoryRanges(
device, memoryRangeCount, pMemoryRanges);
}
ALWAYS_INLINE void vkGetBufferMemoryRequirements(
VkDevice device,
VkBuffer buffer,
VkMemoryRequirements* pMemoryRequirements) {
- return gpu::GetVulkanFunctionPointers()->vkGetBufferMemoryRequirementsFn(
+ return gpu::GetVulkanFunctionPointers()->vkGetBufferMemoryRequirements(
device, buffer, pMemoryRequirements);
}
+ALWAYS_INLINE void vkGetBufferMemoryRequirements2(
+ VkDevice device,
+ const VkBufferMemoryRequirementsInfo2* pInfo,
+ VkMemoryRequirements2* pMemoryRequirements) {
+ return gpu::GetVulkanFunctionPointers()->vkGetBufferMemoryRequirements2(
+ device, pInfo, pMemoryRequirements);
+}
ALWAYS_INLINE void vkGetDeviceQueue(VkDevice device,
uint32_t queueFamilyIndex,
uint32_t queueIndex,
VkQueue* pQueue) {
- return gpu::GetVulkanFunctionPointers()->vkGetDeviceQueueFn(
+ return gpu::GetVulkanFunctionPointers()->vkGetDeviceQueue(
device, queueFamilyIndex, queueIndex, pQueue);
}
+ALWAYS_INLINE void vkGetDeviceQueue2(VkDevice device,
+ const VkDeviceQueueInfo2* pQueueInfo,
+ VkQueue* pQueue) {
+ return gpu::GetVulkanFunctionPointers()->vkGetDeviceQueue2(device, pQueueInfo,
+ pQueue);
+}
ALWAYS_INLINE VkResult vkGetFenceStatus(VkDevice device, VkFence fence) {
- return gpu::GetVulkanFunctionPointers()->vkGetFenceStatusFn(device, fence);
+ return gpu::GetVulkanFunctionPointers()->vkGetFenceStatus(device, fence);
}
ALWAYS_INLINE void vkGetImageMemoryRequirements(
VkDevice device,
VkImage image,
VkMemoryRequirements* pMemoryRequirements) {
- return gpu::GetVulkanFunctionPointers()->vkGetImageMemoryRequirementsFn(
+ return gpu::GetVulkanFunctionPointers()->vkGetImageMemoryRequirements(
device, image, pMemoryRequirements);
}
+ALWAYS_INLINE void vkGetImageMemoryRequirements2(
+ VkDevice device,
+ const VkImageMemoryRequirementsInfo2* pInfo,
+ VkMemoryRequirements2* pMemoryRequirements) {
+ return gpu::GetVulkanFunctionPointers()->vkGetImageMemoryRequirements2(
+ device, pInfo, pMemoryRequirements);
+}
ALWAYS_INLINE VkResult vkMapMemory(VkDevice device,
VkDeviceMemory memory,
VkDeviceSize offset,
VkDeviceSize size,
VkMemoryMapFlags flags,
void** ppData) {
- return gpu::GetVulkanFunctionPointers()->vkMapMemoryFn(device, memory, offset,
- size, flags, ppData);
+ return gpu::GetVulkanFunctionPointers()->vkMapMemory(device, memory, offset,
+ size, flags, ppData);
}
ALWAYS_INLINE VkResult vkQueueSubmit(VkQueue queue,
uint32_t submitCount,
const VkSubmitInfo* pSubmits,
VkFence fence) {
- return gpu::GetVulkanFunctionPointers()->vkQueueSubmitFn(queue, submitCount,
- pSubmits, fence);
+ return gpu::GetVulkanFunctionPointers()->vkQueueSubmit(queue, submitCount,
+ pSubmits, fence);
}
ALWAYS_INLINE VkResult vkQueueWaitIdle(VkQueue queue) {
- return gpu::GetVulkanFunctionPointers()->vkQueueWaitIdleFn(queue);
+ return gpu::GetVulkanFunctionPointers()->vkQueueWaitIdle(queue);
}
ALWAYS_INLINE VkResult vkResetCommandBuffer(VkCommandBuffer commandBuffer,
VkCommandBufferResetFlags flags) {
- return gpu::GetVulkanFunctionPointers()->vkResetCommandBufferFn(commandBuffer,
- flags);
+ return gpu::GetVulkanFunctionPointers()->vkResetCommandBuffer(commandBuffer,
+ flags);
}
ALWAYS_INLINE VkResult vkResetFences(VkDevice device,
uint32_t fenceCount,
const VkFence* pFences) {
- return gpu::GetVulkanFunctionPointers()->vkResetFencesFn(device, fenceCount,
- pFences);
+ return gpu::GetVulkanFunctionPointers()->vkResetFences(device, fenceCount,
+ pFences);
}
ALWAYS_INLINE void vkUnmapMemory(VkDevice device, VkDeviceMemory memory) {
- return gpu::GetVulkanFunctionPointers()->vkUnmapMemoryFn(device, memory);
+ return gpu::GetVulkanFunctionPointers()->vkUnmapMemory(device, memory);
}
ALWAYS_INLINE void vkUpdateDescriptorSets(
VkDevice device,
@@ -905,7 +929,7 @@ ALWAYS_INLINE void vkUpdateDescriptorSets(
const VkWriteDescriptorSet* pDescriptorWrites,
uint32_t descriptorCopyCount,
const VkCopyDescriptorSet* pDescriptorCopies) {
- return gpu::GetVulkanFunctionPointers()->vkUpdateDescriptorSetsFn(
+ return gpu::GetVulkanFunctionPointers()->vkUpdateDescriptorSets(
device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
pDescriptorCopies);
}
@@ -914,39 +938,18 @@ ALWAYS_INLINE VkResult vkWaitForFences(VkDevice device,
const VkFence* pFences,
VkBool32 waitAll,
uint64_t timeout) {
- return gpu::GetVulkanFunctionPointers()->vkWaitForFencesFn(
+ return gpu::GetVulkanFunctionPointers()->vkWaitForFences(
device, fenceCount, pFences, waitAll, timeout);
}
-ALWAYS_INLINE void vkGetDeviceQueue2(VkDevice device,
- const VkDeviceQueueInfo2* pQueueInfo,
- VkQueue* pQueue) {
- return gpu::GetVulkanFunctionPointers()->vkGetDeviceQueue2Fn(
- device, pQueueInfo, pQueue);
-}
-ALWAYS_INLINE void vkGetBufferMemoryRequirements2(
- VkDevice device,
- const VkBufferMemoryRequirementsInfo2* pInfo,
- VkMemoryRequirements2* pMemoryRequirements) {
- return gpu::GetVulkanFunctionPointers()->vkGetBufferMemoryRequirements2Fn(
- device, pInfo, pMemoryRequirements);
-}
-ALWAYS_INLINE void vkGetImageMemoryRequirements2(
- VkDevice device,
- const VkImageMemoryRequirementsInfo2* pInfo,
- VkMemoryRequirements2* pMemoryRequirements) {
- return gpu::GetVulkanFunctionPointers()->vkGetImageMemoryRequirements2Fn(
- device, pInfo, pMemoryRequirements);
-}
-
#if defined(OS_ANDROID)
ALWAYS_INLINE VkResult vkGetAndroidHardwareBufferPropertiesANDROID(
VkDevice device,
const struct AHardwareBuffer* buffer,
VkAndroidHardwareBufferPropertiesANDROID* pProperties) {
return gpu::GetVulkanFunctionPointers()
- ->vkGetAndroidHardwareBufferPropertiesANDROIDFn(device, buffer,
- pProperties);
+ ->vkGetAndroidHardwareBufferPropertiesANDROID(device, buffer,
+ pProperties);
}
#endif // defined(OS_ANDROID)
@@ -955,13 +958,13 @@ ALWAYS_INLINE VkResult
vkGetSemaphoreFdKHR(VkDevice device,
const VkSemaphoreGetFdInfoKHR* pGetFdInfo,
int* pFd) {
- return gpu::GetVulkanFunctionPointers()->vkGetSemaphoreFdKHRFn(
- device, pGetFdInfo, pFd);
+ return gpu::GetVulkanFunctionPointers()->vkGetSemaphoreFdKHR(device,
+ pGetFdInfo, pFd);
}
ALWAYS_INLINE VkResult vkImportSemaphoreFdKHR(
VkDevice device,
const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo) {
- return gpu::GetVulkanFunctionPointers()->vkImportSemaphoreFdKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkImportSemaphoreFdKHR(
device, pImportSemaphoreFdInfo);
}
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
@@ -971,14 +974,14 @@ ALWAYS_INLINE VkResult vkGetSemaphoreWin32HandleKHR(
VkDevice device,
const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo,
HANDLE* pHandle) {
- return gpu::GetVulkanFunctionPointers()->vkGetSemaphoreWin32HandleKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkGetSemaphoreWin32HandleKHR(
device, pGetWin32HandleInfo, pHandle);
}
ALWAYS_INLINE VkResult
vkImportSemaphoreWin32HandleKHR(VkDevice device,
const VkImportSemaphoreWin32HandleInfoKHR*
pImportSemaphoreWin32HandleInfo) {
- return gpu::GetVulkanFunctionPointers()->vkImportSemaphoreWin32HandleKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkImportSemaphoreWin32HandleKHR(
device, pImportSemaphoreWin32HandleInfo);
}
#endif // defined(OS_WIN)
@@ -987,15 +990,15 @@ vkImportSemaphoreWin32HandleKHR(VkDevice device,
ALWAYS_INLINE VkResult vkGetMemoryFdKHR(VkDevice device,
const VkMemoryGetFdInfoKHR* pGetFdInfo,
int* pFd) {
- return gpu::GetVulkanFunctionPointers()->vkGetMemoryFdKHRFn(device,
- pGetFdInfo, pFd);
+ return gpu::GetVulkanFunctionPointers()->vkGetMemoryFdKHR(device, pGetFdInfo,
+ pFd);
}
ALWAYS_INLINE VkResult
vkGetMemoryFdPropertiesKHR(VkDevice device,
VkExternalMemoryHandleTypeFlagBits handleType,
int fd,
VkMemoryFdPropertiesKHR* pMemoryFdProperties) {
- return gpu::GetVulkanFunctionPointers()->vkGetMemoryFdPropertiesKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkGetMemoryFdPropertiesKHR(
device, handleType, fd, pMemoryFdProperties);
}
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
@@ -1005,7 +1008,7 @@ ALWAYS_INLINE VkResult vkGetMemoryWin32HandleKHR(
VkDevice device,
const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo,
HANDLE* pHandle) {
- return gpu::GetVulkanFunctionPointers()->vkGetMemoryWin32HandleKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkGetMemoryWin32HandleKHR(
device, pGetWin32HandleInfo, pHandle);
}
ALWAYS_INLINE VkResult vkGetMemoryWin32HandlePropertiesKHR(
@@ -1013,33 +1016,32 @@ ALWAYS_INLINE VkResult vkGetMemoryWin32HandlePropertiesKHR(
VkExternalMemoryHandleTypeFlagBits handleType,
HANDLE handle,
VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties) {
- return gpu::GetVulkanFunctionPointers()
- ->vkGetMemoryWin32HandlePropertiesKHRFn(device, handleType, handle,
- pMemoryWin32HandleProperties);
+ return gpu::GetVulkanFunctionPointers()->vkGetMemoryWin32HandlePropertiesKHR(
+ device, handleType, handle, pMemoryWin32HandleProperties);
}
#endif // defined(OS_WIN)
#if defined(OS_FUCHSIA)
#define vkImportSemaphoreZirconHandleFUCHSIA \
- gpu::GetVulkanFunctionPointers()->vkImportSemaphoreZirconHandleFUCHSIAFn
+ gpu::GetVulkanFunctionPointers()->vkImportSemaphoreZirconHandleFUCHSIA
#define vkGetSemaphoreZirconHandleFUCHSIA \
- gpu::GetVulkanFunctionPointers()->vkGetSemaphoreZirconHandleFUCHSIAFn
+ gpu::GetVulkanFunctionPointers()->vkGetSemaphoreZirconHandleFUCHSIA
#endif // defined(OS_FUCHSIA)
#if defined(OS_FUCHSIA)
#define vkGetMemoryZirconHandleFUCHSIA \
- gpu::GetVulkanFunctionPointers()->vkGetMemoryZirconHandleFUCHSIAFn
+ gpu::GetVulkanFunctionPointers()->vkGetMemoryZirconHandleFUCHSIA
#endif // defined(OS_FUCHSIA)
#if defined(OS_FUCHSIA)
#define vkCreateBufferCollectionFUCHSIA \
- gpu::GetVulkanFunctionPointers()->vkCreateBufferCollectionFUCHSIAFn
+ gpu::GetVulkanFunctionPointers()->vkCreateBufferCollectionFUCHSIA
#define vkSetBufferCollectionConstraintsFUCHSIA \
- gpu::GetVulkanFunctionPointers()->vkSetBufferCollectionConstraintsFUCHSIAFn
+ gpu::GetVulkanFunctionPointers()->vkSetBufferCollectionConstraintsFUCHSIA
#define vkGetBufferCollectionPropertiesFUCHSIA \
- gpu::GetVulkanFunctionPointers()->vkGetBufferCollectionPropertiesFUCHSIAFn
+ gpu::GetVulkanFunctionPointers()->vkGetBufferCollectionPropertiesFUCHSIA
#define vkDestroyBufferCollectionFUCHSIA \
- gpu::GetVulkanFunctionPointers()->vkDestroyBufferCollectionFUCHSIAFn
+ gpu::GetVulkanFunctionPointers()->vkDestroyBufferCollectionFUCHSIA
#endif // defined(OS_FUCHSIA)
ALWAYS_INLINE VkResult vkAcquireNextImageKHR(VkDevice device,
@@ -1048,7 +1050,7 @@ ALWAYS_INLINE VkResult vkAcquireNextImageKHR(VkDevice device,
VkSemaphore semaphore,
VkFence fence,
uint32_t* pImageIndex) {
- return gpu::GetVulkanFunctionPointers()->vkAcquireNextImageKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkAcquireNextImageKHR(
device, swapchain, timeout, semaphore, fence, pImageIndex);
}
ALWAYS_INLINE VkResult
@@ -1056,27 +1058,27 @@ vkCreateSwapchainKHR(VkDevice device,
const VkSwapchainCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSwapchainKHR* pSwapchain) {
- return gpu::GetVulkanFunctionPointers()->vkCreateSwapchainKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateSwapchainKHR(
device, pCreateInfo, pAllocator, pSwapchain);
}
ALWAYS_INLINE void vkDestroySwapchainKHR(
VkDevice device,
VkSwapchainKHR swapchain,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroySwapchainKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkDestroySwapchainKHR(
device, swapchain, pAllocator);
}
ALWAYS_INLINE VkResult vkGetSwapchainImagesKHR(VkDevice device,
VkSwapchainKHR swapchain,
uint32_t* pSwapchainImageCount,
VkImage* pSwapchainImages) {
- return gpu::GetVulkanFunctionPointers()->vkGetSwapchainImagesKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkGetSwapchainImagesKHR(
device, swapchain, pSwapchainImageCount, pSwapchainImages);
}
ALWAYS_INLINE VkResult vkQueuePresentKHR(VkQueue queue,
const VkPresentInfoKHR* pPresentInfo) {
- return gpu::GetVulkanFunctionPointers()->vkQueuePresentKHRFn(queue,
- pPresentInfo);
+ return gpu::GetVulkanFunctionPointers()->vkQueuePresentKHR(queue,
+ pPresentInfo);
}
#endif // GPU_VULKAN_VULKAN_FUNCTION_POINTERS_H_
\ No newline at end of file
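The renames in this header drop the "Fn" suffix from the struct members so that each ALWAYS_INLINE wrapper and the function pointer it forwards to share the canonical Vulkan name. The following is a minimal, self-contained sketch of that wrapper pattern; the member type, singleton storage, and accessor definition below are illustrative assumptions, not Chromium's actual loading code.

// Sketch of the function-pointer wrapper pattern updated by this diff.
#define VK_NO_PROTOTYPES  // Keep the inline wrapper from clashing with the loader's prototype.
#include <vulkan/vulkan.h>

// Holds dynamically resolved entry points; the real struct has one member per
// bound function, populated via vkGetInstanceProcAddr/vkGetDeviceProcAddr.
struct VulkanFunctionPointers {
  PFN_vkQueueWaitIdle vkQueueWaitIdle = nullptr;
};

inline VulkanFunctionPointers* GetVulkanFunctionPointers() {
  static VulkanFunctionPointers instance;
  return &instance;
}

// The wrapper keeps call sites looking like ordinary Vulkan calls while the
// actual entry point is looked up through the singleton.
inline VkResult vkQueueWaitIdle(VkQueue queue) {
  return GetVulkanFunctionPointers()->vkQueueWaitIdle(queue);
}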
diff --git a/chromium/gpu/vulkan/vulkan_image.cc b/chromium/gpu/vulkan/vulkan_image.cc
index c01245faf1f..fa199dd224d 100644
--- a/chromium/gpu/vulkan/vulkan_image.cc
+++ b/chromium/gpu/vulkan/vulkan_image.cc
@@ -8,6 +8,7 @@
#include <algorithm>
+#include "base/logging.h"
#include "base/macros.h"
#include "base/optional.h"
#include "base/stl_util.h"
@@ -103,7 +104,8 @@ std::unique_ptr<VulkanImage> VulkanImage::Create(
VkImageTiling image_tiling,
VkDeviceSize device_size,
uint32_t memory_type_index,
- base::Optional<VulkanYCbCrInfo>& ycbcr_info) {
+ base::Optional<VulkanYCbCrInfo>& ycbcr_info,
+ VkImageCreateFlags flags) {
auto image = std::make_unique<VulkanImage>(util::PassKey<VulkanImage>());
image->device_queue_ = device_queue;
image->image_ = vk_image;
@@ -114,6 +116,7 @@ std::unique_ptr<VulkanImage> VulkanImage::Create(
image->device_size_ = device_size;
image->memory_type_index_ = memory_type_index;
image->ycbcr_info_ = ycbcr_info;
+ image->flags_ = flags;
return image;
}
@@ -203,7 +206,7 @@ bool VulkanImage::Initialize(VulkanDeviceQueue* device_queue,
vkCreateImage(vk_device, &create_info, nullptr /* pAllocator */, &image_);
if (result != VK_SUCCESS) {
DLOG(ERROR) << "vkCreateImage failed result:" << result;
- device_queue_ = VK_NULL_HANDLE;
+ device_queue_ = nullptr;
return false;
}
@@ -339,4 +342,4 @@ bool VulkanImage::InitializeWithExternalMemory(VulkanDeviceQueue* device_queue,
nullptr /* requirements */);
}
-} // namespace gpu
\ No newline at end of file
+} // namespace gpu
diff --git a/chromium/gpu/vulkan/vulkan_image.h b/chromium/gpu/vulkan/vulkan_image.h
index 888bf865241..d48dd9bf706 100644
--- a/chromium/gpu/vulkan/vulkan_image.h
+++ b/chromium/gpu/vulkan/vulkan_image.h
@@ -74,7 +74,8 @@ class COMPONENT_EXPORT(VULKAN) VulkanImage {
VkImageTiling image_tiling,
VkDeviceSize device_size,
uint32_t memory_type_index,
- base::Optional<VulkanYCbCrInfo>& ycbcr_info);
+ base::Optional<VulkanYCbCrInfo>& ycbcr_info,
+ VkImageCreateFlags flags = 0);
void Destroy();
diff --git a/chromium/gpu/vulkan/vulkan_image_android.cc b/chromium/gpu/vulkan/vulkan_image_android.cc
index 5da67ae4f68..7d64386a332 100644
--- a/chromium/gpu/vulkan/vulkan_image_android.cc
+++ b/chromium/gpu/vulkan/vulkan_image_android.cc
@@ -5,6 +5,7 @@
#include "gpu/vulkan/vulkan_image.h"
#include "base/android/android_hardware_buffer_compat.h"
+#include "base/logging.h"
#include "gpu/vulkan/vulkan_device_queue.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
@@ -138,4 +139,4 @@ bool VulkanImage::InitializeFromGpuMemoryBufferHandle(
return true;
}
-} // namespace gpu
\ No newline at end of file
+} // namespace gpu
diff --git a/chromium/gpu/vulkan/vulkan_image_fuchsia.cc b/chromium/gpu/vulkan/vulkan_image_fuchsia.cc
index 6748ed5a228..b462ba9e99b 100644
--- a/chromium/gpu/vulkan/vulkan_image_fuchsia.cc
+++ b/chromium/gpu/vulkan/vulkan_image_fuchsia.cc
@@ -4,6 +4,7 @@
#include "gpu/vulkan/vulkan_image.h"
+#include "base/logging.h"
#include "gpu/vulkan/fuchsia/vulkan_fuchsia_ext.h"
#include "gpu/vulkan/vulkan_device_queue.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
diff --git a/chromium/gpu/vulkan/vulkan_image_linux.cc b/chromium/gpu/vulkan/vulkan_image_linux.cc
index 789b8e3b0e4..41ee3996cf5 100644
--- a/chromium/gpu/vulkan/vulkan_image_linux.cc
+++ b/chromium/gpu/vulkan/vulkan_image_linux.cc
@@ -4,6 +4,7 @@
#include "gpu/vulkan/vulkan_image.h"
+#include "base/logging.h"
#include "gpu/vulkan/vulkan_device_queue.h"
namespace gpu {
diff --git a/chromium/gpu/vulkan/vulkan_image_unittest.cc b/chromium/gpu/vulkan/vulkan_image_unittest.cc
index 0338611e057..54c717fc747 100644
--- a/chromium/gpu/vulkan/vulkan_image_unittest.cc
+++ b/chromium/gpu/vulkan/vulkan_image_unittest.cc
@@ -4,8 +4,10 @@
#include "gpu/vulkan/vulkan_image.h"
+#include "base/logging.h"
#include "build/build_config.h"
#include "gpu/config/gpu_info_collector.h"
+#include "gpu/config/gpu_test_config.h"
#include "gpu/ipc/service/gpu_memory_buffer_factory.h"
#include "gpu/vulkan/tests/basic_vulkan_test.h"
#include "gpu/vulkan/vulkan_device_queue.h"
@@ -54,18 +56,9 @@ TEST_F(VulkanImageTest, Create) {
TEST_F(VulkanImageTest, CreateWithExternalMemory) {
{
- GPUInfo gpu_info;
- CHECK(CollectBasicGraphicsInfo(&gpu_info));
-
- // TODO(crbug.com/1069516): Fails on Intel driver >= 26.20.100.7158; this is
- // seen on Win10 FYI x64 Exp Release (Intel HD 630), with 26.20.100.7870.
- if (gpu_info.gpu.driver_version == "26.20.100.7870") {
- // Can't be sure primary GPU is being used, so check it's the only one
- // (aside from the Microsoft software renderer).
- CHECK(gpu_info.secondary_gpus.size() == 1);
- // Skip test.
+ // TODO(crbug.com/1069516): Fails on current driver version on this bot.
+ if (GPUTestBotConfig::CurrentConfigMatches("Win10"))
return;
- }
}
constexpr gfx::Size size(100, 100);
diff --git a/chromium/gpu/vulkan/vulkan_image_win.cc b/chromium/gpu/vulkan/vulkan_image_win.cc
index 6bd6ef2a56b..6501ceb1644 100644
--- a/chromium/gpu/vulkan/vulkan_image_win.cc
+++ b/chromium/gpu/vulkan/vulkan_image_win.cc
@@ -4,6 +4,7 @@
#include "gpu/vulkan/vulkan_image.h"
+#include "base/logging.h"
#include "gpu/vulkan/vulkan_device_queue.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
diff --git a/chromium/gpu/vulkan/vulkan_implementation.h b/chromium/gpu/vulkan/vulkan_implementation.h
index 3ee8cd5f4ad..42380494565 100644
--- a/chromium/gpu/vulkan/vulkan_implementation.h
+++ b/chromium/gpu/vulkan/vulkan_implementation.h
@@ -134,7 +134,9 @@ class COMPONENT_EXPORT(VULKAN) VulkanImplementation {
virtual std::unique_ptr<SysmemBufferCollection>
RegisterSysmemBufferCollection(VkDevice device,
gfx::SysmemBufferCollectionId id,
- zx::channel token) = 0;
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage) = 0;
#endif // defined(OS_FUCHSIA)
bool use_swiftshader() const { return use_swiftshader_; }
diff --git a/chromium/gpu/vulkan/vulkan_instance.cc b/chromium/gpu/vulkan/vulkan_instance.cc
index 49d8709a177..c68a852d2a4 100644
--- a/chromium/gpu/vulkan/vulkan_instance.cc
+++ b/chromium/gpu/vulkan/vulkan_instance.cc
@@ -79,19 +79,19 @@ bool VulkanInstance::Initialize(
if (!vulkan_function_pointers->BindUnassociatedFunctionPointers())
return false;
- if (vulkan_function_pointers->vkEnumerateInstanceVersionFn)
- vkEnumerateInstanceVersion(&vulkan_info_.api_version);
+ VkResult result = vkEnumerateInstanceVersion(&vulkan_info_.api_version);
+ if (result != VK_SUCCESS) {
+ DLOG(ERROR) << "vkEnumerateInstanceVersion() failed: " << result;
+ return false;
+ }
- if (vulkan_info_.api_version < VK_MAKE_VERSION(1, 1, 0))
+ if (vulkan_info_.api_version < kVulkanRequiredApiVersion)
return false;
gpu::crash_keys::vulkan_api_version.Set(
VkVersionToString(vulkan_info_.api_version));
- // Use Vulkan 1.1 if it's available.
- vulkan_info_.used_api_version = VK_MAKE_VERSION(1, 1, 0);
-
- VkResult result = VK_SUCCESS;
+ vulkan_info_.used_api_version = kVulkanRequiredApiVersion;
VkApplicationInfo app_info = {};
app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
@@ -304,7 +304,8 @@ bool VulkanInstance::CollectInfo() {
// API version of the VkPhysicalDevice, so we need to check the GPU's
// API version instead of just testing to see if
// vkGetPhysicalDeviceFeatures2 is non-null.
- if (info.properties.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+ static_assert(kVulkanRequiredApiVersion >= VK_API_VERSION_1_1, "");
+ if (info.properties.apiVersion >= kVulkanRequiredApiVersion) {
VkPhysicalDeviceSamplerYcbcrConversionFeatures ycbcr_conversion_features =
{VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES};
VkPhysicalDeviceProtectedMemoryFeatures protected_memory_feature = {
@@ -319,8 +320,6 @@ bool VulkanInstance::CollectInfo() {
info.feature_sampler_ycbcr_conversion =
ycbcr_conversion_features.samplerYcbcrConversion;
info.feature_protected_memory = protected_memory_feature.protectedMemory;
- } else {
- vkGetPhysicalDeviceFeatures(device, &info.features);
}
count = 0;
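The initialization change above makes a failed vkEnumerateInstanceVersion() call fatal and compares the reported version against kVulkanRequiredApiVersion instead of a hard-coded VK_MAKE_VERSION(1, 1, 0). Below is a standalone sketch of the same gate, assuming kVulkanRequiredApiVersion equals VK_API_VERSION_1_1; the diff's static_assert only guarantees it is at least that.

// Sketch of the required-API-version check (error reporting simplified).
#include <vulkan/vulkan.h>
#include <cstdint>
#include <cstdio>

// Assumed value; the diff only shows the constant is >= VK_API_VERSION_1_1.
constexpr uint32_t kVulkanRequiredApiVersion = VK_API_VERSION_1_1;

bool CheckInstanceApiVersion(uint32_t* api_version_out) {
  uint32_t api_version = 0;
  // vkEnumerateInstanceVersion() is a Vulkan 1.1 loader entry point; failure
  // is now treated as fatal rather than silently falling back to 1.0.
  VkResult result = vkEnumerateInstanceVersion(&api_version);
  if (result != VK_SUCCESS) {
    std::fprintf(stderr, "vkEnumerateInstanceVersion() failed: %d\n",
                 static_cast<int>(result));
    return false;
  }
  if (api_version < kVulkanRequiredApiVersion)
    return false;  // Loader/driver too old for the required API version.
  *api_version_out = api_version;
  return true;
}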
diff --git a/chromium/gpu/vulkan/vulkan_instance.h b/chromium/gpu/vulkan/vulkan_instance.h
index bc22cd4d312..2b119789f58 100644
--- a/chromium/gpu/vulkan/vulkan_instance.h
+++ b/chromium/gpu/vulkan/vulkan_instance.h
@@ -8,8 +8,8 @@
#include <vulkan/vulkan.h>
#include <memory>
+#include "base/check_op.h"
#include "base/component_export.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "gpu/config/vulkan_info.h"
#include "ui/gfx/extension_set.h"
diff --git a/chromium/gpu/vulkan/vulkan_surface.cc b/chromium/gpu/vulkan/vulkan_surface.cc
index 8e20ccb7ded..5dfdff528c3 100644
--- a/chromium/gpu/vulkan/vulkan_surface.cc
+++ b/chromium/gpu/vulkan/vulkan_surface.cc
@@ -8,8 +8,10 @@
#include <algorithm>
+#include "base/logging.h"
#include "base/macros.h"
#include "base/stl_util.h"
+#include "base/threading/scoped_blocking_call.h"
#include "gpu/vulkan/vulkan_device_queue.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
#include "gpu/vulkan/vulkan_swap_chain.h"
@@ -68,6 +70,9 @@ gfx::OverlayTransform FromVkSurfaceTransformFlag(
}
}
+// Minimum VkImages in a vulkan swap chain.
+uint32_t kMinImageCount = 3u;
+
} // namespace
VulkanSurface::~VulkanSurface() {
@@ -165,8 +170,6 @@ bool VulkanSurface::Initialize(VulkanDeviceQueue* device_queue,
return false;
}
- image_count_ = std::max(surface_caps.minImageCount, 3u);
-
return true;
}
@@ -184,10 +187,18 @@ gfx::SwapResult VulkanSurface::SwapBuffers() {
}
gfx::SwapResult VulkanSurface::PostSubBuffer(const gfx::Rect& rect) {
- return swap_chain_->PresentBuffer(rect);
+ return swap_chain_->PostSubBuffer(rect);
+}
+
+void VulkanSurface::PostSubBufferAsync(
+ const gfx::Rect& rect,
+ VulkanSwapChain::PostSubBufferCompletionCallback callback) {
+ swap_chain_->PostSubBufferAsync(rect, std::move(callback));
}
void VulkanSurface::Finish() {
+ base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
+ base::BlockingType::WILL_BLOCK);
vkQueueWaitIdle(device_queue_->GetVulkanQueue());
}
@@ -259,12 +270,12 @@ bool VulkanSurface::CreateSwapChain(const gfx::Size& size,
transform_ = transform;
auto swap_chain = std::make_unique<VulkanSwapChain>();
-
// Create swap chain.
- DCHECK_EQ(image_count_, std::max(surface_caps.minImageCount, 3u));
- if (!swap_chain->Initialize(
- device_queue_, surface_, surface_format_, image_size_, image_count_,
- vk_transform, enforce_protected_memory_, std::move(swap_chain_))) {
+ auto min_image_count = std::max(surface_caps.minImageCount, kMinImageCount);
+ if (!swap_chain->Initialize(device_queue_, surface_, surface_format_,
+ image_size_, min_image_count, vk_transform,
+ enforce_protected_memory_,
+ std::move(swap_chain_))) {
return false;
}
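With image_count_ removed from VulkanSurface, the minimum image count is now recomputed from the surface capabilities each time the swap chain is created, clamped below by the new kMinImageCount constant. A small sketch of that computation follows; the capabilities query is standard Vulkan, and the upper-bound clamp is common practice rather than part of this diff (error handling elided).

// Sketch: choose the swapchain's minimum image count from surface caps.
#include <vulkan/vulkan.h>
#include <algorithm>
#include <cstdint>

// Mirrors the constant added to vulkan_surface.cc.
constexpr uint32_t kMinImageCount = 3u;

uint32_t ChooseMinImageCount(VkPhysicalDevice physical_device,
                             VkSurfaceKHR surface) {
  VkSurfaceCapabilitiesKHR caps = {};
  vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device, surface, &caps);
  // Ask for at least three images, but never fewer than the surface requires.
  uint32_t count = std::max(caps.minImageCount, kMinImageCount);
  // maxImageCount == 0 means "no upper limit"; otherwise respect the bound.
  if (caps.maxImageCount != 0)
    count = std::min(count, caps.maxImageCount);
  return count;
}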
diff --git a/chromium/gpu/vulkan/vulkan_surface.h b/chromium/gpu/vulkan/vulkan_surface.h
index 7620a2e4ebf..e61cd97bb8f 100644
--- a/chromium/gpu/vulkan/vulkan_surface.h
+++ b/chromium/gpu/vulkan/vulkan_surface.h
@@ -46,6 +46,9 @@ class COMPONENT_EXPORT(VULKAN) VulkanSurface {
gfx::SwapResult SwapBuffers();
gfx::SwapResult PostSubBuffer(const gfx::Rect& rect);
+ void PostSubBufferAsync(
+ const gfx::Rect& rect,
+ VulkanSwapChain::PostSubBufferCompletionCallback callback);
void Finish();
@@ -62,7 +65,6 @@ class COMPONENT_EXPORT(VULKAN) VulkanSurface {
uint32_t swap_chain_generation() const { return swap_chain_generation_; }
const gfx::Size& image_size() const { return image_size_; }
gfx::OverlayTransform transform() const { return transform_; }
- uint32_t image_count() const { return image_count_; }
VkSurfaceFormatKHR surface_format() const { return surface_format_; }
private:
@@ -87,9 +89,6 @@ class COMPONENT_EXPORT(VULKAN) VulkanSurface {
// Swap chain pre-transform.
gfx::OverlayTransform transform_ = gfx::OVERLAY_TRANSFORM_INVALID;
- // Swap chain image count.
- uint32_t image_count_ = 0u;
-
std::unique_ptr<VulkanSwapChain> swap_chain_;
DISALLOW_COPY_AND_ASSIGN(VulkanSurface);
diff --git a/chromium/gpu/vulkan/vulkan_swap_chain.cc b/chromium/gpu/vulkan/vulkan_swap_chain.cc
index c88b19f8ded..4d7eede7033 100644
--- a/chromium/gpu/vulkan/vulkan_swap_chain.cc
+++ b/chromium/gpu/vulkan/vulkan_swap_chain.cc
@@ -5,6 +5,11 @@
#include "gpu/vulkan/vulkan_swap_chain.h"
#include "base/bind.h"
+#include "base/logging.h"
+#include "base/task/task_traits.h"
+#include "base/task/thread_pool.h"
+#include "base/threading/scoped_blocking_call.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "gpu/vulkan/vulkan_command_buffer.h"
#include "gpu/vulkan/vulkan_command_pool.h"
@@ -17,7 +22,7 @@ namespace {
VkSemaphore CreateSemaphore(VkDevice vk_device) {
// Generic semaphore creation structure.
- VkSemaphoreCreateInfo semaphore_create_info = {
+ constexpr VkSemaphoreCreateInfo semaphore_create_info = {
VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO};
VkSemaphore vk_semaphore;
@@ -30,11 +35,17 @@ VkSemaphore CreateSemaphore(VkDevice vk_device) {
} // namespace
-VulkanSwapChain::VulkanSwapChain() {}
+VulkanSwapChain::VulkanSwapChain() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+}
VulkanSwapChain::~VulkanSwapChain() {
+#if DCHECK_IS_ON()
+ base::AutoLock auto_lock(lock_);
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(images_.empty());
DCHECK_EQ(static_cast<VkSwapchainKHR>(VK_NULL_HANDLE), swap_chain_);
+#endif
}
bool VulkanSwapChain::Initialize(
@@ -46,8 +57,12 @@ bool VulkanSwapChain::Initialize(
VkSurfaceTransformFlagBitsKHR pre_transform,
bool use_protected_memory,
std::unique_ptr<VulkanSwapChain> old_swap_chain) {
+ base::AutoLock auto_lock(lock_);
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(device_queue);
DCHECK(!use_protected_memory || device_queue->allow_protected_memory());
+
+ task_runner_ = base::ThreadTaskRunnerHandle::Get();
use_protected_memory_ = use_protected_memory;
device_queue_ = device_queue;
is_incremental_present_supported_ =
@@ -57,100 +72,67 @@ bool VulkanSwapChain::Initialize(
return InitializeSwapChain(surface, surface_format, image_size,
min_image_count, pre_transform,
use_protected_memory, std::move(old_swap_chain)) &&
- InitializeSwapImages(surface_format);
+ InitializeSwapImages(surface_format) && AcquireNextImage();
}
void VulkanSwapChain::Destroy() {
+ base::AutoLock auto_lock(lock_);
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
+ WaitUntilPostSubBufferAsyncFinished();
+
DCHECK(!is_writing_);
DestroySwapImages();
DestroySwapChain();
}
-gfx::SwapResult VulkanSwapChain::PresentBuffer(const gfx::Rect& rect) {
- DCHECK(acquired_image_);
- DCHECK(end_write_semaphore_ != VK_NULL_HANDLE);
-
- VkResult result = VK_SUCCESS;
- VkDevice device = device_queue_->GetVulkanDevice();
- VkQueue queue = device_queue_->GetVulkanQueue();
- auto* fence_helper = device_queue_->GetFenceHelper();
-
- auto& current_image_data = images_[*acquired_image_];
- if (current_image_data.layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
- {
- current_image_data.command_buffer->Clear();
- ScopedSingleUseCommandBufferRecorder recorder(
- *current_image_data.command_buffer);
- current_image_data.command_buffer->TransitionImageLayout(
- current_image_data.image, current_image_data.layout,
- VK_IMAGE_LAYOUT_PRESENT_SRC_KHR);
- }
- current_image_data.layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
-
- VkSemaphore vk_semaphore = CreateSemaphore(device);
- // Submit our command_buffer for the current buffer. It sets the image
- // layout for presenting.
- if (!current_image_data.command_buffer->Submit(1, &end_write_semaphore_, 1,
- &vk_semaphore)) {
- vkDestroySemaphore(device, vk_semaphore, nullptr /* pAllocator */);
- return gfx::SwapResult::SWAP_FAILED;
- }
- current_image_data.layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
- fence_helper->EnqueueSemaphoreCleanupForSubmittedWork(end_write_semaphore_);
- end_write_semaphore_ = vk_semaphore;
- }
-
- VkPresentInfoKHR present_info = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR};
- present_info.waitSemaphoreCount = 1;
- present_info.pWaitSemaphores = &end_write_semaphore_;
- present_info.swapchainCount = 1;
- present_info.pSwapchains = &swap_chain_;
- present_info.pImageIndices = &acquired_image_.value();
-
- VkRectLayerKHR rect_layer;
- VkPresentRegionKHR present_region;
- VkPresentRegionsKHR present_regions = {VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR};
- if (is_incremental_present_supported_) {
- rect_layer.offset = {rect.x(), rect.y()};
- rect_layer.extent = {rect.width(), rect.height()};
- rect_layer.layer = 0;
+gfx::SwapResult VulkanSwapChain::PostSubBuffer(const gfx::Rect& rect) {
+ base::AutoLock auto_lock(lock_);
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ DCHECK(!has_pending_post_sub_buffer_);
- present_region.rectangleCount = 1;
- present_region.pRectangles = &rect_layer;
-
- present_regions.swapchainCount = 1;
- present_regions.pRegions = &present_region;
-
- present_info.pNext = &present_regions;
- }
+ if (!PresentBuffer(rect))
+ return gfx::SwapResult::SWAP_FAILED;
- result = vkQueuePresentKHR(queue, &present_info);
- if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
- LOG(DFATAL) << "vkQueuePresentKHR() failed: " << result;
+ if (!AcquireNextImage())
return gfx::SwapResult::SWAP_FAILED;
- }
- current_image_data.is_acquired = false;
- LOG_IF(ERROR, result == VK_SUBOPTIMAL_KHR) << "Swapchian is suboptimal.";
+ return gfx::SwapResult::SWAP_ACK;
+}
- if (current_image_data.present_begin_semaphore != VK_NULL_HANDLE) {
- // |present_begin_semaphore| for the previous present for this image can be
- // safely destroyed after semaphore got from vkAcquireNextImageHKR() is
- // passed. That acquired semaphore should be already waited on for a
- // submitted GPU work. So we can safely enqueue the
- // |present_begin_semaphore| for cleanup here (the enqueued semaphore will
- // be destroyed when all submitted GPU work is finished).
- fence_helper->EnqueueSemaphoreCleanupForSubmittedWork(
- current_image_data.present_begin_semaphore);
+void VulkanSwapChain::PostSubBufferAsync(
+ const gfx::Rect& rect,
+ PostSubBufferCompletionCallback callback) {
+ base::AutoLock auto_lock(lock_);
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ DCHECK(!has_pending_post_sub_buffer_);
+
+ if (!PresentBuffer(rect)) {
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::BindOnce(std::move(callback), gfx::SwapResult::SWAP_FAILED));
+ return;
}
- // We are not sure when the semaphore is not used by present engine, so don't
- // destroy the semaphore until the image is returned from present engine.
- current_image_data.present_begin_semaphore = end_write_semaphore_;
- end_write_semaphore_ = VK_NULL_HANDLE;
- in_present_images_.emplace_back(*acquired_image_);
- acquired_image_.reset();
- return gfx::SwapResult::SWAP_ACK;
+ DCHECK_EQ(state_, VK_SUCCESS);
+
+ has_pending_post_sub_buffer_ = true;
+
+ post_sub_buffer_task_runner_->PostTask(
+ FROM_HERE,
+ base::BindOnce(
+ [](VulkanSwapChain* self, PostSubBufferCompletionCallback callback) {
+ base::AutoLock auto_lock(self->lock_);
+ DCHECK(self->has_pending_post_sub_buffer_);
+ auto swap_result = self->AcquireNextImage()
+ ? gfx::SwapResult::SWAP_ACK
+ : gfx::SwapResult::SWAP_FAILED;
+ self->task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(std::move(callback), swap_result));
+ self->has_pending_post_sub_buffer_ = false;
+ self->condition_variable_.Signal();
+ },
+ base::Unretained(this), std::move(callback)));
}
bool VulkanSwapChain::InitializeSwapChain(
@@ -161,29 +143,35 @@ bool VulkanSwapChain::InitializeSwapChain(
VkSurfaceTransformFlagBitsKHR pre_transform,
bool use_protected_memory,
std::unique_ptr<VulkanSwapChain> old_swap_chain) {
- DCHECK(!acquired_image_);
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
VkDevice device = device_queue_->GetVulkanDevice();
VkResult result = VK_SUCCESS;
- VkSwapchainCreateInfoKHR swap_chain_create_info = {};
- swap_chain_create_info.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
- swap_chain_create_info.flags =
- use_protected_memory ? VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR : 0;
- swap_chain_create_info.surface = surface;
- swap_chain_create_info.minImageCount = min_image_count,
- swap_chain_create_info.imageFormat = surface_format.format;
- swap_chain_create_info.imageColorSpace = surface_format.colorSpace;
- swap_chain_create_info.imageExtent.width = image_size.width();
- swap_chain_create_info.imageExtent.height = image_size.height();
- swap_chain_create_info.imageArrayLayers = 1;
- swap_chain_create_info.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
- swap_chain_create_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
- swap_chain_create_info.preTransform = pre_transform;
- swap_chain_create_info.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
- swap_chain_create_info.presentMode = VK_PRESENT_MODE_FIFO_KHR;
- swap_chain_create_info.clipped = true;
- swap_chain_create_info.oldSwapchain =
- old_swap_chain ? old_swap_chain->swap_chain_ : VK_NULL_HANDLE;
+ VkSwapchainCreateInfoKHR swap_chain_create_info = {
+ .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
+ .flags = use_protected_memory ? VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR : 0,
+ .surface = surface,
+ .minImageCount = min_image_count,
+ .imageFormat = surface_format.format,
+ .imageColorSpace = surface_format.colorSpace,
+ .imageExtent = {image_size.width(), image_size.height()},
+ .imageArrayLayers = 1,
+ .imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
+ .imageSharingMode = VK_SHARING_MODE_EXCLUSIVE,
+ .preTransform = pre_transform,
+ .compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
+ .presentMode = VK_PRESENT_MODE_FIFO_KHR,
+ .clipped = VK_TRUE,
+ .oldSwapchain = VK_NULL_HANDLE,
+ };
+ if (old_swap_chain) {
+ base::AutoLock auto_lock(old_swap_chain->lock_);
+ old_swap_chain->WaitUntilPostSubBufferAsyncFinished();
+ swap_chain_create_info.oldSwapchain = old_swap_chain->swap_chain_;
+ // Reuse |post_sub_buffer_task_runner_| from the |old_swap_chain|.
+ post_sub_buffer_task_runner_ = old_swap_chain->post_sub_buffer_task_runner_;
+ }
VkSwapchainKHR new_swap_chain = VK_NULL_HANDLE;
result = vkCreateSwapchainKHR(device, &swap_chain_create_info, nullptr,
@@ -204,10 +192,18 @@ bool VulkanSwapChain::InitializeSwapChain(
size_ = gfx::Size(swap_chain_create_info.imageExtent.width,
swap_chain_create_info.imageExtent.height);
+ if (!post_sub_buffer_task_runner_) {
+ post_sub_buffer_task_runner_ = base::ThreadPool::CreateSequencedTaskRunner(
+ {base::TaskPriority::USER_BLOCKING,
+ base::TaskShutdownBehavior::BLOCK_SHUTDOWN, base::MayBlock()});
+ }
+
return true;
}
void VulkanSwapChain::DestroySwapChain() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
if (swap_chain_ == VK_NULL_HANDLE)
return;
vkDestroySwapchainKHR(device_queue_->GetVulkanDevice(), swap_chain_,
@@ -217,6 +213,8 @@ void VulkanSwapChain::DestroySwapChain() {
bool VulkanSwapChain::InitializeSwapImages(
const VkSurfaceFormatKHR& surface_format) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
VkDevice device = device_queue_->GetVulkanDevice();
VkResult result = VK_SUCCESS;
@@ -250,6 +248,8 @@ bool VulkanSwapChain::InitializeSwapImages(
}
void VulkanSwapChain::DestroySwapImages() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
if (end_write_semaphore_)
vkDestroySemaphore(device_queue_->GetVulkanDevice(), end_write_semaphore_,
nullptr /* pAllocator */);
@@ -281,30 +281,38 @@ bool VulkanSwapChain::BeginWriteCurrentImage(VkImage* image,
uint32_t* image_index,
VkImageLayout* image_layout,
VkSemaphore* semaphore) {
+ base::AutoLock auto_lock(lock_);
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(image);
DCHECK(image_index);
DCHECK(image_layout);
DCHECK(semaphore);
DCHECK(!is_writing_);
- VkSemaphore vk_semaphore = VK_NULL_HANDLE;
+ if (state_ != VK_SUCCESS)
+ return false;
+
+ if (!acquired_image_)
+ return false;
+
+ auto& current_image_data = images_[*acquired_image_];
- if (!acquired_image_) {
+ VkSemaphore vk_semaphore = VK_NULL_HANDLE;
+ if (current_image_data.present_end_semaphore != VK_NULL_HANDLE) {
DCHECK(end_write_semaphore_ == VK_NULL_HANDLE);
- if (!AcquireNextImage())
- return false;
- DCHECK(acquired_image_);
- std::swap(vk_semaphore, images_[*acquired_image_].present_end_semaphore);
+ vk_semaphore = current_image_data.present_end_semaphore;
+ current_image_data.present_end_semaphore = VK_NULL_HANDLE;
} else {
- // In this case, PresentBuffer() is not called after
+ DCHECK(end_write_semaphore_ != VK_NULL_HANDLE);
+ // In this case, PostSubBuffer() is not called after
// {Begin,End}WriteCurrentImage pairs, |end_write_semaphore_| should be
// waited on before writing the image again.
- std::swap(vk_semaphore, end_write_semaphore_);
+ vk_semaphore = end_write_semaphore_;
+ end_write_semaphore_ = VK_NULL_HANDLE;
}
- auto& current_image_data = images_[*acquired_image_];
*image = current_image_data.image;
- *image_index = acquired_image_.value();
+ *image_index = *acquired_image_;
*image_layout = current_image_data.layout;
*semaphore = vk_semaphore;
is_writing_ = true;
@@ -314,6 +322,8 @@ bool VulkanSwapChain::BeginWriteCurrentImage(VkImage* image,
void VulkanSwapChain::EndWriteCurrentImage(VkImageLayout image_layout,
VkSemaphore semaphore) {
+ base::AutoLock auto_lock(lock_);
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(is_writing_);
DCHECK(acquired_image_);
DCHECK(end_write_semaphore_ == VK_NULL_HANDLE);
@@ -324,29 +334,107 @@ void VulkanSwapChain::EndWriteCurrentImage(VkImageLayout image_layout,
is_writing_ = false;
}
-bool VulkanSwapChain::AcquireNextImage() {
- DCHECK(!acquired_image_);
+bool VulkanSwapChain::PresentBuffer(const gfx::Rect& rect) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ DCHECK_EQ(state_, VK_SUCCESS);
+ DCHECK(acquired_image_);
+ DCHECK(end_write_semaphore_ != VK_NULL_HANDLE);
+
+ VkResult result = VK_SUCCESS;
VkDevice device = device_queue_->GetVulkanDevice();
- // The Vulkan spec doesn't require vkAcquireNextImageKHR() returns images in
- // the present order for a vulkan swap chain. However for the best
- // performance, the driver should return images in order. To avoid buggy
- // drivers, we will call vkAcquireNextImageKHR() continually until the
- // expected image is returned.
- do {
- bool all_images_are_tracked = in_present_images_.size() == images_.size();
- if (all_images_are_tracked) {
- // Only check the expected_next_image, when all images are tracked.
- uint32_t expected_next_image = in_present_images_.front();
- // If the expected next image has been acquired, use it and return true.
- if (images_[expected_next_image].is_acquired) {
- in_present_images_.pop_front();
- acquired_image_.emplace(expected_next_image);
- break;
- }
+ VkQueue queue = device_queue_->GetVulkanQueue();
+ auto* fence_helper = device_queue_->GetFenceHelper();
+
+ auto& current_image_data = images_[*acquired_image_];
+ if (current_image_data.layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
+ {
+ current_image_data.command_buffer->Clear();
+ ScopedSingleUseCommandBufferRecorder recorder(
+ *current_image_data.command_buffer);
+ current_image_data.command_buffer->TransitionImageLayout(
+ current_image_data.image, current_image_data.layout,
+ VK_IMAGE_LAYOUT_PRESENT_SRC_KHR);
}
+ current_image_data.layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
VkSemaphore vk_semaphore = CreateSemaphore(device);
- DCHECK(vk_semaphore != VK_NULL_HANDLE);
+ // Submit our command_buffer for the current buffer. It sets the image
+ // layout for presenting.
+ if (!current_image_data.command_buffer->Submit(1, &end_write_semaphore_, 1,
+ &vk_semaphore)) {
+ vkDestroySemaphore(device, vk_semaphore, nullptr /* pAllocator */);
+ return false;
+ }
+ current_image_data.layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
+ fence_helper->EnqueueSemaphoreCleanupForSubmittedWork(end_write_semaphore_);
+ end_write_semaphore_ = vk_semaphore;
+ }
+
+ VkPresentInfoKHR present_info = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR};
+ present_info.waitSemaphoreCount = 1;
+ present_info.pWaitSemaphores = &end_write_semaphore_;
+ present_info.swapchainCount = 1;
+ present_info.pSwapchains = &swap_chain_;
+ present_info.pImageIndices = &acquired_image_.value();
+
+ VkRectLayerKHR rect_layer;
+ VkPresentRegionKHR present_region;
+ VkPresentRegionsKHR present_regions = {VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR};
+ if (is_incremental_present_supported_) {
+ rect_layer.offset = {rect.x(), rect.y()};
+ rect_layer.extent = {rect.width(), rect.height()};
+ rect_layer.layer = 0;
+
+ present_region.rectangleCount = 1;
+ present_region.pRectangles = &rect_layer;
+
+ present_regions.swapchainCount = 1;
+ present_regions.pRegions = &present_region;
+
+ present_info.pNext = &present_regions;
+ }
+
+ result = vkQueuePresentKHR(queue, &present_info);
+ if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
+ LOG(DFATAL) << "vkQueuePresentKHR() failed: " << result;
+ state_ = result;
+ return false;
+ }
+
+ LOG_IF(ERROR, result == VK_SUBOPTIMAL_KHR) << "Swapchain is suboptimal.";
+
+ if (current_image_data.present_begin_semaphore != VK_NULL_HANDLE) {
+ // |present_begin_semaphore| for the previous present for this image can be
+ // safely destroyed after semaphore got from vkAcquireNextImageKHR() is
+ // passed. That acquired semaphore should be already waited on for a
+ // submitted GPU work. So we can safely enqueue the
+ // |present_begin_semaphore| for cleanup here (the enqueued semaphore will
+ // be destroyed when all submitted GPU work is finished).
+ fence_helper->EnqueueSemaphoreCleanupForSubmittedWork(
+ current_image_data.present_begin_semaphore);
+ }
+ // We are not sure when the semaphore is not used by present engine, so don't
+ // destroy the semaphore until the image is returned from present engine.
+ current_image_data.present_begin_semaphore = end_write_semaphore_;
+ end_write_semaphore_ = VK_NULL_HANDLE;
+
+ acquired_image_.reset();
+
+ return true;
+}
+
+bool VulkanSwapChain::AcquireNextImage() {
+ DCHECK_EQ(state_, VK_SUCCESS);
+ DCHECK(!acquired_image_);
+
+ // VulkanDeviceQueue is not threadsafe for now, but |device_queue_| will not
+ // be released, and device_queue_->device will never be changed after
+ // initialization, so it is safe for now.
+ // TODO(penghuang): make VulkanDeviceQueue threadsafe.
+ VkDevice device = device_queue_->GetVulkanDevice();
+
+ VkSemaphore vk_semaphore = CreateSemaphore(device);
+ DCHECK(vk_semaphore != VK_NULL_HANDLE);
#if defined(USE_X11)
// The xserver should still composite windows with a 1Hz fake vblank when
@@ -361,44 +449,46 @@ bool VulkanSwapChain::AcquireNextImage() {
#else
constexpr uint64_t kTimeout = UINT64_MAX;
#endif
- // Acquire the next image.
- uint32_t next_image;
- auto result =
- vkAcquireNextImageKHR(device, swap_chain_, kTimeout, vk_semaphore,
- VK_NULL_HANDLE, &next_image);
- if (result == VK_TIMEOUT) {
- LOG(ERROR) << "vkAcquireNextImageKHR() hangs.";
- vkDestroySemaphore(device, vk_semaphore, nullptr /* pAllocator */);
- state_ = VK_ERROR_SURFACE_LOST_KHR;
- return false;
- }
- if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
- LOG(DFATAL) << "vkAcquireNextImageKHR() failed: " << result;
- vkDestroySemaphore(device, vk_semaphore, nullptr /* pAllocator */);
- state_ = result;
- return false;
- }
+ // Acquire the next image.
+ uint32_t next_image;
+ auto result = ({
+ base::ScopedBlockingCall scoped_blocking_call(
+ FROM_HERE, base::BlockingType::WILL_BLOCK);
+ vkAcquireNextImageKHR(device, swap_chain_, kTimeout, vk_semaphore,
+ VK_NULL_HANDLE, &next_image);
+ });
+
+ if (result == VK_TIMEOUT) {
+ LOG(ERROR) << "vkAcquireNextImageKHR() hangs.";
+ vkDestroySemaphore(device, vk_semaphore, nullptr /* pAllocator */);
+ state_ = VK_ERROR_SURFACE_LOST_KHR;
+ return false;
+ }
- DCHECK(!images_[next_image].is_acquired);
- DCHECK(images_[next_image].present_end_semaphore == VK_NULL_HANDLE);
- images_[next_image].is_acquired = true;
- images_[next_image].present_end_semaphore = vk_semaphore;
-
- auto it = std::find(in_present_images_.begin(), in_present_images_.end(),
- next_image);
- if (it == in_present_images_.end()) {
- DCHECK(!all_images_are_tracked);
- // Got an image which is not in the present queue due to the new created
- // swap chain. In this case, just use this image.
- acquired_image_.emplace(next_image);
- break;
- }
- LOG_IF(ERROR, it != in_present_images_.begin())
- << "vkAcquireNextImageKHR() returned an unexpected image.";
- } while (true);
+ if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
+ LOG(DFATAL) << "vkAcquireNextImageKHR() failed: " << result;
+ vkDestroySemaphore(device, vk_semaphore, nullptr /* pAllocator */);
+ state_ = result;
+ return false;
+ }
+
+ DCHECK(images_[next_image].present_end_semaphore == VK_NULL_HANDLE);
+ images_[next_image].present_end_semaphore = vk_semaphore;
+ acquired_image_.emplace(next_image);
return true;
}
+void VulkanSwapChain::WaitUntilPostSubBufferAsyncFinished() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
+ while (has_pending_post_sub_buffer_) {
+ base::ScopedBlockingCall scoped_blocking_call(
+ FROM_HERE, base::BlockingType::WILL_BLOCK);
+ condition_variable_.Wait();
+ }
+ DCHECK(acquired_image_ || state_ != VK_SUCCESS);
+}
+
VulkanSwapChain::ScopedWrite::ScopedWrite(VulkanSwapChain* swap_chain)
: swap_chain_(swap_chain) {
success_ = swap_chain_->BeginWriteCurrentImage(
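
The PresentBuffer() hunk above builds a VkPresentRegionsKHR chain only when VK_KHR_incremental_present is available, so the present engine only needs to update the damaged rectangle. Below is a minimal, self-contained sketch (not Chromium code) of that pattern; the function name and damage-rect parameters are made up for illustration, while the Vulkan structures and vkQueuePresentKHR() are standard.

    // Sketch: present one swapchain image, optionally with a damage rect.
    #include <vulkan/vulkan.h>

    VkResult PresentWithDamageRect(VkQueue queue,
                                   VkSwapchainKHR swap_chain,
                                   uint32_t image_index,
                                   VkSemaphore wait_semaphore,
                                   bool incremental_present_supported,
                                   int32_t x, int32_t y,
                                   uint32_t width, uint32_t height) {
      VkPresentInfoKHR present_info = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR};
      present_info.waitSemaphoreCount = 1;
      present_info.pWaitSemaphores = &wait_semaphore;
      present_info.swapchainCount = 1;
      present_info.pSwapchains = &swap_chain;
      present_info.pImageIndices = &image_index;

      // These must outlive the vkQueuePresentKHR() call, so they are declared
      // in the enclosing scope, exactly as in the diff above.
      VkRectLayerKHR rect_layer;
      VkPresentRegionKHR present_region;
      VkPresentRegionsKHR present_regions = {
          VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR};
      if (incremental_present_supported) {
        rect_layer.offset = {x, y};
        rect_layer.extent = {width, height};
        rect_layer.layer = 0;  // Layer 0 of the swapchain image.

        present_region.rectangleCount = 1;
        present_region.pRectangles = &rect_layer;

        present_regions.swapchainCount = 1;
        present_regions.pRegions = &present_region;

        // Chain the regions into the present info via pNext.
        present_info.pNext = &present_regions;
      }

      // VK_SUBOPTIMAL_KHR still presents; callers typically treat it as
      // success, as the code above does.
      return vkQueuePresentKHR(queue, &present_info);
    }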
diff --git a/chromium/gpu/vulkan/vulkan_swap_chain.h b/chromium/gpu/vulkan/vulkan_swap_chain.h
index bb92873da28..65261abdeb5 100644
--- a/chromium/gpu/vulkan/vulkan_swap_chain.h
+++ b/chromium/gpu/vulkan/vulkan_swap_chain.h
@@ -10,14 +10,22 @@
#include <memory>
#include <vector>
+#include "base/callback.h"
#include "base/component_export.h"
#include "base/containers/circular_deque.h"
-#include "base/logging.h"
+#include "base/memory/scoped_refptr.h"
#include "base/optional.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_checker.h"
#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/swap_result.h"
+namespace base {
+class SingleThreadTaskRunner;
+}
+
namespace gpu {
class VulkanCommandBuffer;
@@ -73,12 +81,36 @@ class COMPONENT_EXPORT(VULKAN) VulkanSwapChain {
void Destroy();
// Present the current buffer.
- gfx::SwapResult PresentBuffer(const gfx::Rect& rect);
-
- uint32_t num_images() const { return static_cast<uint32_t>(images_.size()); }
- const gfx::Size& size() const { return size_; }
- bool use_protected_memory() const { return use_protected_memory_; }
- VkResult state() const { return state_; }
+ gfx::SwapResult PostSubBuffer(const gfx::Rect& rect);
+ using PostSubBufferCompletionCallback =
+ base::OnceCallback<void(gfx::SwapResult)>;
+ void PostSubBufferAsync(const gfx::Rect& rect,
+ PostSubBufferCompletionCallback callback);
+
+ uint32_t num_images() const {
+    // The size of |images_| does not change after initialization, so it is
+    // safe to read it here.
+ return static_cast<uint32_t>(TS_UNCHECKED_READ(images_).size());
+ }
+ const gfx::Size& size() const {
+ // |size_| is never changed after initialization.
+ return size_;
+ }
+ bool use_protected_memory() const {
+ // |use_protected_memory_| is never changed after initialization.
+ return use_protected_memory_;
+ }
+
+ uint32_t current_image_index() const {
+ base::AutoLock auto_lock(lock_);
+ DCHECK(acquired_image_);
+ return *acquired_image_;
+ }
+
+ VkResult state() const {
+ base::AutoLock auto_lock(lock_);
+ return state_;
+ }
private:
bool InitializeSwapChain(VkSurfaceKHR surface,
@@ -87,26 +119,31 @@ class COMPONENT_EXPORT(VULKAN) VulkanSwapChain {
uint32_t min_image_count,
VkSurfaceTransformFlagBitsKHR pre_transform,
bool use_protected_memory,
- std::unique_ptr<VulkanSwapChain> old_swap_chain);
- void DestroySwapChain();
+ std::unique_ptr<VulkanSwapChain> old_swap_chain)
+ EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ void DestroySwapChain() EXCLUSIVE_LOCKS_REQUIRED(lock_);
- bool InitializeSwapImages(const VkSurfaceFormatKHR& surface_format);
- void DestroySwapImages();
+ bool InitializeSwapImages(const VkSurfaceFormatKHR& surface_format)
+ EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ void DestroySwapImages() EXCLUSIVE_LOCKS_REQUIRED(lock_);
bool BeginWriteCurrentImage(VkImage* image,
uint32_t* image_index,
VkImageLayout* layout,
VkSemaphore* semaphore);
void EndWriteCurrentImage(VkImageLayout layout, VkSemaphore semaphore);
- bool AcquireNextImage();
+ bool PresentBuffer(const gfx::Rect& rect) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ bool AcquireNextImage() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  // Wait until any pending PostSubBufferAsync() has finished on the ThreadPool.
+ void WaitUntilPostSubBufferAsyncFinished() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ mutable base::Lock lock_;
bool use_protected_memory_ = false;
VulkanDeviceQueue* device_queue_ = nullptr;
bool is_incremental_present_supported_ = false;
- VkSwapchainKHR swap_chain_ = VK_NULL_HANDLE;
-
+ VkSwapchainKHR swap_chain_ GUARDED_BY(lock_) = VK_NULL_HANDLE;
std::unique_ptr<VulkanCommandPool> command_pool_;
-
gfx::Size size_;
struct ImageData {
@@ -123,18 +160,34 @@ class COMPONENT_EXPORT(VULKAN) VulkanSwapChain {
VkSemaphore present_begin_semaphore = VK_NULL_HANDLE;
// Semaphore signaled when present engine is done with the image.
VkSemaphore present_end_semaphore = VK_NULL_HANDLE;
- // True indicates the image is acquired from swapchain and haven't sent back
- // to swapchain for presenting.
- bool is_acquired = false;
};
- std::vector<ImageData> images_;
-
- // Acquired image index.
- base::circular_deque<uint32_t> in_present_images_;
- base::Optional<uint32_t> acquired_image_;
- bool is_writing_ = false;
- VkSemaphore end_write_semaphore_ = VK_NULL_HANDLE;
- VkResult state_ = VK_SUCCESS;
+
+ // Images in the swap chain.
+ std::vector<ImageData> images_ GUARDED_BY(lock_);
+
+ base::circular_deque<uint32_t> in_present_images_ GUARDED_BY(lock_);
+ bool is_writing_ GUARDED_BY(lock_) = false;
+ VkSemaphore end_write_semaphore_ GUARDED_BY(lock_) = VK_NULL_HANDLE;
+
+  // The condition variable is signaled when a PostSubBufferAsync() call
+  // finishes.
+ base::ConditionVariable condition_variable_{&lock_};
+
+  // True if there is a pending post sub buffer in flight.
+ bool has_pending_post_sub_buffer_ GUARDED_BY(lock_) = false;
+
+  // The current swapchain state.
+ VkResult state_ GUARDED_BY(lock_) = VK_SUCCESS;
+
+  // Index of the currently acquired image, if any.
+ base::Optional<uint32_t> acquired_image_ GUARDED_BY(lock_);
+
+  // For executing tasks on the GPU main thread.
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+
+  // For executing PostSubBufferAsync() tasks off the GPU main thread.
+ scoped_refptr<base::SequencedTaskRunner> post_sub_buffer_task_runner_;
+
+ THREAD_CHECKER(thread_checker_);
DISALLOW_COPY_AND_ASSIGN(VulkanSwapChain);
};
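
The header changes above replace the synchronous PresentBuffer() entry point with PostSubBuffer() plus a PostSubBufferAsync() overload that reports completion through a PostSubBufferCompletionCallback. A hypothetical caller might look like the sketch below; the SwapDamagedRegion()/OnSwapDone() names are assumptions, and only the VulkanSwapChain methods and the gfx/base types come from the diff.

    #include "base/bind.h"
    #include "base/logging.h"
    #include "gpu/vulkan/vulkan_swap_chain.h"
    #include "ui/gfx/geometry/rect.h"
    #include "ui/gfx/swap_result.h"

    namespace {

    // Runs when the asynchronous present has completed.
    void OnSwapDone(gfx::SwapResult result) {
      LOG_IF(ERROR, result != gfx::SwapResult::SWAP_ACK)
          << "PostSubBuffer failed.";
    }

    void SwapDamagedRegion(gpu::VulkanSwapChain* swap_chain,
                           const gfx::Rect& damage) {
      // Blocking variant: returns the gfx::SwapResult directly.
      //   gfx::SwapResult result = swap_chain->PostSubBuffer(damage);

      // Non-blocking variant: presentation work runs off the GPU main thread
      // and the callback reports whether it succeeded.
      swap_chain->PostSubBufferAsync(damage, base::BindOnce(&OnSwapDone));
    }

    }  // namespace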
diff --git a/chromium/gpu/vulkan/vulkan_util.cc b/chromium/gpu/vulkan/vulkan_util.cc
index 8a9661a3301..ca52a27bfaa 100644
--- a/chromium/gpu/vulkan/vulkan_util.cc
+++ b/chromium/gpu/vulkan/vulkan_util.cc
@@ -7,6 +7,9 @@
#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
#include "base/strings/stringprintf.h"
+#include "build/build_config.h"
+#include "gpu/config/gpu_info.h" // nogncheck
+#include "gpu/config/vulkan_info.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
namespace gpu {
@@ -100,4 +103,48 @@ void ReportQueueSubmitPerSwapBuffers() {
last_count = g_submit_count;
}
+bool CheckVulkanCompabilities(const VulkanInfo& vulkan_info,
+ const GPUInfo& gpu_info) {
+// Android uses AHB and SyncFD for interop; they are imported into GL with
+// other APIs.
+#if !defined(OS_ANDROID)
+#if defined(OS_WIN)
+ constexpr char kMemoryObjectExtension[] = "GL_EXT_memory_object_win32";
+ constexpr char kSemaphoreExtension[] = "GL_EXT_semaphore_win32";
+#elif defined(OS_FUCHSIA)
+ constexpr char kMemoryObjectExtension[] = "GL_ANGLE_memory_object_fuchsia";
+ constexpr char kSemaphoreExtension[] = "GL_ANGLE_semaphore_fuchsia";
+#else
+ constexpr char kMemoryObjectExtension[] = "GL_EXT_memory_object_fd";
+ constexpr char kSemaphoreExtension[] = "GL_EXT_semaphore_fd";
+#endif
+  // If both Vulkan and GL are using the native GPU (not SwiftShader), check
+  // for the extensions necessary for GL and Vulkan interop.
+ const auto extensions = gfx::MakeExtensionSet(gpu_info.gl_extensions);
+ if (!gfx::HasExtension(extensions, kMemoryObjectExtension) ||
+ !gfx::HasExtension(extensions, kSemaphoreExtension)) {
+ DLOG(ERROR) << kMemoryObjectExtension << " or " << kSemaphoreExtension
+ << " is not supported.";
+ return false;
+ }
+#endif // !defined(OS_ANDROID)
+
+#if defined(OS_ANDROID)
+ if (vulkan_info.physical_devices.empty())
+ return false;
+
+ const auto& device_info = vulkan_info.physical_devices.front();
+ constexpr uint32_t kVendorARM = 0x13b5;
+
+  // https://crbug.com/1096222: Display problems on Huawei and Honor devices
+  // with Mali GPUs when the Mali driver version is < 19.0.0.
+ if (device_info.properties.vendorID == kVendorARM &&
+ device_info.properties.driverVersion < VK_MAKE_VERSION(19, 0, 0)) {
+ return false;
+ }
+#endif // defined(OS_ANDROID)
+
+ return true;
+}
+
}  // namespace gpu
\ No newline at end of file
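
CheckVulkanCompabilities() above rejects ARM Mali devices whose reported driverVersion is below VK_MAKE_VERSION(19, 0, 0) (the crbug.com/1096222 workaround). The standalone sketch below shows how that comparison behaves; the helper name and the example values are hypothetical, and this is an illustration rather than the Chromium implementation.

    #include <vulkan/vulkan.h>

    #include <cstdint>
    #include <cstdio>

    // Returns true if the device matches the ARM Mali driver-version gate
    // used in CheckVulkanCompabilities() above.
    bool IsBlockedMaliDriver(uint32_t vendor_id, uint32_t driver_version) {
      constexpr uint32_t kVendorARM = 0x13b5;
      // Mali drivers older than 19.0.0 are rejected on Android.
      return vendor_id == kVendorARM &&
             driver_version < VK_MAKE_VERSION(19, 0, 0);
    }

    int main() {
      // Example: a Mali driver reporting 18.2.1 would be rejected.
      uint32_t version = VK_MAKE_VERSION(18, 2, 1);
      std::printf("driver %u.%u.%u blocked: %d\n", VK_VERSION_MAJOR(version),
                  VK_VERSION_MINOR(version), VK_VERSION_PATCH(version),
                  IsBlockedMaliDriver(0x13b5, version));
      return 0;
    }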
diff --git a/chromium/gpu/vulkan/vulkan_util.h b/chromium/gpu/vulkan/vulkan_util.h
index e6f44251aca..f2d6c5cb879 100644
--- a/chromium/gpu/vulkan/vulkan_util.h
+++ b/chromium/gpu/vulkan/vulkan_util.h
@@ -18,6 +18,9 @@
namespace gpu {
+struct GPUInfo;
+class VulkanInfo;
+
// Submits semaphores to be signaled to the vulkan queue. Semaphores are
// signaled once this submission is executed. vk_fence is an optional handle
// to fence to be signaled once this submission completes execution.
@@ -79,6 +82,10 @@ VKAPI_ATTR VkResult VKAPI_CALL QueueSubmitHook(VkQueue queue,
COMPONENT_EXPORT(VULKAN) void ReportQueueSubmitPerSwapBuffers();
+COMPONENT_EXPORT(VULKAN)
+bool CheckVulkanCompabilities(const VulkanInfo& vulkan_info,
+ const GPUInfo& gpu_info);
+
} // namespace gpu
#endif // GPU_VULKAN_VULKAN_UTIL_H_
diff --git a/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc b/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc
index 6e8f4cdeaf0..ec752c329d4 100644
--- a/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc
+++ b/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc
@@ -77,7 +77,7 @@ VulkanImplementationX11::VulkanImplementationX11(bool use_swiftshader)
gfx::GetXDisplay();
}
-VulkanImplementationX11::~VulkanImplementationX11() {}
+VulkanImplementationX11::~VulkanImplementationX11() = default;
bool VulkanImplementationX11::InitializeVulkanInstance(bool using_surface) {
if (using_surface && !use_swiftshader() && !IsVulkanSurfaceSupported())
@@ -126,7 +126,8 @@ std::unique_ptr<VulkanSurface> VulkanImplementationX11::CreateViewSurface(
gfx::AcceleratedWidget window) {
if (!using_surface_)
return nullptr;
- return VulkanSurfaceX11::Create(vulkan_instance_.vk_instance(), window);
+ return VulkanSurfaceX11::Create(vulkan_instance_.vk_instance(),
+ static_cast<x11::Window>(window));
}
bool VulkanImplementationX11::GetPhysicalDevicePresentationSupport(
diff --git a/chromium/gpu/vulkan/x/vulkan_surface_x11.cc b/chromium/gpu/vulkan/x/vulkan_surface_x11.cc
index 3bb675440cf..592a574a99e 100644
--- a/chromium/gpu/vulkan/x/vulkan_surface_x11.cc
+++ b/chromium/gpu/vulkan/x/vulkan_surface_x11.cc
@@ -7,6 +7,7 @@
#include "base/logging.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
#include "ui/events/platform/x11/x11_event_source.h"
+#include "ui/gfx/native_widget_types.h"
namespace gpu {
@@ -14,7 +15,8 @@ class VulkanSurfaceX11::ExposeEventForwarder : public ui::XEventDispatcher {
public:
explicit ExposeEventForwarder(VulkanSurfaceX11* surface) : surface_(surface) {
if (auto* event_source = ui::X11EventSource::GetInstance()) {
- XSelectInput(gfx::GetXDisplay(), surface_->window_, ExposureMask);
+ XSelectInput(gfx::GetXDisplay(), static_cast<uint32_t>(surface_->window_),
+ ExposureMask);
event_source->AddXEventDispatcher(this);
}
}
@@ -25,7 +27,7 @@ class VulkanSurfaceX11::ExposeEventForwarder : public ui::XEventDispatcher {
}
// ui::XEventDispatcher:
- bool DispatchXEvent(XEvent* xevent) override {
+ bool DispatchXEvent(x11::Event* xevent) override {
if (!surface_->CanDispatchXEvent(xevent))
return false;
surface_->ForwardXExposeEvent(xevent);
@@ -40,17 +42,19 @@ class VulkanSurfaceX11::ExposeEventForwarder : public ui::XEventDispatcher {
// static
std::unique_ptr<VulkanSurfaceX11> VulkanSurfaceX11::Create(
VkInstance vk_instance,
- Window parent_window) {
+ x11::Window parent_window) {
XDisplay* display = gfx::GetXDisplay();
XWindowAttributes attributes;
- if (!XGetWindowAttributes(display, parent_window, &attributes)) {
- LOG(ERROR) << "XGetWindowAttributes failed for window " << parent_window
- << ".";
+ if (!XGetWindowAttributes(display, static_cast<uint32_t>(parent_window),
+ &attributes)) {
+ LOG(ERROR) << "XGetWindowAttributes failed for window "
+ << static_cast<uint32_t>(parent_window) << ".";
return nullptr;
}
- Window window = XCreateWindow(display, parent_window, 0, 0, attributes.width,
- attributes.height, 0, CopyFromParent,
- InputOutput, CopyFromParent, 0, nullptr);
+ Window window = XCreateWindow(
+ display, static_cast<uint32_t>(parent_window), 0, 0, attributes.width,
+ attributes.height, 0, static_cast<int>(x11::WindowClass::CopyFromParent),
+ static_cast<int>(x11::WindowClass::InputOutput), nullptr, 0, nullptr);
if (!window) {
LOG(ERROR) << "XCreateWindow failed.";
return nullptr;
@@ -68,42 +72,45 @@ std::unique_ptr<VulkanSurfaceX11> VulkanSurfaceX11::Create(
DLOG(ERROR) << "vkCreateXlibSurfaceKHR() failed: " << result;
return nullptr;
}
- return std::make_unique<VulkanSurfaceX11>(vk_instance, vk_surface,
- parent_window, window);
+ return std::make_unique<VulkanSurfaceX11>(
+ vk_instance, vk_surface, parent_window, static_cast<x11::Window>(window));
}
VulkanSurfaceX11::VulkanSurfaceX11(VkInstance vk_instance,
VkSurfaceKHR vk_surface,
- Window parent_window,
- Window window)
+ x11::Window parent_window,
+ x11::Window window)
: VulkanSurface(vk_instance,
- window,
+ static_cast<gfx::AcceleratedWidget>(window),
vk_surface,
false /* use_protected_memory */),
parent_window_(parent_window),
window_(window),
expose_event_forwarder_(new ExposeEventForwarder(this)) {}
-VulkanSurfaceX11::~VulkanSurfaceX11() {}
+VulkanSurfaceX11::~VulkanSurfaceX11() = default;
// VulkanSurface:
bool VulkanSurfaceX11::Reshape(const gfx::Size& size,
gfx::OverlayTransform pre_transform) {
DCHECK_EQ(pre_transform, gfx::OVERLAY_TRANSFORM_NONE);
- XResizeWindow(gfx::GetXDisplay(), window_, size.width(), size.height());
+ XResizeWindow(gfx::GetXDisplay(), static_cast<uint32_t>(window_),
+ size.width(), size.height());
return VulkanSurface::Reshape(size, pre_transform);
}
-bool VulkanSurfaceX11::CanDispatchXEvent(const XEvent* event) {
- return event->type == Expose && event->xexpose.window == window_;
+bool VulkanSurfaceX11::CanDispatchXEvent(const x11::Event* x11_event) {
+ const XEvent* event = &x11_event->xlib_event();
+ return event->type == Expose &&
+ event->xexpose.window == static_cast<uint32_t>(window_);
}
-void VulkanSurfaceX11::ForwardXExposeEvent(const XEvent* event) {
- XEvent forwarded_event = *event;
- forwarded_event.xexpose.window = parent_window_;
- XSendEvent(gfx::GetXDisplay(), parent_window_, False, ExposureMask,
- &forwarded_event);
+void VulkanSurfaceX11::ForwardXExposeEvent(const x11::Event* event) {
+ XEvent forwarded_event = event->xlib_event();
+ forwarded_event.xexpose.window = static_cast<uint32_t>(parent_window_);
+ XSendEvent(gfx::GetXDisplay(), static_cast<uint32_t>(parent_window_), False,
+ ExposureMask, &forwarded_event);
XFlush(gfx::GetXDisplay());
}
diff --git a/chromium/gpu/vulkan/x/vulkan_surface_x11.h b/chromium/gpu/vulkan/x/vulkan_surface_x11.h
index 5c99d6ea907..585fe197d26 100644
--- a/chromium/gpu/vulkan/x/vulkan_surface_x11.h
+++ b/chromium/gpu/vulkan/x/vulkan_surface_x11.h
@@ -9,6 +9,7 @@
#include "base/macros.h"
#include "gpu/vulkan/vulkan_surface.h"
+#include "ui/gfx/x/event.h"
#include "ui/gfx/x/x11_types.h"
namespace gpu {
@@ -16,11 +17,11 @@ namespace gpu {
class VulkanSurfaceX11 : public VulkanSurface {
public:
static std::unique_ptr<VulkanSurfaceX11> Create(VkInstance vk_instance,
- Window parent_window);
+ x11::Window parent_window);
VulkanSurfaceX11(VkInstance vk_instance,
VkSurfaceKHR vk_surface,
- Window parent_window,
- Window window);
+ x11::Window parent_window,
+ x11::Window window);
~VulkanSurfaceX11() override;
// VulkanSurface:
@@ -29,11 +30,11 @@ class VulkanSurfaceX11 : public VulkanSurface {
private:
class ExposeEventForwarder;
- bool CanDispatchXEvent(const XEvent* event);
- void ForwardXExposeEvent(const XEvent* event);
+ bool CanDispatchXEvent(const x11::Event* event);
+ void ForwardXExposeEvent(const x11::Event* event);
- const Window parent_window_;
- const Window window_;
+ const x11::Window parent_window_;
+ const x11::Window window_;
std::unique_ptr<ExposeEventForwarder> expose_event_forwarder_;
DISALLOW_COPY_AND_ASSIGN(VulkanSurfaceX11);