1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
|
/*
* Copyright 2019 Google LLC
* SPDX-License-Identifier: MIT
*
* based in part on anv and radv which are:
* Copyright © 2015 Intel Corporation
* Copyright © 2016 Red Hat.
* Copyright © 2016 Bas Nieuwenhuizen
*/
#ifndef VN_QUEUE_H
#define VN_QUEUE_H
#include "vn_common.h"
#include "vn_feedback.h"
/* Driver state for one VkQueue exposed to the application. */
struct vn_queue {
   struct vn_object_base base;

   /* NOTE(review): presumably the device this queue was retrieved from —
    * confirm at the vkGetDeviceQueue* call site.
    */
   struct vn_device *device;

   /* queue family index and index within that family */
   uint32_t family;
   uint32_t index;

   /* NOTE(review): looks like the VkDeviceQueueCreateFlags used at creation —
    * confirm against the queue init path.
    */
   uint32_t flags;

   /* only used if renderer supports multiple timelines */
   uint32_t ring_idx;

   /* wait fence used for vn_QueueWaitIdle */
   VkFence wait_fence;

   /* sync fence used for Android wsi */
   VkFence sync_fence;

   /* semaphore for gluing vkQueueSubmit feedback commands to
    * vkQueueBindSparse
    */
   VkSemaphore sparse_semaphore;
   /* timeline value paired with sparse_semaphore for successive signals */
   uint64_t sparse_semaphore_counter;
};
VK_DEFINE_HANDLE_CASTS(vn_queue, base.base, VkQueue, VK_OBJECT_TYPE_QUEUE)
/* Kind of payload currently backing a fence or semaphore. */
enum vn_sync_type {
   /* no payload */
   VN_SYNC_TYPE_INVALID,

   /* device object */
   VN_SYNC_TYPE_DEVICE_ONLY,

   /* payload is an imported sync file */
   VN_SYNC_TYPE_IMPORTED_SYNC_FD,
};
/* One payload slot of a fence or semaphore (see vn_fence/vn_semaphore,
 * which each carry a permanent and a temporary instance of this).
 */
struct vn_sync_payload {
   enum vn_sync_type type;

   /* If type is VN_SYNC_TYPE_IMPORTED_SYNC_FD, fd is a sync file. */
   int fd;
};
/* For external fences and external semaphores submitted to be signaled. The
 * Vulkan spec guarantees those external syncs are on permanent payload.
 */
struct vn_sync_payload_external {
   /* ring_idx of the last queue submission */
   uint32_t ring_idx;
   /* valid when NO_ASYNC_QUEUE_SUBMIT perf option is not used */
   bool ring_seqno_valid;
   /* ring seqno of the last queue submission; only meaningful while
    * ring_seqno_valid is set
    */
   uint32_t ring_seqno;
};
/* Driver state for one VkFence. */
struct vn_fence {
   struct vn_object_base base;

   /* active payload; presumably points at either &permanent or &temporary
    * (temporary for imports) — confirm in the fence import/reset paths.
    */
   struct vn_sync_payload *payload;

   struct vn_sync_payload permanent;
   struct vn_sync_payload temporary;

   struct {
      /* non-NULL if VN_PERF_NO_FENCE_FEEDBACK is disabled */
      struct vn_feedback_slot *slot;
      /* NOTE(review): likely one pre-recorded feedback command buffer per
       * queue family — confirm against the feedback cmd alloc code.
       */
      VkCommandBuffer *commands;
   } feedback;

   /* whether the fence was created with external handle types */
   bool is_external;
   struct vn_sync_payload_external external_payload;
};
VK_DEFINE_NONDISP_HANDLE_CASTS(vn_fence,
                               base.base,
                               VkFence,
                               VK_OBJECT_TYPE_FENCE)
/* Driver state for one VkSemaphore (binary or timeline, see type). */
struct vn_semaphore {
   struct vn_object_base base;

   /* VK_SEMAPHORE_TYPE_BINARY or VK_SEMAPHORE_TYPE_TIMELINE */
   VkSemaphoreType type;

   /* active payload; presumably points at either &permanent or &temporary
    * (temporary for imports) — confirm in the semaphore import paths.
    */
   struct vn_sync_payload *payload;

   struct vn_sync_payload permanent;
   struct vn_sync_payload temporary;

   struct {
      /* non-NULL if VN_PERF_NO_TIMELINE_SEM_FEEDBACK is disabled */
      struct vn_feedback_slot *slot;

      /* Lists of allocated vn_feedback_src
       * The pending_src_list tracks vn_feedback_src slots that have
       * not been signaled since the last submission cleanup.
       * The free_src_list tracks vn_feedback_src slots that have
       * signaled and can be reused.
       * On submission prepare, used vn_feedback_src are moved from
       * the free list to the pending list. On submission cleanup,
       * vn_feedback_src of any associated semaphores are checked
       * and moved to the free list if they were signaled.
       * vn_feedback_src slots are allocated on demand if the
       * free_src_list is empty.
       */
      struct list_head pending_src_list;
      struct list_head free_src_list;

      /* Lock for accessing free/pending src lists */
      simple_mtx_t src_lists_mtx;

      /* Cached counter value to track if an async sem wait call is needed */
      uint64_t signaled_counter;

      /* Lock for checking if an async sem wait call is needed based on
       * the current counter value and signaled_counter to ensure async
       * wait order across threads.
       */
      simple_mtx_t async_wait_mtx;
   } feedback;

   /* whether the semaphore was created with external handle types */
   bool is_external;
   struct vn_sync_payload_external external_payload;
};
VK_DEFINE_NONDISP_HANDLE_CASTS(vn_semaphore,
                               base.base,
                               VkSemaphore,
                               VK_OBJECT_TYPE_SEMAPHORE)
/* Driver state for one VkEvent. */
struct vn_event {
   struct vn_object_base base;

   /* non-NULL if below are satisfied:
    * - event is created without VK_EVENT_CREATE_DEVICE_ONLY_BIT
    * - VN_PERF_NO_EVENT_FEEDBACK is disabled
    */
   struct vn_feedback_slot *feedback_slot;
};
VK_DEFINE_NONDISP_HANDLE_CASTS(vn_event,
                               base.base,
                               VkEvent,
                               VK_OBJECT_TYPE_EVENT)
/* Signal \p fence on behalf of the window-system integration path.
 * NOTE(review): exact signaling mechanism (payload swap vs. renderer call)
 * is defined in vn_queue.c — see the implementation for the contract.
 */
void
vn_fence_signal_wsi(struct vn_device *dev, struct vn_fence *fence);

/* Signal \p sem on behalf of the window-system integration path. */
void
vn_semaphore_signal_wsi(struct vn_device *dev, struct vn_semaphore *sem);
#endif /* VN_QUEUE_H */
|