1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
|
/*
* Copyright 2019 Google LLC
* SPDX-License-Identifier: MIT
*
* based in part on anv and radv which are:
* Copyright © 2015 Intel Corporation
* Copyright © 2016 Red Hat.
* Copyright © 2016 Bas Nieuwenhuizen
*/
#ifndef VN_INSTANCE_H
#define VN_INSTANCE_H
#include "vn_common.h"
#include "venus-protocol/vn_protocol_driver_defines.h"
#include "vn_cs.h"
#include "vn_renderer.h"
#include "vn_renderer_util.h"
#include "vn_ring.h"
/* require and request at least Vulkan 1.1 at both instance and device levels
*/
#define VN_MIN_RENDERER_VERSION VK_API_VERSION_1_1
/* max advertised version at both instance and device levels */
#ifdef ANDROID
#define VN_MAX_API_VERSION VK_MAKE_VERSION(1, 1, VK_HEADER_VERSION)
#else
#define VN_MAX_API_VERSION VK_MAKE_VERSION(1, 3, VK_HEADER_VERSION)
#endif
/* Driver-side instance state.  One vn_instance exists per VkInstance and owns
 * the renderer connection, the command ring, and cached physical devices.
 */
struct vn_instance {
   struct vn_instance_base base;

   /* driconf option caches: available_dri_options is the full option set the
    * driver recognizes; dri_options holds the values in effect
    */
   struct driOptionCache dri_options;
   struct driOptionCache available_dri_options;

   struct vn_renderer *renderer;
   /* backs the shmems used to receive command replies from the renderer */
   struct vn_renderer_shmem_pool reply_shmem_pool;

   /* protects ring_idx_used_mask */
   mtx_t ring_idx_mutex;
   /* one bit per renderer timeline (ring) index currently acquired; bit 0 is
    * never handed out by vn_instance_acquire_ring_idx -- presumably reserved
    * for the dedicated CPU ring at init (TODO confirm against vn_instance.c)
    */
   uint64_t ring_idx_used_mask;

   /* the command ring shared with the renderer */
   struct {
      mtx_t mutex;
      struct vn_renderer_shmem *shmem;
      struct vn_ring ring;
      uint64_t id;

      /* encoder for uploading commands onto the ring */
      struct vn_cs_encoder upload;
      /* count of commands dropped (e.g. on submission failure) */
      uint32_t command_dropped;

      /* to synchronize renderer/ring */
      mtx_t roundtrip_mutex;
      uint64_t roundtrip_next;
   } ring;

   /* Between the driver and the app, VN_MAX_API_VERSION is what we advertise
    * and base.base.app_info.api_version is what the app requests.
    *
    * Between the driver and the renderer, renderer_api_version is the api
    * version we request internally, which can be higher than
    * base.base.app_info.api_version.  renderer_version is the instance
    * version we can use internally.
    */
   uint32_t renderer_api_version;
   uint32_t renderer_version;

   /* for VN_CS_ENCODER_STORAGE_SHMEM_POOL */
   struct {
      /* protects pool; see vn_instance_cs_shmem_alloc */
      mtx_t mutex;
      struct vn_renderer_shmem_pool pool;
   } cs_shmem;

   /* physical devices are enumerated lazily and cached here */
   struct {
      mtx_t mutex;
      bool initialized;
      struct vn_physical_device *devices;
      uint32_t device_count;
      VkPhysicalDeviceGroupProperties *groups;
      uint32_t group_count;
   } physical_device;
};
/* generates vn_instance_from_handle()/vn_instance_to_handle() casts between
 * struct vn_instance and the dispatchable VkInstance handle
 */
VK_DEFINE_HANDLE_CASTS(vn_instance,
                       base.base.base,
                       VkInstance,
                       VK_OBJECT_TYPE_INSTANCE)
/* Submits a roundtrip marker to the renderer; on VK_SUCCESS, stores the
 * seqno identifying this roundtrip in *roundtrip_seqno.  Defined in
 * vn_instance.c.
 */
VkResult
vn_instance_submit_roundtrip(struct vn_instance *instance,
                             uint64_t *roundtrip_seqno);

/* Waits until the renderer has processed the roundtrip identified by
 * roundtrip_seqno (obtained from vn_instance_submit_roundtrip).
 */
void
vn_instance_wait_roundtrip(struct vn_instance *instance,
                           uint64_t roundtrip_seqno);
static inline void
vn_instance_roundtrip(struct vn_instance *instance)
{
uint64_t roundtrip_seqno;
if (vn_instance_submit_roundtrip(instance, &roundtrip_seqno) == VK_SUCCESS)
vn_instance_wait_roundtrip(instance, roundtrip_seqno);
}
/* Submits an encoded command stream onto the instance ring.  Defined in
 * vn_instance.c.
 */
VkResult
vn_instance_ring_submit(struct vn_instance *instance,
                        const struct vn_cs_encoder *cs);
/* Per-call state for vn_instance_submit_command: the encoded command going
 * out and, optionally, the decoder for the renderer's reply coming back.
 * Initialize with vn_instance_submit_command_init.
 */
struct vn_instance_submit_command {
   /* empty command implies errors */
   struct vn_cs_encoder command;
   /* storage buffer backing the command encoder */
   struct vn_cs_encoder_buffer buffer;
   /* non-zero implies waiting */
   size_t reply_size;
   /* when reply_size is non-zero, NULL can be returned on errors */
   struct vn_renderer_shmem *reply_shmem;
   /* decoder over reply_shmem; valid only when reply_shmem is non-NULL */
   struct vn_cs_decoder reply;

   /* valid when instance ring submission succeeds */
   bool ring_seqno_valid;
   uint32_t ring_seqno;
};
/* Prepares a submit for vn_instance_submit_command: wraps the caller-provided
 * cmd_data/cmd_size storage in the command encoder and records how many reply
 * bytes to expect (0 means fire-and-forget).  Returns the encoder so the
 * caller can encode the command into it.
 */
static inline struct vn_cs_encoder *
vn_instance_submit_command_init(struct vn_instance *instance,
                                struct vn_instance_submit_command *submit,
                                void *cmd_data,
                                size_t cmd_size,
                                size_t reply_size)
{
   /* no reply shmem yet; vn_instance_submit_command allocates it on demand */
   submit->reply_size = reply_size;
   submit->reply_shmem = NULL;

   /* the encoder references submit->buffer, so set up the buffer first */
   submit->buffer = VN_CS_ENCODER_BUFFER_INITIALIZER(cmd_data);
   submit->command = VN_CS_ENCODER_INITIALIZER(&submit->buffer, cmd_size);

   return &submit->command;
}
/* Submits the encoded command on the instance ring and, when
 * submit->reply_size is non-zero, waits for and decodes the reply.  Defined
 * in vn_instance.c.
 */
void
vn_instance_submit_command(struct vn_instance *instance,
                           struct vn_instance_submit_command *submit);
/* Returns the decoder over the reply of a completed submit, or NULL when the
 * submission failed to produce a reply (reply_shmem was never allocated).
 */
static inline struct vn_cs_decoder *
vn_instance_get_command_reply(struct vn_instance *instance,
                              struct vn_instance_submit_command *submit)
{
   if (!submit->reply_shmem)
      return NULL;

   return &submit->reply;
}
/* Releases the reply shmem of a submit whose reply has been consumed.  Must
 * only be called when vn_instance_get_command_reply returned non-NULL.
 */
static inline void
vn_instance_free_command_reply(struct vn_instance *instance,
                               struct vn_instance_submit_command *submit)
{
   struct vn_renderer_shmem *const reply = submit->reply_shmem;
   assert(reply);
   vn_renderer_shmem_unref(instance->renderer, reply);
}
/* Suballocates size bytes from the instance-wide cs shmem pool (used by
 * VN_CS_ENCODER_STORAGE_SHMEM_POOL).  Returns the backing shmem and stores
 * the suballocation offset in *out_offset.  The pool is shared across
 * threads, so the allocation is serialized by cs_shmem.mutex.
 */
static inline struct vn_renderer_shmem *
vn_instance_cs_shmem_alloc(struct vn_instance *instance,
                           size_t size,
                           size_t *out_offset)
{
   mtx_lock(&instance->cs_shmem.mutex);
   struct vn_renderer_shmem *const shmem = vn_renderer_shmem_pool_alloc(
      instance->renderer, &instance->cs_shmem.pool, size, out_offset);
   mtx_unlock(&instance->cs_shmem.mutex);

   return shmem;
}
/* Acquires a vacant renderer timeline (ring) index and marks it used in
 * ring_idx_used_mask.  Returns the index, or -1 when all usable indices are
 * taken.
 */
static inline int
vn_instance_acquire_ring_idx(struct vn_instance *instance)
{
   mtx_lock(&instance->ring_idx_mutex);
   /* lowest clear bit of the mask; ffsll returns 0 when ~mask == 0 (all 64
    * in use), making ring_idx -1
    */
   int ring_idx = ffsll(~instance->ring_idx_used_mask) - 1;
   /* NOTE(review): if max_timeline_count is unsigned, a ring_idx of -1 is
    * compared after promotion to unsigned here; it still ends up -1 either
    * way, but confirm the declared type in vn_renderer.h
    */
   if (ring_idx >= instance->renderer->info.max_timeline_count)
      ring_idx = -1;
   /* index 0 is never acquired -- presumably pre-set in the mask at init for
    * the dedicated CPU ring, so ring_idx is expected to be -1 or >= 1
    */
   if (ring_idx > 0)
      instance->ring_idx_used_mask |= (1ULL << (uint32_t)ring_idx);
   mtx_unlock(&instance->ring_idx_mutex);
   assert(ring_idx); /* never acquire the dedicated CPU ring */

   /* returns -1 when no vacant rings */
   return ring_idx;
}
/* Returns a ring index previously obtained from vn_instance_acquire_ring_idx
 * to the vacant pool.  Index 0 (the dedicated CPU ring) is never released.
 */
static inline void
vn_instance_release_ring_idx(struct vn_instance *instance, uint32_t ring_idx)
{
   assert(ring_idx > 0);

   const uint64_t ring_idx_bit = 1ULL << ring_idx;
   mtx_lock(&instance->ring_idx_mutex);
   /* releasing an index that was never acquired is a caller bug */
   assert(instance->ring_idx_used_mask & ring_idx_bit);
   instance->ring_idx_used_mask &= ~ring_idx_bit;
   mtx_unlock(&instance->ring_idx_mutex);
}
#endif /* VN_INSTANCE_H */
|