//===- CudaRuntimeWrappers.cpp - MLIR CUDA API wrapper library ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implements C wrappers around the CUDA library for easy linking in ORC JIT.
// Also adds some debugging helpers that are helpful when writing MLIR code to
// run on GPUs.
//
//===----------------------------------------------------------------------===//
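//
// A rough sketch of how compiler-generated host code typically drives these
// wrappers (illustrative only; the module blob, kernel name, and parameter
// array below are placeholders, not symbols defined in this file):
//
//   CUstream stream = mgpuStreamCreate();
//   CUmodule module = mgpuModuleLoad(cubinBlob);
//   CUfunction func = mgpuModuleGetFunction(module, "example_kernel");
//   mgpuLaunchKernel(func, /*grid=*/1, 1, 1, /*block=*/128, 1, 1,
//                    /*smem=*/0, stream, kernelParams, /*extra=*/nullptr);
//   mgpuStreamSynchronize(stream);
//   mgpuStreamDestroy(stream);
//   mgpuModuleUnload(module);
//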
#include "mlir/ExecutionEngine/CRunnerUtils.h"
#include <stdio.h>
#include "cuda.h"
#include "cusparse.h"
#ifdef _WIN32
#define MLIR_CUDA_WRAPPERS_EXPORT __declspec(dllexport)
#else
#define MLIR_CUDA_WRAPPERS_EXPORT
#endif // _WIN32
#define CUDA_REPORT_IF_ERROR(expr) \
[](CUresult result) { \
if (!result) \
return; \
const char *name = nullptr; \
cuGetErrorName(result, &name); \
if (!name) \
name = "<unknown>"; \
fprintf(stderr, "'%s' failed with '%s'\n", #expr, name); \
}(expr)
#define CUSPARSE_REPORT_IF_ERROR(expr) \
{ \
cusparseStatus_t status = (expr); \
if (status != CUSPARSE_STATUS_SUCCESS) { \
fprintf(stderr, "cuSPARSE '%s' failed with '%s'\n", #expr, \
cusparseGetErrorString(status)); \
} \
}
thread_local static int32_t defaultDevice = 0;
// Makes the primary context of the current default device current for the
// duration of the instance and restores the previous context on destruction.
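// Note that the primary context is retained once in a function-local static
// and never released, so it stays valid for the lifetime of the process; each
// ScopedContext instance only pushes and pops it.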
class ScopedContext {
public:
ScopedContext() {
// Static reference to CUDA primary context for device ordinal
// defaultDevice.
static CUcontext context = [] {
CUDA_REPORT_IF_ERROR(cuInit(/*flags=*/0));
CUdevice device;
CUDA_REPORT_IF_ERROR(cuDeviceGet(&device, /*ordinal=*/defaultDevice));
CUcontext ctx;
// Note: this does not affect the current context.
CUDA_REPORT_IF_ERROR(cuDevicePrimaryCtxRetain(&ctx, device));
return ctx;
}();
CUDA_REPORT_IF_ERROR(cuCtxPushCurrent(context));
}
~ScopedContext() { CUDA_REPORT_IF_ERROR(cuCtxPopCurrent(nullptr)); }
};
extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUmodule mgpuModuleLoad(void *data) {
ScopedContext scopedContext;
CUmodule module = nullptr;
CUDA_REPORT_IF_ERROR(cuModuleLoadData(&module, data));
return module;
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuModuleUnload(CUmodule module) {
CUDA_REPORT_IF_ERROR(cuModuleUnload(module));
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUfunction
mgpuModuleGetFunction(CUmodule module, const char *name) {
CUfunction function = nullptr;
CUDA_REPORT_IF_ERROR(cuModuleGetFunction(&function, module, name));
return function;
}
// The wrapper uses intptr_t instead of CUDA's unsigned int to match
// MLIR's index type. This avoids the need for casts in the generated
// MLIR code.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuLaunchKernel(CUfunction function, intptr_t gridX, intptr_t gridY,
intptr_t gridZ, intptr_t blockX, intptr_t blockY,
intptr_t blockZ, int32_t smem, CUstream stream, void **params,
void **extra) {
ScopedContext scopedContext;
CUDA_REPORT_IF_ERROR(cuLaunchKernel(function, gridX, gridY, gridZ, blockX,
blockY, blockZ, smem, stream, params,
extra));
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUstream mgpuStreamCreate() {
ScopedContext scopedContext;
CUstream stream = nullptr;
CUDA_REPORT_IF_ERROR(cuStreamCreate(&stream, CU_STREAM_NON_BLOCKING));
return stream;
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuStreamDestroy(CUstream stream) {
CUDA_REPORT_IF_ERROR(cuStreamDestroy(stream));
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuStreamSynchronize(CUstream stream) {
CUDA_REPORT_IF_ERROR(cuStreamSynchronize(stream));
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuStreamWaitEvent(CUstream stream,
CUevent event) {
CUDA_REPORT_IF_ERROR(cuStreamWaitEvent(stream, event, /*flags=*/0));
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUevent mgpuEventCreate() {
ScopedContext scopedContext;
CUevent event = nullptr;
CUDA_REPORT_IF_ERROR(cuEventCreate(&event, CU_EVENT_DISABLE_TIMING));
return event;
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuEventDestroy(CUevent event) {
CUDA_REPORT_IF_ERROR(cuEventDestroy(event));
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuEventSynchronize(CUevent event) {
CUDA_REPORT_IF_ERROR(cuEventSynchronize(event));
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuEventRecord(CUevent event,
                                                          CUstream stream) {
CUDA_REPORT_IF_ERROR(cuEventRecord(event, stream));
}
extern "C" void *mgpuMemAlloc(uint64_t sizeBytes, CUstream /*stream*/) {
ScopedContext scopedContext;
CUdeviceptr ptr;
CUDA_REPORT_IF_ERROR(cuMemAlloc(&ptr, sizeBytes));
return reinterpret_cast<void *>(ptr);
}
extern "C" void mgpuMemFree(void *ptr, CUstream /*stream*/) {
CUDA_REPORT_IF_ERROR(cuMemFree(reinterpret_cast<CUdeviceptr>(ptr)));
}
extern "C" void mgpuMemcpy(void *dst, void *src, size_t sizeBytes,
CUstream stream) {
CUDA_REPORT_IF_ERROR(cuMemcpyAsync(reinterpret_cast<CUdeviceptr>(dst),
reinterpret_cast<CUdeviceptr>(src),
sizeBytes, stream));
}
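// Note: `count` is the number of 32-bit words to set, not a byte count, per
// the semantics of cuMemsetD32Async.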
extern "C" void mgpuMemset32(void *dst, unsigned int value, size_t count,
CUstream stream) {
CUDA_REPORT_IF_ERROR(cuMemsetD32Async(reinterpret_cast<CUdeviceptr>(dst),
value, count, stream));
}
///
/// Helper functions for writing MLIR example code.
///
// Allows registering a byte array with the CUDA runtime. Helpful until we
// have transfer functions implemented.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemHostRegister(void *ptr, uint64_t sizeBytes) {
ScopedContext scopedContext;
CUDA_REPORT_IF_ERROR(cuMemHostRegister(ptr, sizeBytes, /*flags=*/0));
}
/// Registers a memref with the CUDA runtime. `descriptor` is a pointer to a
/// ranked memref descriptor struct of rank `rank`. Helpful until we have
/// transfer functions implemented.
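/// For example, a rank-3 memref with sizes [2, 3, 4] and 4-byte elements gets
/// denseStrides [12, 4, 1], so sizeBytes = 2 * 12 * 4 = 96, i.e. the full
/// densely packed buffer.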
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemHostRegisterMemRef(int64_t rank, StridedMemRefType<char, 1> *descriptor,
int64_t elementSizeBytes) {
// Only densely packed tensors are currently supported.
int64_t *denseStrides = (int64_t *)alloca(rank * sizeof(int64_t));
int64_t *sizes = descriptor->sizes;
for (int64_t i = rank - 1, runningStride = 1; i >= 0; i--) {
denseStrides[i] = runningStride;
runningStride *= sizes[i];
}
uint64_t sizeBytes = sizes[0] * denseStrides[0] * elementSizeBytes;
int64_t *strides = &sizes[rank];
(void)strides;
for (unsigned i = 0; i < rank; ++i)
assert(strides[i] == denseStrides[i] &&
"Mismatch in computed dense strides");
auto *ptr = descriptor->data + descriptor->offset * elementSizeBytes;
mgpuMemHostRegister(ptr, sizeBytes);
}
// Allows unregistering a byte array with the CUDA runtime.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuMemHostUnregister(void *ptr) {
ScopedContext scopedContext;
CUDA_REPORT_IF_ERROR(cuMemHostUnregister(ptr));
}
/// Unregisters a memref with the CUDA runtime. `descriptor` is a pointer to a
/// ranked memref descriptor struct of rank `rank`.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemHostUnregisterMemRef(int64_t rank,
StridedMemRefType<char, 1> *descriptor,
int64_t elementSizeBytes) {
auto *ptr = descriptor->data + descriptor->offset * elementSizeBytes;
mgpuMemHostUnregister(ptr);
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuSetDefaultDevice(int32_t device) {
defaultDevice = device;
}
///
/// Wrapper methods for the cuSPARSE library.
///
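// The `dw`, `iw`, and `pw` arguments below encode the bit width of the data,
// index, and position types, respectively; any width other than 32 currently
// maps to the 64-bit variant.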
static inline cudaDataType_t dataTp(int32_t width) {
switch (width) {
case 32:
return CUDA_R_32F;
default:
return CUDA_R_64F;
}
}
static inline cusparseIndexType_t idxTp(int32_t width) {
switch (width) {
case 32:
return CUSPARSE_INDEX_32I;
default:
return CUSPARSE_INDEX_64I;
}
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateSparseEnv(CUstream /*stream*/) {
cusparseHandle_t handle = nullptr;
CUSPARSE_REPORT_IF_ERROR(cusparseCreate(&handle))
return reinterpret_cast<void *>(handle);
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuDestroySparseEnv(void *h, CUstream /*stream*/) {
cusparseHandle_t handle = reinterpret_cast<cusparseHandle_t>(h);
CUSPARSE_REPORT_IF_ERROR(cusparseDestroy(handle))
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateDnVec(intptr_t size, void *values, int32_t dw, CUstream /*stream*/) {
cusparseDnVecDescr_t vec = nullptr;
cudaDataType_t dtp = dataTp(dw);
CUSPARSE_REPORT_IF_ERROR(cusparseCreateDnVec(&vec, size, values, dtp))
return reinterpret_cast<void *>(vec);
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuDestroyDnVec(void *v, CUstream /*stream*/) {
cusparseDnVecDescr_t vec = reinterpret_cast<cusparseDnVecDescr_t>(v);
CUSPARSE_REPORT_IF_ERROR(cusparseDestroyDnVec(vec))
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateDnMat(intptr_t rows, intptr_t cols, void *values, int32_t dw,
CUstream /*stream*/) {
cusparseDnMatDescr_t mat = nullptr;
cudaDataType_t dtp = dataTp(dw);
CUSPARSE_REPORT_IF_ERROR(cusparseCreateDnMat(&mat, rows, cols, /*ld=*/cols,
values, dtp, CUSPARSE_ORDER_ROW))
return reinterpret_cast<void *>(mat);
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuDestroyDnMat(void *m, CUstream /*stream*/) {
cusparseDnMatDescr_t mat = reinterpret_cast<cusparseDnMatDescr_t>(m);
CUSPARSE_REPORT_IF_ERROR(cusparseDestroyDnMat(mat))
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateCoo(intptr_t rows, intptr_t cols, intptr_t nnz, void *rowIdxs,
void *colIdxs, void *values, int32_t iw, int32_t dw,
CUstream /*stream*/) {
cusparseSpMatDescr_t mat = nullptr;
cusparseIndexType_t itp = idxTp(iw);
cudaDataType_t dtp = dataTp(dw);
CUSPARSE_REPORT_IF_ERROR(cusparseCreateCoo(&mat, rows, cols, nnz, rowIdxs,
colIdxs, values, itp,
CUSPARSE_INDEX_BASE_ZERO, dtp))
return reinterpret_cast<void *>(mat);
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateCsr(intptr_t rows, intptr_t cols, intptr_t nnz, void *rowPos,
void *colIdxs, void *values, int32_t pw, int32_t iw, int32_t dw,
CUstream /*stream*/) {
cusparseSpMatDescr_t mat = nullptr;
cusparseIndexType_t ptp = idxTp(pw);
cusparseIndexType_t itp = idxTp(iw);
cudaDataType_t dtp = dataTp(dw);
CUSPARSE_REPORT_IF_ERROR(cusparseCreateCsr(&mat, rows, cols, nnz, rowPos,
colIdxs, values, ptp, itp,
CUSPARSE_INDEX_BASE_ZERO, dtp))
return reinterpret_cast<void *>(mat);
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuDestroySpMat(void *m, CUstream /*stream*/) {
cusparseSpMatDescr_t mat = reinterpret_cast<cusparseSpMatDescr_t>(m);
CUSPARSE_REPORT_IF_ERROR(cusparseDestroySpMat(mat))
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT intptr_t
mgpuSpMVBufferSize(void *h, void *a, void *x, void *y, CUstream /*stream*/) {
cusparseHandle_t handle = reinterpret_cast<cusparseHandle_t>(h);
cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
cusparseDnVecDescr_t vecX = reinterpret_cast<cusparseDnVecDescr_t>(x);
cusparseDnVecDescr_t vecY = reinterpret_cast<cusparseDnVecDescr_t>(y);
double alpha = 1.0;
double beta = 1.0;
size_t bufferSize = 0;
CUSPARSE_REPORT_IF_ERROR(cusparseSpMV_bufferSize(
handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &alpha, matA, vecX, &beta, vecY,
CUDA_R_64F, CUSPARSE_SPMV_ALG_DEFAULT, &bufferSize))
return bufferSize == 0 ? 1 : bufferSize; // avoid zero-alloc
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuSpMV(void *h, void *a, void *x, void *y, void *b, CUstream /*stream*/) {
cusparseHandle_t handle = reinterpret_cast<cusparseHandle_t>(h);
cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
cusparseDnVecDescr_t vecX = reinterpret_cast<cusparseDnVecDescr_t>(x);
cusparseDnVecDescr_t vecY = reinterpret_cast<cusparseDnVecDescr_t>(y);
double alpha = 1.0;
double beta = 1.0;
CUSPARSE_REPORT_IF_ERROR(
cusparseSpMV(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &alpha, matA, vecX,
&beta, vecY, CUDA_R_64F, CUSPARSE_SPMV_ALG_DEFAULT, b))
}
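//
// A rough sketch of how generated code might drive the SpMV wrappers above
// (illustrative only; m, n, nnz, and the device pointers are placeholders,
// and the 64-bit data width matches the CUDA_R_64F hard-coded above):
//
//   void *env = mgpuCreateSparseEnv(stream);
//   void *spA = mgpuCreateCoo(m, n, nnz, dRowIdxs, dColIdxs, dValues,
//                             /*iw=*/32, /*dw=*/64, stream);
//   void *dnX = mgpuCreateDnVec(n, dX, /*dw=*/64, stream);
//   void *dnY = mgpuCreateDnVec(m, dY, /*dw=*/64, stream);
//   intptr_t sz = mgpuSpMVBufferSize(env, spA, dnX, dnY, stream);
//   void *buf = mgpuMemAlloc(sz, stream);
//   mgpuSpMV(env, spA, dnX, dnY, buf, stream);
//   mgpuMemFree(buf, stream);
//   mgpuDestroyDnVec(dnY, stream);
//   mgpuDestroyDnVec(dnX, stream);
//   mgpuDestroySpMat(spA, stream);
//   mgpuDestroySparseEnv(env, stream);
//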