//===- cuda-runtime-wrappers.cpp - MLIR CUDA runner wrapper library -------===//
//
// Part of the MLIR Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implements C wrappers around the CUDA library for easy linking in ORC jit.
// Also adds some debugging helpers that are helpful when writing MLIR code to
// run on GPUs.
//
//===----------------------------------------------------------------------===//
#include "cuda.h"

#include "llvm/Support/raw_ostream.h"

#include <algorithm>
#include <cassert>
#include <functional>
#include <numeric>
namespace {
// Emits a diagnostic on stderr when `result` is not CUDA_SUCCESS, naming the
// call site via `where`. Always forwards the raw CUresult value to the caller
// as an int32_t status code so wrappers can return it directly.
int32_t reportErrorIfAny(CUresult result, const char *where) {
  if (result == CUDA_SUCCESS)
    return result;
  llvm::errs() << "CUDA failed with " << result << " in " << where << "\n";
  return result;
}
} // anonymous namespace
// Loads a CUDA module from the in-memory image `data` and stores the
// resulting handle into `*module`. Returns the CUresult status as an int32_t,
// reporting any failure to stderr.
extern "C" int32_t mcuModuleLoad(void **module, void *data) {
  return reportErrorIfAny(
      cuModuleLoadData(reinterpret_cast<CUmodule *>(module), data),
      "ModuleLoad");
}
extern "C" int32_t mcuModuleGetFunction(void **function, void *module,
const char *name) {
return reportErrorIfAny(
cuModuleGetFunction(reinterpret_cast<CUfunction *>(function),
reinterpret_cast<CUmodule>(module), name),
"GetFunction");
}
// Launches `function` on `stream` with the given grid/block dimensions,
// dynamic shared memory size `smem` (bytes), kernel arguments `params`, and
// extra launch options `extra`. Returns the CUresult status as an int32_t.
//
// The wrapper uses intptr_t instead of CUDA's unsigned int to match the type
// of MLIR's index type. This avoids the need for casts in the generated MLIR
// code.
extern "C" int32_t mcuLaunchKernel(void *function, intptr_t gridX,
                                   intptr_t gridY, intptr_t gridZ,
                                   intptr_t blockX, intptr_t blockY,
                                   intptr_t blockZ, int32_t smem, void *stream,
                                   void **params, void **extra) {
  auto kernel = reinterpret_cast<CUfunction>(function);
  auto cuStream = reinterpret_cast<CUstream>(stream);
  return reportErrorIfAny(cuLaunchKernel(kernel, gridX, gridY, gridZ, blockX,
                                         blockY, blockZ, smem, cuStream,
                                         params, extra),
                          "LaunchKernel");
}
// Creates a new CUDA stream and returns it as an opaque pointer.
//
// Fix: `stream` is zero-initialized so that if cuStreamCreate fails the
// function returns nullptr instead of an indeterminate (uninitialized)
// pointer, which callers could otherwise pass on as a bogus stream handle.
// The failure itself is still reported to stderr by reportErrorIfAny.
extern "C" void *mcuGetStreamHelper() {
  CUstream stream = nullptr;
  reportErrorIfAny(cuStreamCreate(&stream, CU_STREAM_DEFAULT), "StreamCreate");
  return stream;
}
// Blocks the host until all work queued on `stream` has completed. Returns
// the CUresult status as an int32_t, reporting any failure to stderr.
extern "C" int32_t mcuStreamSynchronize(void *stream) {
  auto cuStream = reinterpret_cast<CUstream>(stream);
  return reportErrorIfAny(cuStreamSynchronize(cuStream), "StreamSync");
}
/// Helper functions for writing mlir example code
// Registers the host byte range [ptr, ptr + sizeBytes) with the CUDA driver
// (no special flags) so it is accessible to the device. Helpful until we have
// transfer functions implemented. Failures are reported to stderr.
extern "C" void mcuMemHostRegister(void *ptr, uint64_t sizeBytes) {
  reportErrorIfAny(cuMemHostRegister(ptr, sizeBytes, /*flags=*/0),
                   "MemHostRegister");
}
// A struct that corresponds to how MLIR represents memrefs.
// Only `data` and `sizes` are read by the helpers in this file; the other
// fields mirror the MLIR-side descriptor layout.
template <typename T, int N> struct MemRefType {
  T *basePtr;         // presumably the allocation base pointer — not used here
  T *data;            // first element of the view; filled/registered below
  int64_t offset;     // presumably element offset of `data` — not used here
  int64_t sizes[N];   // extent of each of the N dimensions, in elements
  int64_t strides[N]; // per-dimension element strides — not used here
};
// Allows to register a MemRef with the CUDA runtime. Initializes array with
// value. Helpful until we have transfer functions implemented.
//
// Computes the total element count as the product of all N dimension sizes,
// fills the buffer with `value`, then page-locks it via mcuMemHostRegister.
// NOTE(review): strides are ignored — this assumes a densely packed memref.
template <typename T, int N>
void mcuMemHostRegisterMemRef(const MemRefType<T, N> *arg, T value) {
  // Seed the product with int64_t{1}: a plain `1` makes std::accumulate
  // deduce an int accumulator, silently truncating/overflowing the element
  // count for large memrefs even though `sizes` is int64_t.
  auto count = std::accumulate(arg->sizes, arg->sizes + N, int64_t{1},
                               std::multiplies<int64_t>());
  std::fill_n(arg->data, count, value);
  mcuMemHostRegister(arg->data, count * sizeof(T));
}
extern "C" void
mcuMemHostRegisterMemRef1dFloat(const MemRefType<float, 1> *arg) {
mcuMemHostRegisterMemRef(arg, 1.23f);
}
extern "C" void
mcuMemHostRegisterMemRef3dFloat(const MemRefType<float, 3> *arg) {
mcuMemHostRegisterMemRef(arg, 1.23f);
}