Proteus
Programmable JIT compilation and optimization for C/C++ using LLVM
DispatcherCUDA.hpp
#ifndef PROTEUS_FRONTEND_DISPATCHER_CUDA_HPP
#define PROTEUS_FRONTEND_DISPATCHER_CUDA_HPP

#if PROTEUS_ENABLE_CUDA

namespace proteus {

class DispatcherCUDA : public Dispatcher {
public:
  static DispatcherCUDA &instance() {
    static DispatcherCUDA D;
    return D;
  }

  std::unique_ptr<MemoryBuffer>
  compile([[maybe_unused]] std::unique_ptr<LLVMContext> Ctx,
          std::unique_ptr<Module> Mod, HashT ModuleHash,
          bool DisableIROpt = false) override {
    // Take ownership so the LLVMContext outlives the Module; relying on the
    // [[maybe_unused]] parameter alone can trigger a lifetime bug.
    auto CtxOwner = std::move(Ctx);
    auto ModOwner = std::move(Mod);

    // LIBDEVICE_BC_PATH is defined by CMake; parse NVIDIA's libdevice bitcode
    // and link it into the module before compiling.
    auto LibDeviceBuffer = llvm::MemoryBuffer::getFile(LIBDEVICE_BC_PATH);
    auto LibDeviceModule = llvm::parseBitcodeFile(
        LibDeviceBuffer->get()->getMemBufferRef(), ModOwner->getContext());
    Linker linker(*ModOwner);
    linker.linkInModule(std::move(LibDeviceModule.get()));

    std::unique_ptr<MemoryBuffer> ObjectModule =
        Jit.compileOnly(*ModOwner, DisableIROpt);
    if (!ObjectModule)
      PROTEUS_FATAL_ERROR("Expected non-null object library");

    StorageCache.store(ModuleHash, ObjectModule->getMemBufferRef());

    return ObjectModule;
  }

  std::unique_ptr<CompiledLibrary>
  lookupCompiledLibrary(HashT ModuleHash) override {
    return StorageCache.lookup(ModuleHash);
  }

  DispatchResult launch(void *KernelFunc, LaunchDims GridDim,
                        LaunchDims BlockDim, ArrayRef<void *> KernelArgs,
                        uint64_t ShmemSize, void *Stream) override {
    // Convert the generic launch parameters to their CUDA equivalents.
    dim3 CudaGridDim = {GridDim.X, GridDim.Y, GridDim.Z};
    dim3 CudaBlockDim = {BlockDim.X, BlockDim.Y, BlockDim.Z};
    cudaStream_t CudaStream = reinterpret_cast<cudaStream_t>(Stream);

    void **KernelArgsPtrs = const_cast<void **>(KernelArgs.data());
    return launchKernelFunction(reinterpret_cast<CUfunction>(KernelFunc),
                                CudaGridDim, CudaBlockDim, KernelArgsPtrs,
                                ShmemSize, CudaStream);
  }

  StringRef getDeviceArch() const override { return Jit.getDeviceArch(); }

  void *getFunctionAddress(StringRef KernelName, HashT ModuleHash,
                           CompiledLibrary &Library) override {
    auto GetKernelFunc = [&]() {
      // Hash the kernel name together with the module hash to get a unique id.
      HashT HashValue = hash(KernelName, ModuleHash);

      // Reuse the CUfunction if this kernel has been resolved before.
      if (auto KernelFunc = CodeCache.lookup(HashValue))
        return KernelFunc;

      // Otherwise extract the kernel from the compiled object image and cache
      // it for subsequent lookups.
      CUfunction KernelFunc = getKernelFunctionFromImage(
          KernelName, Library.ObjectModule->getBufferStart(),
          /*RelinkGlobalsByCopy*/ false,
          /* VarNameToDevPtr */ {});

      CodeCache.insert(HashValue, KernelFunc, KernelName);

      return KernelFunc;
    };

    CUfunction KernelFunc = GetKernelFunc();
    return KernelFunc;
  }

  void registerDynamicLibrary(HashT, const SmallString<128> &) override {
    PROTEUS_FATAL_ERROR(
        "Dispatch CUDA does not support registerDynamicLibrary");
  }

  void printStats() override {
    CodeCache.printStats();
    StorageCache.printStats();
  }

private:
  JitEngineDeviceCUDA &Jit;
  DispatcherCUDA() : Jit(JitEngineDeviceCUDA::instance()) {
    TargetModel = TargetModelType::CUDA;
  }
  JitCache<CUfunction> CodeCache;
  JitStorageCache StorageCache;
};

} // namespace proteus

#endif

#endif // PROTEUS_FRONTEND_DISPATCHER_CUDA_HPP
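The header above delegates the actual CUDA plumbing to helpers such as getKernelFunctionFromImage and launchKernelFunction (declared in CoreDeviceCUDA.hpp). For orientation, here is a minimal, self-contained sketch of the driver-API steps such helpers typically perform: resolving a kernel by name from an in-memory object image and launching it with explicit grid/block dimensions. This is an illustration only, not the Proteus implementation; the helper names kernelFromImage and launchWithDims are hypothetical, and a current CUDA context is assumed.

// Minimal sketch (not the Proteus implementation) of the CUDA driver API steps
// that helpers like getKernelFunctionFromImage and launchKernelFunction wrap.
// Assumes cuInit() has run and a CUDA context is current on the calling thread.
#include <cuda.h>
#include <cuda_runtime.h> // for dim3
#include <cstdint>
#include <cstdio>
#include <cstdlib>

static void checkCu(CUresult Res, const char *What) {
  if (Res != CUDA_SUCCESS) {
    const char *Msg = nullptr;
    cuGetErrorString(Res, &Msg);
    std::fprintf(stderr, "%s failed: %s\n", What, Msg ? Msg : "unknown error");
    std::exit(EXIT_FAILURE);
  }
}

// Hypothetical helper: resolve a kernel by name from a compiled object image,
// e.g. the buffer produced by compile() above.
CUfunction kernelFromImage(const void *Image, const char *KernelName) {
  CUmodule Mod;
  checkCu(cuModuleLoadData(&Mod, Image), "cuModuleLoadData");
  CUfunction Func;
  checkCu(cuModuleGetFunction(&Func, Mod, KernelName), "cuModuleGetFunction");
  return Func;
}

// Hypothetical helper mirroring the launchKernelFunction signature: launch the
// kernel with explicit grid/block dimensions, argument pointers, dynamic
// shared memory size, and stream.
CUresult launchWithDims(CUfunction Func, dim3 GridDim, dim3 BlockDim,
                        void **KernelArgs, uint64_t ShmemSize,
                        CUstream Stream) {
  return cuLaunchKernel(Func, GridDim.x, GridDim.y, GridDim.z, BlockDim.x,
                        BlockDim.y, BlockDim.z,
                        static_cast<unsigned>(ShmemSize), Stream, KernelArgs,
                        /*extra=*/nullptr);
}

Linked against the driver library (-lcuda), these two calls correspond to the work getFunctionAddress() and launch() delegate for an already-compiled module.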