cl_v_eadd_fdb.hpp
/**********************************************************************************/
/* This file is part of spla project */
/* https://github.com/JetBrains-Research/spla */
/**********************************************************************************/
/* MIT License */
/* */
/* Copyright (c) 2023 SparseLinearAlgebra */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining a copy */
/* of this software and associated documentation files (the "Software"), to deal */
/* in the Software without restriction, including without limitation the rights */
/* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell */
/* copies of the Software, and to permit persons to whom the Software is */
/* furnished to do so, subject to the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be included in all */
/* copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR */
/* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, */
/* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE */
/* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER */
/* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, */
/* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE */
/* SOFTWARE. */
/**********************************************************************************/

#ifndef SPLA_CL_VECTOR_EADD_FDB_HPP
#define SPLA_CL_VECTOR_EADD_FDB_HPP

#include <core/dispatcher.hpp>
#include <core/registry.hpp>
#include <core/top.hpp>
#include <core/tscalar.hpp>
#include <core/ttype.hpp>
#include <core/tvector.hpp>

#include <opencl/cl_counter.hpp>
#include <opencl/cl_fill.hpp>
#include <opencl/cl_formats.hpp>

#include <sstream>

namespace spla {

    template<typename T>
    class Algo_v_eadd_fdb_cl final : public RegistryAlgo {
    public:
        ~Algo_v_eadd_fdb_cl() override = default;

        std::string get_name() override {
            return "v_eadd_fdb";
        }

        std::string get_description() override {
            return "parallel vector element-wise add on opencl device";
        }

        Status execute(const DispatchContext& ctx) override {
            auto t = ctx.task.template cast_safe<ScheduleTask_v_eadd_fdb>();
            ref_ptr<TVector<T>> r = t->r.template cast_safe<TVector<T>>();
            ref_ptr<TVector<T>> v = t->v.template cast_safe<TVector<T>>();

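            // Dispatch on the storage formats that are currently valid for r and v:
            // take the sparse-input path when v is resident as COO, the dense path when
            // both are dense, and fall back to the sparse-to-dense path otherwise.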
            if (r->is_valid(FormatVector::AccDense) && v->is_valid(FormatVector::AccCoo)) {
                return execute_sp2dn(ctx);
            }
            if (r->is_valid(FormatVector::AccDense) && v->is_valid(FormatVector::AccDense)) {
                return execute_dn2dn(ctx);
            }

            return execute_sp2dn(ctx);
        }

    private:
        Status execute_sp2dn(const DispatchContext& ctx) {
            TIME_PROFILE_SCOPE("cl/vector_eadd_fdb_sp2dn");

            auto t = ctx.task.template cast_safe<ScheduleTask_v_eadd_fdb>();
            ref_ptr<TVector<T>> r = t->r.template cast_safe<TVector<T>>();
            ref_ptr<TVector<T>> v = t->v.template cast_safe<TVector<T>>();
            ref_ptr<TVector<T>> fdb = t->fdb.template cast_safe<TVector<T>>();
            ref_ptr<TOpBinary<T, T, T>> op = t->op.template cast_safe<TOpBinary<T, T, T>>();

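            // Compile the OpenCL program specialized for the value type T and the
            // user-supplied binary operator before touching any device data.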
            std::shared_ptr<CLProgram> program;
            if (!ensure_kernel(op, program)) return Status::CompilationError;

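            // Ensure the operands are resident on the device in the formats this path
            // needs: r as a dense accelerator vector, v as COO, and fdb as COO output.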
            r->validate_rwd(FormatVector::AccDense);
            v->validate_rw(FormatVector::AccCoo);
            fdb->validate_wd(FormatVector::AccCoo);

            auto* p_cl_r = r->template get<CLDenseVec<T>>();
            const auto* p_cl_v = v->template get<CLCooVec<T>>();
            auto* p_cl_fdb = fdb->template get<CLCooVec<T>>();
            auto* p_cl_acc = get_acc_cl();
            auto& queue = p_cl_acc->get_queue_default();

            const uint n = p_cl_v->values;

            if (n == 0) return Status::Ok;

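            // Feedback is produced as a fresh COO vector: allocate index/value buffers
            // with worst-case capacity n and a device-side counter for the number of
            // entries the kernel actually emits.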
            CLCounterWrapper cl_fdb_size;
            cl::Buffer cl_fdb_i(p_cl_acc->get_context(), CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS, sizeof(uint) * n);
            cl::Buffer cl_fdb_x(p_cl_acc->get_context(), CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS, sizeof(T) * n);

            cl_fdb_size.set(queue, 0);

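            // Bind the dense result, the sparse input and the feedback outputs to the
            // generated "sparse_to_dense" kernel.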
            auto kernel = program->make_kernel("sparse_to_dense");
            kernel.setArg(0, p_cl_r->Ax);
            kernel.setArg(1, p_cl_v->Ai);
            kernel.setArg(2, p_cl_v->Ax);
            kernel.setArg(3, cl_fdb_i);
            kernel.setArg(4, cl_fdb_x);
            kernel.setArg(5, cl_fdb_size.buffer());
            kernel.setArg(6, n);

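            // Launch roughly one work-item per sparse entry, rounded up to whole
            // work-groups and clamped to 1024 groups; for larger n the kernel is
            // expected to stride over the remaining entries.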
            cl::NDRange global(p_cl_acc->get_default_wgs() * div_up_clamp(n, p_cl_acc->get_default_wgs(), 1u, 1024u));
            cl::NDRange local(p_cl_acc->get_default_wgs());
            queue.enqueueNDRangeKernel(kernel, cl::NullRange, global, local);

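            // Read back how many feedback entries were produced and hand the freshly
            // built COO buffers over to fdb.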
            p_cl_fdb->values = cl_fdb_size.get(queue);
            p_cl_fdb->Ai = cl_fdb_i;
            p_cl_fdb->Ax = cl_fdb_x;

            return Status::Ok;
        }

        Status execute_dn2dn(const DispatchContext& ctx) {
            TIME_PROFILE_SCOPE("cl/vector_eadd_fdb_dn2dn");

            auto t = ctx.task.template cast_safe<ScheduleTask_v_eadd_fdb>();
            ref_ptr<TVector<T>> r = t->r.template cast_safe<TVector<T>>();
            ref_ptr<TVector<T>> v = t->v.template cast_safe<TVector<T>>();
            ref_ptr<TVector<T>> fdb = t->fdb.template cast_safe<TVector<T>>();
            ref_ptr<TOpBinary<T, T, T>> op = t->op.template cast_safe<TOpBinary<T, T, T>>();

            std::shared_ptr<CLProgram> program;
            if (!ensure_kernel(op, program)) return Status::CompilationError;

            r->validate_rwd(FormatVector::AccDense);
            v->validate_rw(FormatVector::AccDense);
            fdb->validate_wd(FormatVector::AccDense);

            auto* p_cl_r = r->template get<CLDenseVec<T>>();
            const auto* p_cl_v = v->template get<CLDenseVec<T>>();
            auto* p_cl_fdb = fdb->template get<CLDenseVec<T>>();
            auto* p_cl_acc = get_acc_cl();
            auto& queue = p_cl_acc->get_queue_default();

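            // The dense path works over every row of r, so n is the full vector
            // dimension rather than a count of stored non-zeros.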
            const uint n = r->get_n_rows();

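            // Reset the dense feedback values to fdb's fill value before the kernel
            // writes into the buffer.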
            cl_fill_value(queue, p_cl_fdb->Ax, n, fdb->get_fill_value());

            auto kernel = program->make_kernel("dense_to_dense");
            kernel.setArg(0, p_cl_r->Ax);
            kernel.setArg(1, p_cl_v->Ax);
            kernel.setArg(2, p_cl_fdb->Ax);
            kernel.setArg(3, n);

            cl::NDRange global(p_cl_acc->get_default_wgs() * div_up_clamp(n, p_cl_acc->get_default_wgs(), 1u, 1024u));
            cl::NDRange local(p_cl_acc->get_default_wgs());
            queue.enqueueNDRangeKernel(kernel, cl::NullRange, global, local);

            return Status::Ok;
        }

        bool ensure_kernel(const ref_ptr<TOpBinary<T, T, T>>& op, std::shared_ptr<CLProgram>& program) {
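            // Specialize the pre-generated OpenCL source with the concrete value type
            // and the binary operator, then build the program for this device.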
            CLProgramBuilder program_builder;
            program_builder
                    .set_name("vector_eadd_fdb")
                    .add_type("TYPE", get_ttype<T>().template as<Type>())
                    .add_op("OP_BINARY", op.template as<OpBinary>())
                    .set_source(source_vector_eadd_fdb)
                    .acquire();

            program = program_builder.get_program();

            return true;
        }
    };

}// namespace spla

#endif//SPLA_CL_VECTOR_EADD_FDB_HPP
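The OpenCL C source of the two kernels ("sparse_to_dense" and "dense_to_dense") comes from the generated string source_vector_eadd_fdb and is not shown on this page. Purely as an illustrative, hypothetical sketch (not the shipped generated source), a "sparse_to_dense" entry point consistent with the argument binding above could look roughly like the following; TYPE and OP_BINARY stand for the substitutions installed by CLProgramBuilder, and the names g_rx, g_vi, g_vx, g_fdbi, g_fdbx and g_fdb_size are made up for the sketch:

__kernel void sparse_to_dense(__global TYPE*       g_rx,       /* dense values of r */
                              __global const uint* g_vi,       /* COO indices of v */
                              __global const TYPE* g_vx,       /* COO values of v */
                              __global uint*       g_fdbi,     /* feedback indices, capacity n */
                              __global TYPE*       g_fdbx,     /* feedback values, capacity n */
                              __global uint*       g_fdb_size, /* atomic count of emitted entries */
                              const uint           n) {
    /* Grid-stride loop: the host clamps the launch to at most 1024 work-groups. */
    for (uint k = get_global_id(0); k < n; k += get_global_size(0)) {
        const uint i    = g_vi[k];
        const TYPE prev = g_rx[i];
        const TYPE next = OP_BINARY(prev, g_vx[k]);
        /* Assumption: only entries whose value actually changes are reported back. */
        if (next != prev) {
            g_rx[i] = next;
            const uint slot = atomic_inc(g_fdb_size);
            g_fdbi[slot]    = i;
            g_fdbx[slot]    = next;
        }
    }
}

The grid-stride loop mirrors the host-side clamp to 1024 work-groups, and the "changed entries only" feedback policy is an assumption; the real generated kernel may handle duplicate indices and unchanged values differently.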