spla
cpu_v_emult.hpp
/**********************************************************************************/
/* This file is part of spla project */
/* https://github.com/JetBrains-Research/spla */
/**********************************************************************************/
/* MIT License */
/* */
/* Copyright (c) 2023 SparseLinearAlgebra */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining a copy */
/* of this software and associated documentation files (the "Software"), to deal */
/* in the Software without restriction, including without limitation the rights */
/* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell */
/* copies of the Software, and to permit persons to whom the Software is */
/* furnished to do so, subject to the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be included in all */
/* copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR */
/* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, */
/* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE */
/* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER */
/* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, */
/* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE */
/* SOFTWARE. */
/**********************************************************************************/

#ifndef SPLA_CPU_V_EMULT_HPP
#define SPLA_CPU_V_EMULT_HPP

#include <core/dispatcher.hpp>
#include <core/registry.hpp>
#include <core/top.hpp>
#include <core/tscalar.hpp>
#include <core/ttype.hpp>
#include <core/tvector.hpp>

namespace spla {

    /**
     * @brief Sequential CPU element-wise vector multiply: applies the binary op
     * to entries present in the structure of both u and v.
     */
    template<typename T>
    class Algo_v_emult_cpu final : public RegistryAlgo {
    public:
        ~Algo_v_emult_cpu() override = default;

        std::string get_name() override {
            return "v_emult";
        }

        std::string get_description() override {
            return "sequential element-wise mult vector operation";
        }

        Status execute(const DispatchContext& ctx) override {
            auto t = ctx.task.template cast_safe<ScheduleTask_v_emult>();
            ref_ptr<TVector<T>> u = t->u.template cast_safe<TVector<T>>();
            ref_ptr<TVector<T>> v = t->v.template cast_safe<TVector<T>>();

            // Dispatch on the formats the operands are already valid in,
            // falling back to the sparse x sparse path (which converts as needed).
            if (u->is_valid(FormatVector::CpuCoo) && v->is_valid(FormatVector::CpuCoo)) {
                return execute_spNsp(ctx);
            }
            if (u->is_valid(FormatVector::CpuCoo) && v->is_valid(FormatVector::CpuDense)) {
                return execute_spNdn(ctx);
            }
            if (u->is_valid(FormatVector::CpuDense) && v->is_valid(FormatVector::CpuCoo)) {
                return execute_dnNsp(ctx);
            }

            return execute_spNsp(ctx);
        }

    private:
        // Sparse x sparse: intersect the two index-sorted COO lists with two cursors.
        Status execute_spNsp(const DispatchContext& ctx) {
            TIME_PROFILE_SCOPE("cpu/vector_emult_spNsp");

            auto t = ctx.task.template cast_safe<ScheduleTask_v_emult>();
            ref_ptr<TVector<T>> r = t->r.template cast_safe<TVector<T>>();
            ref_ptr<TVector<T>> u = t->u.template cast_safe<TVector<T>>();
            ref_ptr<TVector<T>> v = t->v.template cast_safe<TVector<T>>();
            ref_ptr<TOpBinary<T, T, T>> op = t->op.template cast_safe<TOpBinary<T, T, T>>();

            r->validate_wd(FormatVector::CpuCoo);
            u->validate_rw(FormatVector::CpuCoo);
            v->validate_rw(FormatVector::CpuCoo);

            CpuCooVec<T>* p_r = r->template get<CpuCooVec<T>>();
            const CpuCooVec<T>* p_u = u->template get<CpuCooVec<T>>();
            const CpuCooVec<T>* p_v = v->template get<CpuCooVec<T>>();
            const auto& function = op->function;

            assert(p_r->Ai.empty());
            assert(p_r->Ax.empty());

            const auto u_count = p_u->values;
            const auto v_count = p_v->values;
            uint u_iter = 0;
            uint v_iter = 0;

            // Emit an entry only for indices present in both inputs.
            while (u_iter < u_count && v_iter < v_count) {
                if (p_u->Ai[u_iter] < p_v->Ai[v_iter]) {
                    u_iter += 1;
                } else if (p_v->Ai[v_iter] < p_u->Ai[u_iter]) {
                    v_iter += 1;
                } else {
                    p_r->values += 1;
                    p_r->Ai.push_back(p_u->Ai[u_iter]);
                    p_r->Ax.push_back(function(p_u->Ax[u_iter], p_v->Ax[v_iter]));
                    u_iter += 1;
                    v_iter += 1;
                }
            }

            return Status::Ok;
        }

        // Sparse x dense: walk the sparse u and keep entries where dense v differs
        // from its fill value.
        Status execute_spNdn(const DispatchContext& ctx) {
            TIME_PROFILE_SCOPE("cpu/vector_emult_spNdn");

            auto t = ctx.task.template cast_safe<ScheduleTask_v_emult>();
            ref_ptr<TVector<T>> r = t->r.template cast_safe<TVector<T>>();
            ref_ptr<TVector<T>> u = t->u.template cast_safe<TVector<T>>();
            ref_ptr<TVector<T>> v = t->v.template cast_safe<TVector<T>>();
            ref_ptr<TOpBinary<T, T, T>> op = t->op.template cast_safe<TOpBinary<T, T, T>>();

            r->validate_wd(FormatVector::CpuCoo);
            u->validate_rw(FormatVector::CpuCoo);
            v->validate_rw(FormatVector::CpuDense);

            CpuCooVec<T>* p_r = r->template get<CpuCooVec<T>>();
            const CpuCooVec<T>* p_u = u->template get<CpuCooVec<T>>();
            const CpuDenseVec<T>* p_v = v->template get<CpuDenseVec<T>>();
            const auto& function = op->function;
            const auto skip = v->get_fill_value();

            assert(p_r->Ai.empty());
            assert(p_r->Ax.empty());

            for (uint k = 0; k < p_u->values; k++) {
                const uint i = p_u->Ai[k];

                if (p_v->Ax[i] != skip) {
                    p_r->values += 1;
                    p_r->Ai.push_back(i);
                    p_r->Ax.push_back(function(p_u->Ax[k], p_v->Ax[i]));
                }
            }

            return Status::Ok;
        }

        // Dense x sparse: symmetric to the sparse x dense case, walking the sparse v.
        Status execute_dnNsp(const DispatchContext& ctx) {
            TIME_PROFILE_SCOPE("cpu/vector_emult_dnNsp");

            auto t = ctx.task.template cast_safe<ScheduleTask_v_emult>();
            ref_ptr<TVector<T>> r = t->r.template cast_safe<TVector<T>>();
            ref_ptr<TVector<T>> u = t->u.template cast_safe<TVector<T>>();
            ref_ptr<TVector<T>> v = t->v.template cast_safe<TVector<T>>();
            ref_ptr<TOpBinary<T, T, T>> op = t->op.template cast_safe<TOpBinary<T, T, T>>();

            r->validate_wd(FormatVector::CpuCoo);
            u->validate_rw(FormatVector::CpuDense);
            v->validate_rw(FormatVector::CpuCoo);

            CpuCooVec<T>* p_r = r->template get<CpuCooVec<T>>();
            const CpuDenseVec<T>* p_u = u->template get<CpuDenseVec<T>>();
            const CpuCooVec<T>* p_v = v->template get<CpuCooVec<T>>();
            const auto& function = op->function;
            const auto skip = u->get_fill_value();

            assert(p_r->Ai.empty());
            assert(p_r->Ax.empty());

            for (uint k = 0; k < p_v->values; k++) {
                const uint i = p_v->Ai[k];

                if (p_u->Ax[i] != skip) {
                    p_r->values += 1;
                    p_r->Ai.push_back(i);
                    p_r->Ax.push_back(function(p_u->Ax[i], p_v->Ax[k]));
                }
            }

            return Status::Ok;
        }
    };

}// namespace spla

#endif//SPLA_CPU_V_EMULT_HPP
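
For reference, the sparse x sparse path above is the classic two-cursor intersection of two index-sorted COO vectors. Below is a minimal standalone sketch of that loop using plain std::vector in place of the library's CpuCooVec; the names (emult_coo, u_idx, and so on) are illustrative only and are not part of the spla API.

#include <cstdint>
#include <iostream>
#include <vector>

// Two-cursor intersection of two index-sorted COO vectors: keep an index only
// if it appears in both inputs, combining the matching values with `op`.
// Illustrative sketch; not spla code.
template<typename T, typename OpBinary>
void emult_coo(const std::vector<std::uint32_t>& u_idx, const std::vector<T>& u_val,
               const std::vector<std::uint32_t>& v_idx, const std::vector<T>& v_val,
               std::vector<std::uint32_t>& r_idx, std::vector<T>& r_val,
               OpBinary op) {
    std::size_t i = 0, j = 0;
    while (i < u_idx.size() && j < v_idx.size()) {
        if (u_idx[i] < v_idx[j]) {
            ++i;// index present only in u: no output entry
        } else if (v_idx[j] < u_idx[i]) {
            ++j;// index present only in v: no output entry
        } else {
            r_idx.push_back(u_idx[i]);
            r_val.push_back(op(u_val[i], v_val[j]));
            ++i;
            ++j;
        }
    }
}

int main() {
    std::vector<std::uint32_t> u_idx{0, 2, 5}, v_idx{2, 3, 5}, r_idx;
    std::vector<int>           u_val{1, 2, 3}, v_val{10, 20, 30}, r_val;
    emult_coo(u_idx, u_val, v_idx, v_val, r_idx, r_val,
              [](int a, int b) { return a * b; });
    for (std::size_t k = 0; k < r_idx.size(); ++k)
        std::cout << r_idx[k] << " -> " << r_val[k] << "\n";// prints 2 -> 20, 5 -> 90
    return 0;
}

The loop visits each stored entry of u and v at most once, so the sparse x sparse kernel runs in time linear in the number of stored values of the two operands.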