spla
cpu_v_eadd.hpp
/**********************************************************************************/
/* This file is part of spla project */
/* https://github.com/JetBrains-Research/spla */
/**********************************************************************************/
/* MIT License */
/* */
/* Copyright (c) 2023 SparseLinearAlgebra */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining a copy */
/* of this software and associated documentation files (the "Software"), to deal */
/* in the Software without restriction, including without limitation the rights */
/* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell */
/* copies of the Software, and to permit persons to whom the Software is */
/* furnished to do so, subject to the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be included in all */
/* copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR */
/* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, */
/* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE */
/* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER */
/* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, */
/* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE */
/* SOFTWARE. */
/**********************************************************************************/

#ifndef SPLA_CPU_V_EADD_HPP
#define SPLA_CPU_V_EADD_HPP

#include <core/dispatcher.hpp>
#include <core/registry.hpp>
#include <core/top.hpp>
#include <core/tscalar.hpp>
#include <core/ttype.hpp>
#include <core/tvector.hpp>

namespace spla {

    template<typename T>
    class Algo_v_eadd_cpu final : public RegistryAlgo {
    public:
        ~Algo_v_eadd_cpu() override = default;

        std::string get_name() override {
            return "v_eadd";
        }

        std::string get_description() override {
            return "sequential element-wise add vector operation";
        }

        Status execute(const DispatchContext& ctx) override {
            auto                t = ctx.task.template cast_safe<ScheduleTask_v_eadd>();
            ref_ptr<TVector<T>> u = t->u.template cast_safe<TVector<T>>();
            ref_ptr<TVector<T>> v = t->v.template cast_safe<TVector<T>>();

            // Dispatch on the format both operands already share; fall back to sparse otherwise.
            if (u->is_valid(FormatVector::CpuCoo) && v->is_valid(FormatVector::CpuCoo)) {
                return execute_spNsp(ctx);
            }
            if (u->is_valid(FormatVector::CpuDense) && v->is_valid(FormatVector::CpuDense)) {
                return execute_dnNdn(ctx);
            }

            return execute_spNsp(ctx);
        }

    private:
        // Sparse (coo) + sparse (coo): two-pointer merge over the sorted index lists.
        Status execute_spNsp(const DispatchContext& ctx) {
            TIME_PROFILE_SCOPE("cpu/vector_eadd_spNsp");

            auto                        t  = ctx.task.template cast_safe<ScheduleTask_v_eadd>();
            ref_ptr<TVector<T>>         r  = t->r.template cast_safe<TVector<T>>();
            ref_ptr<TVector<T>>         u  = t->u.template cast_safe<TVector<T>>();
            ref_ptr<TVector<T>>         v  = t->v.template cast_safe<TVector<T>>();
            ref_ptr<TOpBinary<T, T, T>> op = t->op.template cast_safe<TOpBinary<T, T, T>>();

            r->validate_wd(FormatVector::CpuCoo);
            u->validate_rw(FormatVector::CpuCoo);
            v->validate_rw(FormatVector::CpuCoo);

            CpuCooVec<T>*       p_r      = r->template get<CpuCooVec<T>>();
            const CpuCooVec<T>* p_u      = u->template get<CpuCooVec<T>>();
            const CpuCooVec<T>* p_v      = v->template get<CpuCooVec<T>>();
            const auto&         function = op->function;

            const T u_fill_value = u->get_fill_value();
            const T v_fill_value = v->get_fill_value();

            assert(p_r->Ax.empty());

            const auto u_count = p_u->values;
            const auto v_count = p_v->values;
            uint       u_iter  = 0;
            uint       v_iter  = 0;

            // Walk both sorted coordinate lists; where an index is present in only one
            // operand, combine its value with the other operand's fill value.
            while (u_iter < u_count && v_iter < v_count) {
                if (p_u->Ai[u_iter] < p_v->Ai[v_iter]) {
                    p_r->Ai.push_back(p_u->Ai[u_iter]);
                    p_r->Ax.push_back(function(p_u->Ax[u_iter], v_fill_value));
                    u_iter += 1;
                } else if (p_v->Ai[v_iter] < p_u->Ai[u_iter]) {
                    p_r->Ai.push_back(p_v->Ai[v_iter]);
                    p_r->Ax.push_back(function(u_fill_value, p_v->Ax[v_iter]));
                    v_iter += 1;
                } else {
                    p_r->Ai.push_back(p_u->Ai[u_iter]);
                    p_r->Ax.push_back(function(p_u->Ax[u_iter], p_v->Ax[v_iter]));
                    u_iter += 1;
                    v_iter += 1;
                }
                p_r->values += 1;
            }
            // Drain whichever operand still has entries left.
            while (u_iter < u_count) {
                p_r->Ai.push_back(p_u->Ai[u_iter]);
                p_r->Ax.push_back(function(p_u->Ax[u_iter], v_fill_value));
                u_iter += 1;
                p_r->values += 1;
            }
            while (v_iter < v_count) {
                p_r->Ai.push_back(p_v->Ai[v_iter]);
                p_r->Ax.push_back(function(u_fill_value, p_v->Ax[v_iter]));
                v_iter += 1;
                p_r->values += 1;
            }

            return Status::Ok;
        }

        // Dense + dense: apply the binary op element by element.
        Status execute_dnNdn(const DispatchContext& ctx) {
            TIME_PROFILE_SCOPE("cpu/vector_eadd_dnNdn");

            auto                        t  = ctx.task.template cast_safe<ScheduleTask_v_eadd>();
            ref_ptr<TVector<T>>         r  = t->r.template cast_safe<TVector<T>>();
            ref_ptr<TVector<T>>         u  = t->u.template cast_safe<TVector<T>>();
            ref_ptr<TVector<T>>         v  = t->v.template cast_safe<TVector<T>>();
            ref_ptr<TOpBinary<T, T, T>> op = t->op.template cast_safe<TOpBinary<T, T, T>>();

            r->validate_wd(FormatVector::CpuDense);
            u->validate_rw(FormatVector::CpuDense);
            v->validate_rw(FormatVector::CpuDense);

            auto*       p_r      = r->template get<CpuDenseVec<T>>();
            const auto* p_u      = u->template get<CpuDenseVec<T>>();
            const auto* p_v      = v->template get<CpuDenseVec<T>>();
            const auto& function = op->function;

            const uint N = r->get_n_rows();

            for (uint i = 0; i < N; i++) {
                p_r->Ax[i] = function(p_u->Ax[i], p_v->Ax[i]);
            }

            return Status::Ok;
        }
    };

}// namespace spla

#endif//SPLA_CPU_V_EADD_HPP
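
To make the sparse path above concrete in isolation, the sketch below reproduces the two-pointer merge from execute_spNsp on plain std::vector data, outside of spla's task, decoration, and storage machinery. CooVecF, eadd_coo, and the main driver are illustrative stand-ins and not part of the spla API; indices are assumed sorted and duplicate-free, as in CpuCooVec, and the fill values play the same role as get_fill_value() in the operator class.

#include <cstdint>
#include <functional>
#include <iostream>
#include <vector>

// Hypothetical stand-in for the CpuCooVec<T> fields used above:
// sorted coordinates (Ai) and their values (Ax).
struct CooVecF {
    std::vector<std::uint32_t> Ai;
    std::vector<float>         Ax;
};

// Two-pointer merge of two sorted coordinate lists: where an index appears in
// only one operand, its value is combined with the other operand's fill value.
CooVecF eadd_coo(const CooVecF& u, const CooVecF& v,
                 float u_fill, float v_fill,
                 const std::function<float(float, float)>& op) {
    CooVecF     r;
    std::size_t i = 0, j = 0;
    while (i < u.Ai.size() && j < v.Ai.size()) {
        if (u.Ai[i] < v.Ai[j]) {
            r.Ai.push_back(u.Ai[i]);
            r.Ax.push_back(op(u.Ax[i++], v_fill));
        } else if (v.Ai[j] < u.Ai[i]) {
            r.Ai.push_back(v.Ai[j]);
            r.Ax.push_back(op(u_fill, v.Ax[j++]));
        } else {
            r.Ai.push_back(u.Ai[i]);
            r.Ax.push_back(op(u.Ax[i++], v.Ax[j++]));
        }
    }
    // Drain the remaining tail of either operand.
    for (; i < u.Ai.size(); ++i) { r.Ai.push_back(u.Ai[i]); r.Ax.push_back(op(u.Ax[i], v_fill)); }
    for (; j < v.Ai.size(); ++j) { r.Ai.push_back(v.Ai[j]); r.Ax.push_back(op(u_fill, v.Ax[j])); }
    return r;
}

int main() {
    CooVecF u{{0, 2, 5}, {1.0f, 2.0f, 3.0f}};
    CooVecF v{{2, 3}, {10.0f, 20.0f}};
    CooVecF r = eadd_coo(u, v, 0.0f, 0.0f, [](float a, float b) { return a + b; });
    for (std::size_t k = 0; k < r.Ai.size(); ++k)
        std::cout << r.Ai[k] << " -> " << r.Ax[k] << "\n";
}

Running this prints 0 -> 1, 2 -> 12, 3 -> 20, 5 -> 3: the result covers the union of the two index sets, which is exactly the semantics the merge loop in execute_spNsp implements.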