spla
cpu_v_reduce.hpp
/**********************************************************************************/
/* This file is part of spla project */
/* https://github.com/SparseLinearAlgebra/spla */
/**********************************************************************************/
/* MIT License */
/* */
/* Copyright (c) 2023 SparseLinearAlgebra */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining a copy */
/* of this software and associated documentation files (the "Software"), to deal */
/* in the Software without restriction, including without limitation the rights */
/* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell */
/* copies of the Software, and to permit persons to whom the Software is */
/* furnished to do so, subject to the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be included in all */
/* copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR */
/* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, */
/* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE */
/* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER */
/* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, */
/* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE */
/* SOFTWARE. */
/**********************************************************************************/

#ifndef SPLA_CPU_V_REDUCE_HPP
#define SPLA_CPU_V_REDUCE_HPP

#include <core/dispatcher.hpp>
#include <core/registry.hpp>
#include <core/top.hpp>
#include <core/tscalar.hpp>
#include <core/ttype.hpp>
#include <core/tvector.hpp>

namespace spla {

    // Sequential CPU reduction of a vector to a scalar: folds all vector values
    // into the initial scalar s with op_reduce and stores the result in r.
    template<typename T>
    class Algo_v_reduce_cpu final : public RegistryAlgo {
    public:
        ~Algo_v_reduce_cpu() override = default;

        std::string get_name() override {
            return "v_reduce";
        }

        std::string get_description() override {
            return "sequential vector reduction on cpu";
        }

        Status execute(const DispatchContext& ctx) override {
            auto t = ctx.task.template cast_safe<ScheduleTask_v_reduce>();
            ref_ptr<TVector<T>> v = t->v.template cast_safe<TVector<T>>();

            // Dispatch on the format the vector already holds; default to the sparse path.
            if (v->is_valid(FormatVector::CpuCoo)) {
                return execute_sp(ctx);
            }
            if (v->is_valid(FormatVector::CpuDense)) {
                return execute_dn(ctx);
            }

            return execute_sp(ctx);
        }

    private:
        Status execute_sp(const DispatchContext& ctx) {
            TIME_PROFILE_SCOPE("cpu/vector_reduce_sparse");

            auto t = ctx.task.template cast_safe<ScheduleTask_v_reduce>();

            auto r = t->r.template cast_safe<TScalar<T>>();
            auto s = t->s.template cast_safe<TScalar<T>>();
            auto v = t->v.template cast_safe<TVector<T>>();
            auto op_reduce = t->op_reduce.template cast_safe<TOpBinary<T, T, T>>();

            T sum = s->get_value();

            v->validate_rw(FormatVector::CpuCoo);
            const auto* p_sparse = v->template get<CpuCooVec<T>>();
            const auto& function = op_reduce->function;

            // Fold only the stored entries of the COO representation.
            for (const auto& value : p_sparse->Ax) {
                sum = function(sum, value);
            }

            r->get_value() = sum;

            return Status::Ok;
        }

        Status execute_dn(const DispatchContext& ctx) {
            TIME_PROFILE_SCOPE("cpu/vector_reduce_dense");

            auto t = ctx.task.template cast_safe<ScheduleTask_v_reduce>();

            auto r = t->r.template cast_safe<TScalar<T>>();
            auto s = t->s.template cast_safe<TScalar<T>>();
            auto v = t->v.template cast_safe<TVector<T>>();
            auto op_reduce = t->op_reduce.template cast_safe<TOpBinary<T, T, T>>();

            T sum = s->get_value();

            v->validate_rw(FormatVector::CpuDense);
            const auto* p_dense = v->template get<CpuDenseVec<T>>();
            const auto& function = op_reduce->function;

            // Fold every entry of the dense representation.
            for (const auto& value : p_dense->Ax) {
                sum = function(sum, value);
            }

            r->get_value() = sum;

            return Status::Ok;
        }
    };

}// namespace spla

#endif//SPLA_CPU_V_REDUCE_HPP
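Both private branches implement the same sequential left fold: start from the initial scalar s, combine each value of the vector with the binary operator, and write the result to r; the only difference is whether the loop walks the stored COO entries or the full dense array. The snippet below is a minimal, self-contained restatement of that pattern using plain standard-library types; SparseVec and DenseVec are illustrative stand-ins, not spla's CpuCooVec/CpuDenseVec, and reduce_values is a hypothetical helper, not part of the library.

#include <cstdio>
#include <functional>
#include <vector>

// Illustrative stand-ins for the two storage layouts the algorithm handles.
template<typename T>
struct SparseVec {
    std::vector<unsigned> Ai;// indices of stored entries
    std::vector<T>        Ax;// values of stored entries
};

template<typename T>
struct DenseVec {
    std::vector<T> Ax;// one value per row, explicit zeros included
};

// Sequential fold mirroring execute_sp/execute_dn:
// sum = op(...op(op(init, x0), x1)..., xn-1).
template<typename T, typename Op>
T reduce_values(T init, const std::vector<T>& values, Op op) {
    T sum = init;
    for (const T& value : values) {
        sum = op(sum, value);
    }
    return sum;
}

int main() {
    SparseVec<float> sparse{{1, 4, 7}, {2.0f, 3.0f, 4.0f}};
    DenseVec<float>  dense{{0.0f, 2.0f, 0.0f, 3.0f, 4.0f}};

    // The sparse branch touches only stored values; the dense branch touches every slot.
    std::printf("sparse sum = %f\n", reduce_values(1.0f, sparse.Ax, std::plus<float>()));// 1+2+3+4 = 10
    std::printf("dense  sum = %f\n", reduce_values(1.0f, dense.Ax, std::plus<float>())); // 1+0+2+0+3+4 = 10
    return 0;
}

With an arbitrary (possibly non-commutative) op_reduce the two branches are equivalent only when the vector's implicit entries equal the fill value that the operator treats as an identity; the class itself simply reduces whatever representation is currently valid.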