spla
cpu_m_transpose.hpp
/**********************************************************************************/
/* This file is part of spla project */
/* https://github.com/JetBrains-Research/spla */
/**********************************************************************************/
/* MIT License */
/* */
/* Copyright (c) 2023 SparseLinearAlgebra */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining a copy */
/* of this software and associated documentation files (the "Software"), to deal */
/* in the Software without restriction, including without limitation the rights */
/* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell */
/* copies of the Software, and to permit persons to whom the Software is */
/* furnished to do so, subject to the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be included in all */
/* copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR */
/* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, */
/* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE */
/* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER */
/* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, */
/* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE */
/* SOFTWARE. */
/**********************************************************************************/

#ifndef SPLA_CPU_M_TRANSPOSE_HPP
#define SPLA_CPU_M_TRANSPOSE_HPP

#include <schedule/schedule_tasks.hpp>// provides ScheduleTask_m_transpose

#include <core/dispatcher.hpp>
#include <core/registry.hpp>
#include <core/tmatrix.hpp>
#include <core/top.hpp>
#include <core/tscalar.hpp>
#include <core/ttype.hpp>
#include <core/tvector.hpp>

#include <algorithm>
#include <numeric>

namespace spla {

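    /**
     * @class Algo_m_transpose_cpu
     * @brief Sequential CPU transpose R = op(M)^T over CSR, LIL, or DOK stored matrices
     */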
    template<typename T>
    class Algo_m_transpose_cpu final : public RegistryAlgo {
    public:
        ~Algo_m_transpose_cpu() override = default;

        std::string get_name() override {
            return "m_transpose";
        }

        std::string get_description() override {
            return "transpose matrix on cpu sequentially";
        }

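        // Pick the implementation matching the format M already has, so no extra
        // format conversion is needed; fall back to CSR otherwise.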
        Status execute(const DispatchContext& ctx) override {
            auto t = ctx.task.template cast_safe<ScheduleTask_m_transpose>();
            auto M = t->M.template cast_safe<TMatrix<T>>();

            if (M->is_valid(FormatMatrix::CpuCsr)) {
                return execute_csr(ctx);
            }
            if (M->is_valid(FormatMatrix::CpuLil)) {
                return execute_lil(ctx);
            }
            if (M->is_valid(FormatMatrix::CpuDok)) {
                return execute_dok(ctx);
            }

            return execute_csr(ctx);
        }

    private:
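        // DOK transpose: every (row, col) -> value entry of M becomes a
        // (col, row) -> op(value) entry of R.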
        Status execute_dok(const DispatchContext& ctx) {
            TIME_PROFILE_SCOPE("cpu/m_transpose_dok");

            auto t = ctx.task.template cast_safe<ScheduleTask_m_transpose>();

            ref_ptr<TMatrix<T>> R = t->R.template cast_safe<TMatrix<T>>();
            ref_ptr<TMatrix<T>> M = t->M.template cast_safe<TMatrix<T>>();
            ref_ptr<TOpUnary<T, T>> op_apply = t->op_apply.template cast_safe<TOpUnary<T, T>>();

            R->validate_wd(FormatMatrix::CpuDok);
            M->validate_rw(FormatMatrix::CpuDok);

            CpuDok<T>* p_dok_R = R->template get<CpuDok<T>>();
            const CpuDok<T>* p_dok_M = M->template get<CpuDok<T>>();
            auto& func_apply = op_apply->function;

            assert(p_dok_R->Ax.empty());

            p_dok_R->Ax.reserve(p_dok_M->Ax.size());

            for (const auto& entry : p_dok_M->Ax) {
                p_dok_R->Ax[{entry.first.second, entry.first.first}] = func_apply(entry.second);
            }

            p_dok_R->values = p_dok_M->values;

            return Status::Ok;
        }

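        // LIL transpose: walk each row i of M and append (i, op(x)) to row j of R
        // for every stored (j, x). R must already have one row list per column of M;
        // R's column indices come out sorted because rows of M are visited in order.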
        Status execute_lil(const DispatchContext& ctx) {
            TIME_PROFILE_SCOPE("cpu/m_transpose_lil");

            auto t = ctx.task.template cast_safe<ScheduleTask_m_transpose>();

            ref_ptr<TMatrix<T>> R = t->R.template cast_safe<TMatrix<T>>();
            ref_ptr<TMatrix<T>> M = t->M.template cast_safe<TMatrix<T>>();
            ref_ptr<TOpUnary<T, T>> op_apply = t->op_apply.template cast_safe<TOpUnary<T, T>>();

            R->validate_wd(FormatMatrix::CpuLil);
            M->validate_rw(FormatMatrix::CpuLil);

            CpuLil<T>* p_lil_R = R->template get<CpuLil<T>>();
            const CpuLil<T>* p_lil_M = M->template get<CpuLil<T>>();
            auto& func_apply = op_apply->function;

            const uint DM = M->get_n_rows();
            const uint DN = M->get_n_cols();

            assert(M->get_n_rows() == R->get_n_cols());
            assert(M->get_n_cols() == R->get_n_rows());

            assert(p_lil_R->Ar.size() == DN);

            for (uint i = 0; i < DM; i++) {
                for (const auto [j, x] : p_lil_M->Ar[i]) {
                    p_lil_R->Ar[j].emplace_back(i, func_apply(x));
                }
            }

            p_lil_R->values = p_lil_M->values;

            return Status::Ok;
        }

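        // CSR transpose in two passes: first count the entries per column of M and
        // build R's row pointers with an exclusive scan, then scatter each entry
        // (i, j, x) of M into row j of R as (i, op(x)).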
        Status execute_csr(const DispatchContext& ctx) {
            TIME_PROFILE_SCOPE("cpu/m_transpose_csr");

            auto t = ctx.task.template cast_safe<ScheduleTask_m_transpose>();

            ref_ptr<TMatrix<T>> R = t->R.template cast_safe<TMatrix<T>>();
            ref_ptr<TMatrix<T>> M = t->M.template cast_safe<TMatrix<T>>();
            ref_ptr<TOpUnary<T, T>> op_apply = t->op_apply.template cast_safe<TOpUnary<T, T>>();

            R->validate_wd(FormatMatrix::CpuCsr);
            M->validate_rw(FormatMatrix::CpuCsr);

            CpuCsr<T>* p_csr_R = R->template get<CpuCsr<T>>();
            const CpuCsr<T>* p_csr_M = M->template get<CpuCsr<T>>();
            auto& func_apply = op_apply->function;

            const uint DM = M->get_n_rows();
            const uint DN = M->get_n_cols();

            assert(M->get_n_rows() == R->get_n_cols());
            assert(M->get_n_cols() == R->get_n_rows());

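            // Pass 1: count how many values fall into each column j of M
            // (sizes[DN] stays 0 so the exclusive scan fills all DN + 1 row pointers).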
            std::vector<uint> sizes(DN + 1, 0);

            for (uint i = 0; i < DM; i++) {
                for (uint k = p_csr_M->Ap[i]; k < p_csr_M->Ap[i + 1]; k++) {
                    uint j = p_csr_M->Aj[k];
                    sizes[j] += 1;
                }
            }

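            // Allocate R for DN rows and the same number of values, then turn the
            // per-column counts into R's row pointers Ap via an exclusive prefix sum.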
            cpu_csr_resize(DN, uint(p_csr_M->Ax.size()), *p_csr_R);
            std::exclusive_scan(sizes.begin(), sizes.end(), p_csr_R->Ap.begin(), 0);

            std::vector<uint> offsets(DN, 0);

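            // Pass 2: scatter each stored entry of M into its destination row of R,
            // applying the unary op; offsets[j] tracks how far row j of R is filled.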
            for (uint i = 0; i < DM; i++) {
                for (uint k = p_csr_M->Ap[i]; k < p_csr_M->Ap[i + 1]; k++) {
                    uint j = p_csr_M->Aj[k];
                    T x = p_csr_M->Ax[k];

                    p_csr_R->Aj[p_csr_R->Ap[j] + offsets[j]] = i;
                    p_csr_R->Ax[p_csr_R->Ap[j] + offsets[j]] = func_apply(x);

                    offsets[j] += 1;
                }
            }

            p_csr_R->values = p_csr_M->values;

            return Status::Ok;
        }
    };

}// namespace spla

#endif//SPLA_CPU_M_TRANSPOSE_HPP
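For readers who want to see the CSR path in isolation, below is a minimal, self-contained sketch of the same two-pass transpose on plain std::vector buffers, without spla's TMatrix/CpuCsr machinery. The CsrDemo struct, csr_transpose function, and identity op are illustrative names introduced here, not part of the spla API; it compiles as C++17 (for std::exclusive_scan).

// Standalone sketch of the two-pass CSR transpose used by execute_csr above.
// CsrDemo / csr_transpose / identity are illustrative, not spla API.
#include <cstdint>
#include <iostream>
#include <numeric>
#include <vector>

struct CsrDemo {
    std::uint32_t n_rows = 0;
    std::uint32_t n_cols = 0;
    std::vector<std::uint32_t> Ap;// row pointers, size n_rows + 1
    std::vector<std::uint32_t> Aj;// column index per stored value
    std::vector<float> Ax;        // stored values
};

static float identity(float x) { return x; }

// Returns op(M)^T.
CsrDemo csr_transpose(const CsrDemo& M, float (*op)(float) = identity) {
    CsrDemo R;
    R.n_rows = M.n_cols;
    R.n_cols = M.n_rows;
    R.Ap.assign(M.n_cols + 1, 0);
    R.Aj.resize(M.Ax.size());
    R.Ax.resize(M.Ax.size());

    // Pass 1: count values per column of M, then exclusive scan into R's row pointers.
    std::vector<std::uint32_t> sizes(M.n_cols + 1, 0);
    for (std::uint32_t i = 0; i < M.n_rows; i++)
        for (std::uint32_t k = M.Ap[i]; k < M.Ap[i + 1]; k++)
            sizes[M.Aj[k]] += 1;
    std::exclusive_scan(sizes.begin(), sizes.end(), R.Ap.begin(), 0u);

    // Pass 2: scatter each (i, j, x) of M into row j of R as (i, op(x)).
    std::vector<std::uint32_t> offsets(M.n_cols, 0);
    for (std::uint32_t i = 0; i < M.n_rows; i++)
        for (std::uint32_t k = M.Ap[i]; k < M.Ap[i + 1]; k++) {
            const std::uint32_t j = M.Aj[k];
            const std::uint32_t dst = R.Ap[j] + offsets[j]++;
            R.Aj[dst] = i;
            R.Ax[dst] = op(M.Ax[k]);
        }
    return R;
}

int main() {
    // 2x3 matrix [[1, 0, 2], [0, 3, 0]] in CSR form.
    CsrDemo M{2, 3, {0, 2, 3}, {0, 2, 1}, {1.0f, 2.0f, 3.0f}};
    CsrDemo R = csr_transpose(M);
    // Expected 3x2 transpose: Ap = {0, 1, 2, 3}, Aj = {0, 1, 0}, Ax = {1, 3, 2}.
    for (std::uint32_t i = 0; i < R.n_rows; i++)
        for (std::uint32_t k = R.Ap[i]; k < R.Ap[i + 1]; k++)
            std::cout << i << " " << R.Aj[k] << " " << R.Ax[k] << "\n";
    return 0;
}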