spla
cpu_m_eadd.hpp
/**********************************************************************************/
/* This file is part of spla project */
/* https://github.com/JetBrains-Research/spla */
/**********************************************************************************/
/* MIT License */
/* */
/* Copyright (c) 2023 SparseLinearAlgebra */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining a copy */
/* of this software and associated documentation files (the "Software"), to deal */
/* in the Software without restriction, including without limitation the rights */
/* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell */
/* copies of the Software, and to permit persons to whom the Software is */
/* furnished to do so, subject to the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be included in all */
/* copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR */
/* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, */
/* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE */
/* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER */
/* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, */
/* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE */
/* SOFTWARE. */
/**********************************************************************************/

#ifndef SPLA_CPU_M_EADD_HPP
#define SPLA_CPU_M_EADD_HPP

#include <core/dispatcher.hpp>
#include <core/registry.hpp>
#include <core/tmatrix.hpp>
#include <core/top.hpp>
#include <core/tscalar.hpp>
#include <core/ttype.hpp>

namespace spla {

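    /**
     * @brief Sequential CPU element-wise addition of two matrices stored in LIL format.
     *
     * Rows of A and B are merged by column index; where both matrices store an entry
     * at the same position, the two values are combined with the provided binary op.
     */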
    template<typename T>
    class Algo_m_eadd_cpu final : public RegistryAlgo {
    public:
        ~Algo_m_eadd_cpu() override = default;

        std::string get_name() override {
            return "m_eadd";
        }

        std::string get_description() override {
            return "sequential element-wise add matrix operation";
        }

        Status execute(const DispatchContext& ctx) override {
            auto t = ctx.task.template cast_safe<ScheduleTask_m_eadd>();
            ref_ptr<TMatrix<T>> A = t->A.template cast_safe<TMatrix<T>>();
            ref_ptr<TMatrix<T>> B = t->B.template cast_safe<TMatrix<T>>();

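            // LIL is the only CPU path implemented in this kernel, so both the
            // matched branch below and the fallback dispatch to execute_lil.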
            if (A->is_valid(FormatMatrix::CpuLil) && B->is_valid(FormatMatrix::CpuLil)) {
                return execute_lil(ctx);
            }

            return execute_lil(ctx);
        }

    private:
        Status execute_lil(const DispatchContext& ctx) {
            TIME_PROFILE_SCOPE("cpu/matrix_eadd_lil");

            auto t = ctx.task.template cast_safe<ScheduleTask_m_eadd>();
            ref_ptr<TMatrix<T>> R = t->R.template cast_safe<TMatrix<T>>();
            ref_ptr<TMatrix<T>> A = t->A.template cast_safe<TMatrix<T>>();
            ref_ptr<TMatrix<T>> B = t->B.template cast_safe<TMatrix<T>>();
            ref_ptr<TOpBinary<T, T, T>> op = t->op.template cast_safe<TOpBinary<T, T, T>>();

            R->validate_wd(FormatMatrix::CpuLil);
            A->validate_rw(FormatMatrix::CpuLil);
            B->validate_rw(FormatMatrix::CpuLil);

            auto* p_R = R->template get<CpuLil<T>>();
            const auto* p_A = A->template get<CpuLil<T>>();
            const auto* p_B = B->template get<CpuLil<T>>();
            const auto& function = op->function;

            const uint N = R->get_n_rows();
            const auto fill_value_R = R->get_fill_value();

            p_R->values = 0;

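            // Merge each pair of rows with a two-pointer pass: entries in a LIL row
            // are assumed sorted by column index, so the result row is produced in
            // order. Results equal to R's fill value are dropped; p_R->values counts
            // the entries actually stored.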
            for (uint i = 0; i < N; i++) {
                auto& row_R = p_R->Ar[i];
                const auto& row_A = p_A->Ar[i];
                const auto& row_B = p_B->Ar[i];

                auto iter_A = row_A.begin();
                auto iter_B = row_B.begin();

                auto end_A = row_A.end();
                auto end_B = row_B.end();

                while (iter_A != end_A && iter_B != end_B) {
                    const auto [i_A, x_A] = *iter_A;
                    const auto [i_B, x_B] = *iter_B;

                    T r;
                    uint j;

                    if (i_A < i_B) {
                        // entry present only in A
                        r = x_A;
                        j = i_A;
                        ++iter_A;
                    } else if (i_B < i_A) {
                        // entry present only in B
                        r = x_B;
                        j = i_B;
                        ++iter_B;
                    } else {
                        // entry present in both: combine values with the binary op
                        r = function(x_A, x_B);
                        j = i_A;
                        ++iter_A;
                        ++iter_B;
                    }

                    if (r != fill_value_R) {
                        row_R.emplace_back(j, r);
                        p_R->values += 1;
                    }
                }
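                // Whichever row was not exhausted still has entries past the other
                // row's end; append its tail, still skipping fill values.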
                while (iter_A != end_A) {
                    const auto [i_A, x_A] = *iter_A;
                    auto r = x_A;
                    if (r != fill_value_R) {
                        row_R.emplace_back(i_A, r);
                        p_R->values += 1;
                    }
                    ++iter_A;
                }
                while (iter_B != end_B) {
                    const auto [i_B, x_B] = *iter_B;
                    auto r = x_B;
                    if (r != fill_value_R) {
                        row_R.emplace_back(i_B, r);
                        p_R->values += 1;
                    }
                    ++iter_B;
                }
            }

            return Status::Ok;
        }
    };

}// namespace spla

#endif//SPLA_CPU_M_EADD_HPP
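
For illustration only (not part of the spla sources), a minimal standalone sketch of the same sorted-row merge that execute_lil performs, with plain std::vector rows and a lambda standing in for TOpBinary; all names below are hypothetical:

#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

// One LIL-style sparse row: (column, value) pairs sorted by column index.
using SparseRow = std::vector<std::pair<std::uint32_t, float>>;

// Element-wise "add" of two sorted sparse rows: unmatched columns are copied
// through, matched columns are combined with op, and results equal to
// fill_value are dropped (mirroring the three-way branch in execute_lil).
template<typename OpBinary>
SparseRow eadd_row(const SparseRow& a, const SparseRow& b, OpBinary op, float fill_value) {
    SparseRow r;
    std::size_t ia = 0, ib = 0;
    while (ia < a.size() && ib < b.size()) {
        auto [ja, xa] = a[ia];
        auto [jb, xb] = b[ib];
        std::uint32_t j;
        float x;
        if (ja < jb) { j = ja; x = xa; ++ia; }            // only in a
        else if (jb < ja) { j = jb; x = xb; ++ib; }       // only in b
        else { j = ja; x = op(xa, xb); ++ia; ++ib; }      // in both: combine
        if (x != fill_value) r.emplace_back(j, x);
    }
    // drain whichever row still has entries left
    for (; ia < a.size(); ++ia) if (a[ia].second != fill_value) r.push_back(a[ia]);
    for (; ib < b.size(); ++ib) if (b[ib].second != fill_value) r.push_back(b[ib]);
    return r;
}

int main() {
    SparseRow a = {{0, 1.0f}, {2, 3.0f}, {5, 4.0f}};
    SparseRow b = {{2, 2.0f}, {3, 7.0f}};
    for (auto [j, x] : eadd_row(a, b, [](float l, float r) { return l + r; }, 0.0f))
        std::cout << j << ":" << x << " ";  // prints 0:1 2:5 3:7 5:4
    std::cout << "\n";
}

Because both input rows are sorted, the merge visits each stored entry exactly once, so the full matrix eadd is linear in the number of stored values.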