spla
Loading...
Searching...
No Matches
cpu_vxm.hpp
Go to the documentation of this file.
1/**********************************************************************************/
2/* This file is part of spla project */
3/* https://github.com/SparseLinearAlgebra/spla */
4/**********************************************************************************/
5/* MIT License */
6/* */
7/* Copyright (c) 2023 SparseLinearAlgebra */
8/* */
9/* Permission is hereby granted, free of charge, to any person obtaining a copy */
10/* of this software and associated documentation files (the "Software"), to deal */
11/* in the Software without restriction, including without limitation the rights */
12/* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell */
13/* copies of the Software, and to permit persons to whom the Software is */
14/* furnished to do so, subject to the following conditions: */
15/* */
16/* The above copyright notice and this permission notice shall be included in all */
17/* copies or substantial portions of the Software. */
18/* */
19/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR */
20/* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, */
21/* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE */
22/* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER */
23/* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, */
24/* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE */
25/* SOFTWARE. */
26/**********************************************************************************/
27
28#ifndef SPLA_CPU_VXM_HPP
29#define SPLA_CPU_VXM_HPP
30
32
#include <core/dispatcher.hpp>
#include <core/registry.hpp>
#include <core/tmatrix.hpp>
#include <core/top.hpp>
#include <core/tscalar.hpp>
#include <core/ttype.hpp>
#include <core/tvector.hpp>

#include <robin_hood.hpp>

#include <algorithm>
#include <string>
#include <utility>
#include <vector>
42
43namespace spla {
44
45 template<typename T>
46 class Algo_vxm_masked_cpu final : public RegistryAlgo {
47 public:
48 ~Algo_vxm_masked_cpu() override = default;
49
50 std::string get_name() override {
51 return "vxm_masked";
52 }
53
54 std::string get_description() override {
55 return "sequential masked vector-matrix product on cpu";
56 }
57
58 Status execute(const DispatchContext& ctx) override {
59 TIME_PROFILE_SCOPE("cpu/vxm");
60
61 auto t = ctx.task.template cast_safe<ScheduleTask_vxm_masked>();
62
63 auto r = t->r.template cast_safe<TVector<T>>();
64 auto mask = t->mask.template cast_safe<TVector<T>>();
65 auto v = t->v.template cast_safe<TVector<T>>();
66 auto M = t->M.template cast_safe<TMatrix<T>>();
67 auto op_multiply = t->op_multiply.template cast_safe<TOpBinary<T, T, T>>();
68 auto op_add = t->op_add.template cast_safe<TOpBinary<T, T, T>>();
69 auto op_select = t->op_select.template cast_safe<TOpSelect<T>>();
70 auto init = t->init.template cast_safe<TScalar<T>>();
71
72 const T sum_init = init->get_value();
73
74 r->validate_wd(FormatVector::CpuCoo);
75 mask->validate_rw(FormatVector::CpuDense);
76 v->validate_rw(FormatVector::CpuCoo);
77 M->validate_rw(FormatMatrix::CpuLil);
78
79 CpuCooVec<T>* p_sparse_r = r->template get<CpuCooVec<T>>();
80 const CpuDenseVec<T>* p_dense_mask = mask->template get<CpuDenseVec<T>>();
81 const CpuCooVec<T>* p_sparse_v = v->template get<CpuCooVec<T>>();
82 const CpuLil<T>* p_lil_M = M->template get<CpuLil<T>>();
83
84 auto& func_multiply = op_multiply->function;
85 auto& func_add = op_add->function;
86 auto& func_select = op_select->function;
87
88 const uint N = p_sparse_v->values;
89
90 robin_hood::unordered_flat_map<uint, T> r_tmp;
91
92 for (uint idx = 0; idx < N; ++idx) {
93 const uint v_i = p_sparse_v->Ai[idx];
94 const T v_x = p_sparse_v->Ax[idx];
95
96 const auto& row = p_lil_M->Ar[v_i];
97
98 for (const auto& j_x : row) {
99 const uint j = j_x.first;
100
101 if (func_select(p_dense_mask->Ax[j])) {
102 auto r_x = r_tmp.find(j);
103
104 if (r_x != r_tmp.end())
105 r_x->second = func_add(r_x->second, func_multiply(v_x, j_x.second));
106 else
107 r_tmp[j] = func_multiply(v_x, j_x.second);
108 }
109 }
110 }
111
112 std::vector<std::pair<uint, T>> r_entries;
113 r_entries.reserve(r_tmp.size());
114 for (const auto& e : r_tmp) {
115 r_entries.emplace_back(e.first, e.second);
116 }
117 std::sort(r_entries.begin(), r_entries.end());
118
119 p_sparse_r->values = uint(r_tmp.size());
120 p_sparse_r->Ai.reserve(r_tmp.size());
121 p_sparse_r->Ax.reserve(r_tmp.size());
122 for (const auto& e : r_entries) {
123 p_sparse_r->Ai.push_back(e.first);
124 p_sparse_r->Ax.push_back(e.second);
125 }
126
127 return Status::Ok;
128 }
129 };
130
131}// namespace spla
132
133#endif//SPLA_CPU_VXM_HPP
Status of library operation execution.
Definition cpu_vxm.hpp:46
~Algo_vxm_masked_cpu() override=default
std::string get_name() override
Definition cpu_vxm.hpp:50
std::string get_description() override
Definition cpu_vxm.hpp:54
Status execute(const DispatchContext &ctx) override
Definition cpu_vxm.hpp:58
CPU list-of-coordinates sparse vector representation.
Definition cpu_formats.hpp:90
std::vector< uint > Ai
Definition cpu_formats.hpp:96
std::vector< T > Ax
Definition cpu_formats.hpp:97
CPU one-dim array for dense vector representation.
Definition cpu_formats.hpp:74
CPU list-of-list matrix format for fast incremental build.
Definition cpu_formats.hpp:107
Algorithm suitable to process schedule task based on task string key.
Definition registry.hpp:66
uint values
Definition tdecoration.hpp:58
std::uint32_t uint
Library index and size type.
Definition config.hpp:56
Definition algorithm.hpp:37
Execution context of a single task.
Definition dispatcher.hpp:46
ref_ptr< ScheduleTask > task
Definition dispatcher.hpp:48
#define TIME_PROFILE_SCOPE(name)
Definition time_profiler.hpp:92