MatrixElementCache.h
#pragma once

#include <algorithm>
#include <array>
#include <cstdlib>
#include <mutex>
#include <type_traits>
#include <vector>

// Project includes (paths assumed from the symbols used below): the global
// matrix/vector types, MathLib::toMatrix, and the assembly Stats counters.
#include "MathLib/LinAlg/Eigen/EigenMapTools.h"
#include "NumLib/NumericsConfig.h"
#include "ProcessLib/Assembly/MatrixAssemblyStats.h"

namespace ProcessLib::Assembly
{
namespace detail
{
#ifdef USE_PETSC
// PETSc builds mark ghost (off-process) entries with negative global indices
// (with -n standing in for ghost index 0); map them back to non-ghost indices.
inline GlobalIndexType transformToNonGhostIndex(GlobalIndexType const i,
                                                GlobalIndexType const n)
{
    if (i == -n)
    {
        return 0;
    }
    else
    {
        return std::abs(i);
    }
}
#else
inline GlobalIndexType transformToNonGhostIndex(GlobalIndexType const i,
                                                GlobalIndexType const /*n*/)
{
    return i;
}
#endif
}  // namespace detail

template <std::size_t Dim>
struct MatrixElementCacheEntry
{
    MatrixElementCacheEntry() = default;
    MatrixElementCacheEntry(MatrixElementCacheEntry const&) = default;
    MatrixElementCacheEntry(MatrixElementCacheEntry&&) = default;

    MatrixElementCacheEntry(std::array<GlobalIndexType, Dim> const& indices,
                            double value)
        : indices{indices}, value{value}
    {
    }

    MatrixElementCacheEntry& operator=(MatrixElementCacheEntry const&) =
        default;
    MatrixElementCacheEntry& operator=(MatrixElementCacheEntry&&) = default;

    std::array<GlobalIndexType, Dim> indices;
    double value;
};

template <std::size_t Dim>
class ConcurrentMatrixView
{
    static_assert(Dim == 1 || Dim == 2);
    using GlobalMatOrVec =
        std::conditional_t<Dim == 1, GlobalVector, GlobalMatrix>;

public:
    ConcurrentMatrixView(GlobalMatOrVec& mat_or_vec)
        : mat_or_vec_{mat_or_vec}
    {
    }

    void add(std::vector<MatrixElementCacheEntry<Dim>> const& entries)
    {
        std::lock_guard<std::mutex> const lock(mutex_);

        if constexpr (Dim == 2)
        {
            auto const n_cols = mat_or_vec_.getNumberOfColumns();

            // TODO would be more efficient if our global matrix and vector
            // implementations supported batch addition of matrix elements with
            // arbitrary indices (not restricted to (n x m) shaped submatrices).
            for (auto const [rc, value] : entries)
            {
                auto const [r, c] = rc;

                auto const c_no_ghost =
                    detail::transformToNonGhostIndex(c, n_cols);

                mat_or_vec_.add(r, c_no_ghost, value);
            }
        }
        else
        {
            // TODO batch addition would be more efficient. That needs the
            // refactoring of the matrix element cache.
            for (auto const [r, value] : entries)
            {
                mat_or_vec_.add(r.front(), value);
            }
        }
    }

private:
    std::mutex mutex_;
    GlobalMatOrVec& mat_or_vec_;
};

ConcurrentMatrixView(GlobalVector&) -> ConcurrentMatrixView<1>;
ConcurrentMatrixView(GlobalMatrix&) -> ConcurrentMatrixView<2>;

template <std::size_t Dim>
class MatrixElementCache
{
    static_assert(Dim == 1 || Dim == 2);
    using GlobalMatView = ConcurrentMatrixView<Dim>;

    static constexpr std::size_t cache_capacity = 1'000'000;

public:
    MatrixElementCache(GlobalMatView& mat_or_vec, Stats& stats)
        : mat_or_vec_(mat_or_vec), stats_(stats)
    {
        cache_.reserve(cache_capacity);
    }

    void add(std::vector<double> const& local_data,
             std::vector<GlobalIndexType> const& indices)
    {
        addToCache(local_data, indices);
    }

    // Flush any remaining cached entries when the cache goes out of scope.
    ~MatrixElementCache() { addToGlobal(); }

private:
    void addToCache(std::vector<double> const& values,
                    std::vector<GlobalIndexType> const& indices)
    {
        if (values.empty())
        {
            return;
        }

        ensureEnoughSpace(values.size());

        addToCacheImpl(values, indices,
                       std::integral_constant<std::size_t, Dim>{});
    }

    // Overload for vectors.
    void addToCacheImpl(std::vector<double> const& values,
                        std::vector<GlobalIndexType> const& indices,
                        std::integral_constant<std::size_t, 1>)
    {
        auto const num_r_c = indices.size();

        for (std::size_t r_local = 0; r_local < num_r_c; ++r_local)
        {
            ++stats_.count;
            auto const value = values[r_local];

            if (value == 0)
            {
                continue;
            }
            else
            {
                ++stats_.count_nonzero;
            }

            auto const r_global = indices[r_local];
            cache_.emplace_back(std::array{r_global}, value);
        }
    }

    // Overload for matrices.
    void addToCacheImpl(std::vector<double> const& values,
                        std::vector<GlobalIndexType> const& indices,
                        std::integral_constant<std::size_t, 2>)
    {
        auto const num_r_c = indices.size();

        // Note: There is an implicit storage order assumption, here!
        auto const local_mat = MathLib::toMatrix(values, num_r_c, num_r_c);

        for (std::size_t r_local = 0; r_local < num_r_c; ++r_local)
        {
            auto const r_global = indices[r_local];

            for (std::size_t c_local = 0; c_local < num_r_c; ++c_local)
            {
                ++stats_.count;
                auto const value = local_mat(r_local, c_local);

                // TODO skipping zero values sometimes does not work together
                // with the Eigen SparseLU linear solver. See also
                // https://gitlab.opengeosys.org/ogs/ogs/-/merge_requests/4556#note_125561
#if 0
                if (value == 0)
                {
                    continue;
                }
#endif
                ++stats_.count_nonzero;

                auto const c_global = indices[c_local];
                cache_.emplace_back(std::array{r_global, c_global}, value);
            }
        }
    }

    void ensureEnoughSpace(std::size_t const space_needed)
    {
        auto const size_initial = cache_.size();
        auto const cap_initial = cache_.capacity();

        if (size_initial + space_needed <= cap_initial)
        {
            return;
        }

        addToGlobal();

        // ensure again that there is enough capacity (corner case, if initial
        // capacity is too small because of whatever arcane reason)
        auto const size_after = cache_.size();
        auto const cap_after = cache_.capacity();

        if (size_after + space_needed > cap_after)
        {
            cache_.reserve(size_after + 2 * space_needed);
        }
    }

    void addToGlobal()
    {
        mat_or_vec_.add(cache_);
        stats_.count_global += cache_.size();
        cache_.clear();
    }

    std::vector<MatrixElementCacheEntry<Dim>> cache_;
    GlobalMatView& mat_or_vec_;
    Stats& stats_;
};

class MultiMatrixElementCache
{
    using GlobalMatrixView = ConcurrentMatrixView<2>;
    using GlobalVectorView = ConcurrentMatrixView<1>;

public:
    MultiMatrixElementCache(GlobalMatrixView& M, GlobalMatrixView& K,
                            GlobalVectorView& b, GlobalMatrixView& Jac,
                            MultiStats& stats)
        : cache_M_(M, stats.M),
          cache_K_(K, stats.K),
          cache_b_(b, stats.b),
          cache_Jac_(Jac, stats.Jac)
    {
    }

    void add(std::vector<double> const& local_M_data,
             std::vector<double> const& local_K_data,
             std::vector<double> const& local_b_data,
             std::vector<double> const& local_Jac_data,
             std::vector<GlobalIndexType> const& indices)
    {
        cache_M_.add(local_M_data, indices);
        cache_K_.add(local_K_data, indices);
        cache_b_.add(local_b_data, indices);
        cache_Jac_.add(local_Jac_data, indices);
    }

private:
    MatrixElementCache<2> cache_M_;
    MatrixElementCache<2> cache_K_;
    MatrixElementCache<1> cache_b_;
    MatrixElementCache<2> cache_Jac_;
};
}  // namespace ProcessLib::Assembly
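
For orientation, a minimal usage sketch follows. It is not taken from OGS itself: computeLocalContribution is a hypothetical stand-in for a local assembler, and MultiStats is assumed to be default-constructible here. The sketch shows the intended division of labor: one ConcurrentMatrixView per shared global object, and one MultiMatrixElementCache per assembly thread that buffers local contributions and flushes them into the mutex-protected views.

#include <cstddef>
#include <vector>

#include "ProcessLib/Assembly/MatrixElementCache.h"

// Hypothetical local assembler: fills the element's dense, square local
// matrices/vector (see the storage order caveat in addToCacheImpl) and its
// global DOF indices.
void computeLocalContribution(std::size_t element_id,
                              std::vector<double>& local_M_data,
                              std::vector<double>& local_K_data,
                              std::vector<double>& local_b_data,
                              std::vector<double>& local_Jac_data,
                              std::vector<GlobalIndexType>& indices);

void assembleAll(GlobalMatrix& M, GlobalMatrix& K, GlobalVector& b,
                 GlobalMatrix& Jac, std::size_t const num_elements)
{
    using namespace ProcessLib::Assembly;

    // One mutex-protected view per shared global object; the deduction guides
    // select the matrix (Dim = 2) or vector (Dim = 1) specialization.
    ConcurrentMatrixView M_view(M);
    ConcurrentMatrixView K_view(K);
    ConcurrentMatrixView b_view(b);
    ConcurrentMatrixView Jac_view(Jac);

    MultiStats stats;  // assumed default-constructible for this sketch

    // In a threaded assembly each worker would own one cache. Entries are
    // flushed into the shared views when a cache runs full and, finally,
    // when the cache is destroyed at the end of this scope.
    MultiMatrixElementCache cache(M_view, K_view, b_view, Jac_view, stats);

    std::vector<double> local_M_data, local_K_data, local_b_data,
        local_Jac_data;
    std::vector<GlobalIndexType> indices;

    for (std::size_t e = 0; e < num_elements; ++e)
    {
        computeLocalContribution(e, local_M_data, local_K_data, local_b_data,
                                 local_Jac_data, indices);

        cache.add(local_M_data, local_K_data, local_b_data, local_Jac_data,
                  indices);
    }
}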