MatrixManager.cpp File Reference

The source code of this file is listed below.

Classes

struct  MoFEM::CreateRowCompressedADJMatrix
 Create compressed matrix.
 

Namespaces

namespace  MoFEM
 implementation of Data Operators for Forces and Sources
 

Macros

#define MatrixManagerFunctionBegin
 Create adjacency matrices using different indices.
 

Typedefs

using MoFEM::CreateRowComressedADJMatrix = CreateRowCompressedADJMatrix
 

Macro Definition Documentation

◆ MatrixManagerFunctionBegin

#define MatrixManagerFunctionBegin
Value:
MOFEM_LOG_CHANNEL("WORLD"); \
MOFEM_LOG_CHANNEL("SYNC"); \
MOFEM_LOG_FUNCTION(); \
MOFEM_LOG_TAG("SYNC", "MatrixManager"); \
MOFEM_LOG_TAG("WORLD", "MatrixManager")
#define MoFEMFunctionBegin
First executable line of each MoFEM function, used for error handling. Final line of MoFEM functions ...

Create adjacency matrices using different indices.

Definition at line 6 of file MatrixManager.cpp.
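In use, the macro opens MoFEM's error-handling scope (via MoFEMFunctionBegin) and tags the "WORLD" and "SYNC" logging channels, so everything logged from this file reports under the "MatrixManager" tag; the scope is later closed by MoFEMFunctionReturn. A minimal sketch of the pattern (the function name is illustrative, not part of this file):

MoFEMErrorCode exampleMatrixManagerMethod() {
  MatrixManagerFunctionBegin; // open error handling, tag log channels
  MOFEM_LOG("WORLD", Sev::verbose) << "inside a MatrixManager method";
  MoFEMFunctionReturn(0); // close the error-handling scope
}

The numbered source listing of MatrixManager.cpp follows.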

13namespace MoFEM {
14
15/** \brief Create compressed matrix
16
17 \note Only member functions are allowed in this class. NO VARIABLES.
18
19 \todo This is an obsolete implementation; the code should be moved to the
20 interface MatrixManager.
21
22 \todo While the matrix is created it is assumed that all entities on an
23 element are adjacent to each other; in some cases this creates a denser
24 matrix than necessary. Some kind of filtering could be added.
25
26 \todo Creation of the block matrices
27
28 \todo Some efficiency improvements are possible
29
30
31 */
32struct CreateRowCompressedADJMatrix : public Core {
33
34 CreateRowCompressedADJMatrix(moab::Interface &moab,
35 MPI_Comm comm = PETSC_COMM_WORLD,
36 int verbose = 1)
37 : Core(moab, comm, verbose) {}
38
39 using ProblemsByName = Problem_multiIndex::index<Problem_mi_tag>::type;
40
41 /** \brief Create matrix adjacencies
42
43 Depending on the TAG type, which selects the structure used to number DOFs,
44 the matrix is either partitioned using the part number stored in the
45 multi-index, or partitioned based only on the index number.
46
47 See: Idx_mi_tag, PetscGlobalIdx_mi_tag and PetscLocalIdx_mi_tag
48
49 */
50 template <typename TAG>
51 MoFEMErrorCode
52 createMatArrays(ProblemsByName::iterator p_miit, const MatType type,
53 std::vector<PetscInt> &i, std::vector<PetscInt> &j,
54 const bool no_diagonals = true, int verb = QUIET) const;
55
56private:
57 using AdjByEnt = FieldEntityEntFiniteElementAdjacencyMap_multiIndex::index<
58 Unique_mi_tag>::type;
59
60 using DofByGlobalPetscIndex =
61 NumeredDofEntity_multiIndex::index<PetscGlobalIdx_mi_tag>::type;
62
63 /** \brief Get element adjacencies
64 */
65 template <typename TAG>
66 MoFEMErrorCode getEntityAdjacencies(
67 ProblemsByName::iterator p_miit,
68 typename boost::multi_index::index<NumeredDofEntity_multiIndex,
69 TAG>::type::iterator mit_row,
70 boost::shared_ptr<FieldEntity> mofem_ent_ptr,
71 std::vector<int> &dofs_col_view, int verb) const;
72};
73
74/**
75 * @deprecated do not use, instead use CreateRowCompressedADJMatrix
76 */
77using CreateRowComressedADJMatrix = CreateRowCompressedADJMatrix;
78
79template <typename TAG>
80MoFEMErrorCode CreateRowCompressedADJMatrix::getEntityAdjacencies(
81 ProblemsByName::iterator p_miit,
82 typename boost::multi_index::index<NumeredDofEntity_multiIndex,
83 TAG>::type::iterator mit_row,
84 boost::shared_ptr<FieldEntity> mofem_ent_ptr,
85 std::vector<int> &dofs_col_view, int verb) const {
86 MoFEMFunctionBegin;
87
88 BitRefLevel prb_bit = p_miit->getBitRefLevel();
89 BitRefLevel prb_mask = p_miit->getBitRefLevelMask();
90
91 const EmptyFieldBlocks &empty_field_blocks = p_miit->getEmptyFieldBlocks();
92
93 dofs_col_view.clear();
94 for (auto r = entFEAdjacencies.get<Unique_mi_tag>().equal_range(
95 mofem_ent_ptr->getLocalUniqueId());
96 r.first != r.second; ++r.first) {
97
98 if (r.first->byWhat & BYROW) {
99
100 if ((r.first->entFePtr->getId() & p_miit->getBitFEId()).none()) {
101 // if element is not part of the problem
102 continue;
103 }
104
105 const BitRefLevel &fe_bit = r.first->entFePtr->getBitRefLevel();
106 // check that entity is on the problem refinement level
107 if ((fe_bit & prb_bit).any() && (fe_bit & prb_mask) == fe_bit) {
108
109 auto check_block = [&empty_field_blocks](auto &r_if, auto &col_id) {
110 for (auto &b : empty_field_blocks) {
111 if ((b.first & r_if).any() && (b.second & col_id).any()) {
112 return false;
113 }
114 }
115 return true;
116 };
117
118 for (auto &it : r.first->entFePtr->getColFieldEnts()) {
119 if (auto e = it.lock()) {
120
121 if (check_block((*mit_row)->getId(), e->getId())) {
122
123 if (auto cache = e->entityCacheColDofs.lock()) {
124 const auto lo = cache->loHi[0];
125 const auto hi = cache->loHi[1];
126 for (auto vit = lo; vit != hi; ++vit) {
127
128 const int idx = TAG::get_index(vit);
129 if (PetscLikely(idx >= 0))
130 dofs_col_view.push_back(idx);
131
132#ifndef NDEBUG
133 const DofIdx nb_dofs_col = p_miit->getNbDofsCol();
134 if (PetscUnlikely(idx >= nb_dofs_col)) {
135 MOFEM_LOG("SELF", Sev::error)
136 << "Problem with dof: " << std::endl
137 << "Rank " << rAnk << " : " << *(*vit);
138 SETERRQ(
139 mofemComm, PETSC_ERR_ARG_SIZ,
140 "Index of dof larger than number of DOFs in column");
141 }
142#endif
143 }
144
145 } else {
146 SETERRQ(mofemComm, MOFEM_DATA_INCONSISTENCY, "Cache not set");
147 }
148 }
149 }
150 }
151 }
152 }
153 }
154
155 MoFEMFunctionReturn(0);
156}
157
158template <typename TAG>
159MoFEMErrorCode CreateRowCompressedADJMatrix::createMatArrays(
160 ProblemsByName::iterator p_miit, const MatType type,
161 std::vector<PetscInt> &i, std::vector<PetscInt> &j, const bool no_diagonals,
162 int verb) const {
163 MoFEMFunctionBegin;
164
165 PetscLogEventBegin(MOFEM_EVENT_createMat, 0, 0, 0, 0);
166
167 auto cache = boost::make_shared<std::vector<EntityCacheNumeredDofs>>(
168 entsFields.size());
169
170 size_t idx = 0;
171 for (auto it = entsFields.begin(); it != entsFields.end(); ++it, ++idx) {
172
173 const auto uid = (*it)->getLocalUniqueId();
174 auto r = entFEAdjacencies.get<Unique_mi_tag>().equal_range(uid);
175 for (auto lo = r.first; lo != r.second; ++lo) {
176
177 if ((lo->getBitFEId() & p_miit->getBitFEId()).any()) {
178
179 const BitRefLevel &prb_bit = p_miit->getBitRefLevel();
180 const BitRefLevel &prb_mask = p_miit->getBitRefLevelMask();
181 const BitRefLevel &fe_bit = lo->entFePtr->getBitRefLevel();
182
183 // skip if entity is not on the problem refinement level
184 if ((fe_bit & prb_mask) != fe_bit)
185 continue;
186 if ((fe_bit & prb_bit).none())
187 continue;
188
189 auto dit = p_miit->numeredColDofsPtr->lower_bound(uid);
190
191 decltype(dit) hi_dit;
192 if (dit != p_miit->numeredColDofsPtr->end())
193 hi_dit = p_miit->numeredColDofsPtr->upper_bound(
194 uid | static_cast<UId>(MAX_DOFS_ON_ENTITY - 1));
195 else
196 hi_dit = dit;
197
198 (*it)->entityCacheColDofs =
199 boost::shared_ptr<EntityCacheNumeredDofs>(cache, &((*cache)[idx]));
200 (*cache)[idx].loHi = {dit, hi_dit};
201
202 break;
203 }
204 }
205 }
206
207 using NumeredDofEntitysByIdx =
208 typename boost::multi_index::index<NumeredDofEntity_multiIndex,
209 TAG>::type;
210
211 // Get multi-indices for rows and columns
212 const NumeredDofEntitysByIdx &dofs_row_by_idx =
213 p_miit->numeredRowDofsPtr->get<TAG>();
214 int nb_dofs_row = p_miit->getNbDofsRow();
215 if (nb_dofs_row == 0) {
216 SETERRQ(mofemComm, MOFEM_DATA_INCONSISTENCY, "problem <%s> has zero rows",
217 p_miit->getName().c_str());
218 }
219
220 // Get adjacencies from other processors
221 std::map<int, std::vector<int>> adjacent_dofs_on_other_parts;
222
223 // If not partitioned, set PETSc layout for matrix. If partitioned, we need
224 // to get adjacencies from other parts. Note that if only the algebra is
225 // partitioned, there is no need to collect adjacencies from other entities;
226 // those are already on the mesh, which is assumed the same on each processor.
227 typename boost::multi_index::index<NumeredDofEntity_multiIndex,
228 TAG>::type::iterator miit_row,
229 hi_miit_row;
230
231 if (TAG::IamNotPartitioned) {
232
233 // Get range of local indices
234 PetscLayout layout;
235 CHKERR PetscLayoutCreate(mofemComm, &layout);
236 CHKERR PetscLayoutSetBlockSize(layout, 1);
237 CHKERR PetscLayoutSetSize(layout, nb_dofs_row);
238 CHKERR PetscLayoutSetUp(layout);
239 PetscInt rstart, rend;
240 CHKERR PetscLayoutGetRange(layout, &rstart, &rend);
241 CHKERR PetscLayoutDestroy(&layout);
242
243 if (verb >= VERBOSE) {
244 MOFEM_LOG("SYNC", Sev::noisy)
245 << "row lower " << rstart << " row upper " << rend;
246 MOFEM_LOG_SYNCHRONISE(mofemComm)
247 }
248
249 miit_row = dofs_row_by_idx.lower_bound(rstart);
250 hi_miit_row = dofs_row_by_idx.lower_bound(rend);
251 if (std::distance(miit_row, hi_miit_row) != rend - rstart) {
252 SETERRQ(
253 mofemComm, PETSC_ERR_ARG_SIZ,
254 "data inconsistency, std::distance(miit_row,hi_miit_row) != rend - "
255 "rstart (%ld != %d - %d = %d) ",
256 std::distance(miit_row, hi_miit_row), rend, rstart, rend - rstart);
257 }
258
259 } else {
260
261 MPI_Comm comm;
262 // Make sure it is a PETSc communicator
263 CHKERR PetscCommDuplicate(get_comm(), &comm, NULL);
264
265 // get adjacent nodes on other partitions
266 std::vector<std::vector<int>> dofs_vec(sIze);
267
268 boost::shared_ptr<FieldEntity> mofem_ent_ptr;
269 std::vector<int> dofs_col_view;
270
271 typename boost::multi_index::index<NumeredDofEntity_multiIndex,
272 TAG>::type::iterator mit_row,
273 hi_mit_row;
274
275 mit_row = dofs_row_by_idx.begin();
276 hi_mit_row = dofs_row_by_idx.end();
277 for (; mit_row != hi_mit_row; mit_row++) {
278
279 // Shared or multi-shared row and not owned. Those data should be sent to
280 // the other side.
281
282 // Get entity adjacencies; no need to repeat that operation for DOFs that
283 // are on the same entity. For simplicity it is assumed that those share
284 // the same adjacencies.
285 unsigned char pstatus = (*mit_row)->getPStatus();
286 if ((pstatus & PSTATUS_NOT_OWNED) &&
287 (pstatus & (PSTATUS_SHARED | PSTATUS_MULTISHARED))) {
288
289 bool get_adj_col = true;
290 if (mofem_ent_ptr) {
291 if (mofem_ent_ptr->getLocalUniqueId() ==
292 (*mit_row)->getFieldEntityPtr()->getLocalUniqueId()) {
293 get_adj_col = false;
294 }
295 }
296
297 if (get_adj_col) {
298 // Get entity adjacencies
299 mofem_ent_ptr = (*mit_row)->getFieldEntityPtr();
300 CHKERR getEntityAdjacencies<TAG>(p_miit, mit_row, mofem_ent_ptr,
301 dofs_col_view, verb);
302 // Sort, unique and resize dofs_col_view
303 {
304 std::sort(dofs_col_view.begin(), dofs_col_view.end());
305 std::vector<int>::iterator new_end =
306 std::unique(dofs_col_view.begin(), dofs_col_view.end());
307 int new_size = std::distance(dofs_col_view.begin(), new_end);
308 dofs_col_view.resize(new_size);
309 }
310 // Add that row. The pattern is: first the row index, then the number of
311 // adjacencies, after which follow the column adjacencies.
312 int owner = (*mit_row)->getOwnerProc();
313 dofs_vec[owner].emplace_back(TAG::get_index(mit_row)); // row index
314 dofs_vec[owner].emplace_back(
315 dofs_col_view.size()); // nb. of column adjacencies
316 // add adjacent columns
317 dofs_vec[owner].insert(dofs_vec[owner].end(), dofs_col_view.begin(),
318 dofs_col_view.end());
319 }
320 }
321 }
322
323 int nsends = 0; // number of messages to send
324 std::vector<int> dofs_vec_length(sIze); // length of the message to proc
325 for (int proc = 0; proc < sIze; proc++) {
326
327 if (!dofs_vec[proc].empty()) {
328
329 dofs_vec_length[proc] = dofs_vec[proc].size();
330 nsends++;
331
332 } else {
333
334 dofs_vec_length[proc] = 0;
335 }
336 }
337
338 std::vector<MPI_Status> status(sIze);
339
340 // Computes the number of messages a node expects to receive
341 int nrecvs; // number of messages received
342 CHKERR PetscGatherNumberOfMessages(comm, NULL, &dofs_vec_length[0],
343 &nrecvs);
344
345 // Computes info about messages that an MPI node will receive, including
346 // (from-id, length) pairs for each message.
347 int *onodes; // list of node-ids from which messages are expected
348 int *olengths; // corresponding message lengths
349 CHKERR PetscGatherMessageLengths(comm, nsends, nrecvs, &dofs_vec_length[0],
350 &onodes, &olengths);
351
352 // Gets a unique new tag from a PETSc communicator.
353 int tag;
354 CHKERR PetscCommGetNewTag(comm, &tag);
355
356 // Allocate a buffer sufficient to hold messages of size specified in
357 // olengths, and post Irecvs on these buffers using node info from onodes
358 int **rbuf; // must be freed by user
359 MPI_Request *r_waits; // must be freed by user
360
361 // rbuf holds pointers to the messages. It has size nrecvs (number of
362 // messages) + 1. A single block is allocated at the first index,
363 // such that rbuf[i] = rbuf[i-1] + olengths[i-1].
364 CHKERR PetscPostIrecvInt(comm, tag, nrecvs, onodes, olengths, &rbuf,
365 &r_waits);
366
367 MPI_Request *s_waits; // requests of sent messages
368 CHKERR PetscMalloc1(nsends, &s_waits);
369
370 // Send messages
371 for (int proc = 0, kk = 0; proc < sIze; proc++) {
372 if (!dofs_vec_length[proc])
373 continue; // no message to send to this proc
374 CHKERR MPI_Isend(&(dofs_vec[proc])[0], // buffer to send
375 dofs_vec_length[proc], // message length
376 MPIU_INT, proc, // to proc
377 tag, comm, s_waits + kk);
378 kk++;
379 }
380
381 // Wait for receives
382 if (nrecvs) {
383 CHKERR MPI_Waitall(nrecvs, r_waits, &status[0]);
384 }
385 // Wait for sent messages
386 if (nsends) {
387 CHKERR MPI_Waitall(nsends, s_waits, &status[0]);
388 }
389
390 for (int kk = 0; kk < nrecvs; kk++) {
391
392 int len = olengths[kk];
393 int *data_from_proc = rbuf[kk];
394
395 for (int ii = 0; ii < len;) {
396
397 int row_idx = data_from_proc[ii++]; // get row number
398 int nb_adj_dofs = data_from_proc[ii++]; // get nb. of adjacent dofs
399
400 if (debug) {
401
402 DofByGlobalPetscIndex::iterator dit;
403 dit = p_miit->numeredRowDofsPtr->get<PetscGlobalIdx_mi_tag>().find(
404 row_idx);
405 if (dit ==
406 p_miit->numeredRowDofsPtr->get<PetscGlobalIdx_mi_tag>().end()) {
407 SETERRQ(get_comm(), MOFEM_DATA_INCONSISTENCY,
408 "dof %d can not be found in problem", row_idx);
409 }
410 }
411
412 for (int jj = 0; jj < nb_adj_dofs; jj++) {
413 adjacent_dofs_on_other_parts[row_idx].push_back(data_from_proc[ii++]);
414 }
415 }
416 }
417
418 // Cleaning
419 CHKERR PetscFree(s_waits);
420 CHKERR PetscFree(rbuf[0]);
421 CHKERR PetscFree(rbuf);
422 CHKERR PetscFree(r_waits);
423 CHKERR PetscFree(onodes);
424 CHKERR PetscFree(olengths);
425
426 miit_row = dofs_row_by_idx.begin();
427 hi_miit_row = dofs_row_by_idx.end();
428
429 CHKERR PetscCommDestroy(&comm);
430 }
431
432 boost::shared_ptr<FieldEntity> mofem_ent_ptr;
433 int row_last_evaluated_idx = -1;
434
435 std::vector<int> dofs_vec;
436 std::vector<int> dofs_col_view;
437
438 // loop local rows
439 int nb_loc_row_from_iterators = 0;
440 unsigned int rows_to_fill = p_miit->getNbLocalDofsRow();
441 i.reserve(rows_to_fill + 1);
442 for (; miit_row != hi_miit_row; miit_row++) {
443
444 if (!TAG::IamNotPartitioned) {
445 if (static_cast<int>((*miit_row)->getPart()) != rAnk)
446 continue;
447 }
448 // This is only a cross-check that everything is OK
449 nb_loc_row_from_iterators++;
450
451 // add next row to compressed matrix
452 i.push_back(j.size());
453
454 // Get entity adjacencies; no need to repeat that operation for DOFs that
455 // are on the same entity. For simplicity it is assumed that those share
456 // the same adjacencies.
457 if ((!mofem_ent_ptr)
458 ? 1
459 : (mofem_ent_ptr->getLocalUniqueId() !=
460 (*miit_row)->getFieldEntityPtr()->getLocalUniqueId())) {
461
462 // get entity adjacencies
463 mofem_ent_ptr = (*miit_row)->getFieldEntityPtr();
464 CHKERR getEntityAdjacencies<TAG>(p_miit, miit_row, mofem_ent_ptr,
465 dofs_col_view, verb);
466 row_last_evaluated_idx = TAG::get_index(miit_row);
467
468 dofs_vec.resize(0);
469 // insert dofs_col_view
470 dofs_vec.insert(dofs_vec.end(), dofs_col_view.begin(),
471 dofs_col_view.end());
472
473 unsigned char pstatus = (*miit_row)->getPStatus();
474 if (pstatus > 0) {
475 std::map<int, std::vector<int>>::iterator mit;
476 mit = adjacent_dofs_on_other_parts.find(row_last_evaluated_idx);
477 if (mit == adjacent_dofs_on_other_parts.end()) {
478 // NOTE: A DOF can be adjacent to another part while no elements there
479 // use that DOF, so this is not treated as an error.
480 // std::cerr << *miit_row << std::endl;
481 // SETERRQ(get_comm(), MOFEM_DATA_INCONSISTENCY,
482 // "data inconsistency row_last_evaluated_idx = %d",
483 // row_last_evaluated_idx);
484 } else {
485 dofs_vec.insert(dofs_vec.end(), mit->second.begin(),
486 mit->second.end());
487 }
488 }
489
490 // sort and make unique
491 sort(dofs_vec.begin(), dofs_vec.end());
492 std::vector<int>::iterator new_end =
493 unique(dofs_vec.begin(), dofs_vec.end());
494 int new_size = std::distance(dofs_vec.begin(), new_end);
495 dofs_vec.resize(new_size);
496 }
497
498 // Try to be smart when reserving memory
499 if (j.capacity() < j.size() + dofs_vec.size()) {
500
501 // TODO: [CORE-55] Improve algorithm estimating size of compressed matrix
502 unsigned int nb_nonzero = j.size() + dofs_vec.size();
503 unsigned int average_row_fill = nb_nonzero / i.size() + 1;
504 j.reserve(rows_to_fill * average_row_fill);
505 }
506
507 auto hi_diit = dofs_vec.end();
508 for (auto diit = dofs_vec.begin(); diit != hi_diit; diit++) {
509
510 if (no_diagonals) {
511 if (*diit == TAG::get_index(miit_row)) {
512 continue;
513 }
514 }
515 j.push_back(*diit);
516 }
517 }
518
519 // build adj matrix
520 i.push_back(j.size());
521
522 if (strcmp(type, MATMPIADJ) == 0) {
523
524 // Adjacency matrix used to partition problems, e.g. by METIS
525 if (i.size() - 1 != (unsigned int)nb_loc_row_from_iterators) {
526 SETERRQ(
527 get_comm(), PETSC_ERR_ARG_SIZ,
528 "Number of rows from iterator is different than size of rows in "
529 "compressed row "
530 "matrix (unsigned int)nb_local_dofs_row != i.size() - 1, i.e. %d != "
531 "%ld",
532 (unsigned int)nb_loc_row_from_iterators, i.size() - 1);
533 }
534
535 } else if (strcmp(type, MATMPIAIJ) == 0) {
536
537 // Compressed MPIAIJ matrix
538 if (i.size() - 1 != (unsigned int)nb_loc_row_from_iterators) {
539 SETERRQ(
540 get_comm(), PETSC_ERR_ARG_SIZ,
541 "Number of rows from iterator is different than size of rows in "
542 "compressed row "
543 "matrix (unsigned int)nb_local_dofs_row != i.size() - 1, i.e. %d != "
544 "%ld",
545 (unsigned int)nb_loc_row_from_iterators, i.size() - 1);
546 }
547 PetscInt nb_local_dofs_row = p_miit->getNbLocalDofsRow();
548 if ((unsigned int)nb_local_dofs_row != i.size() - 1) {
549 SETERRQ(
550 get_comm(), PETSC_ERR_ARG_SIZ,
551 "Number of rows is different than size of rows in compressed row "
552 "matrix (unsigned int)nb_local_dofs_row != i.size() - 1, i.e. %d != "
553 "%ld",
554 (unsigned int)nb_local_dofs_row, i.size() - 1);
555 }
556
557 } else if (strcmp(type, MATAIJ) == 0) {
558
559 // Sequential compressed AIJ matrix
560 if (i.size() - 1 != (unsigned int)nb_loc_row_from_iterators) {
561 SETERRQ(
562 get_comm(), PETSC_ERR_ARG_SIZ,
563 "Number of rows form iterator is different than size of rows in "
564 "compressed row "
565 "matrix (unsigned int)nb_local_dofs_row != i.size() - 1, i.e. %d != "
566 "%ld",
567 (unsigned int)nb_loc_row_from_iterators, i.size() - 1);
568 }
569 PetscInt nb_local_dofs_row = p_miit->getNbLocalDofsRow();
570 if ((unsigned int)nb_local_dofs_row != i.size() - 1) {
571 SETERRQ(
572 get_comm(), PETSC_ERR_ARG_SIZ,
573 "Number of rows is different than size of rows in compressed row "
574 "matrix (unsigned int)nb_local_dofs_row != i.size() - 1, i.e. %d != "
575 "%ld",
576 (unsigned int)nb_local_dofs_row, i.size() - 1);
577 }
578
579 } else if (strcmp(type, MATAIJCUSPARSE) == 0) {
580
581 if (i.size() - 1 != (unsigned int)nb_loc_row_from_iterators) {
582 SETERRQ(get_comm(), PETSC_ERR_ARG_SIZ, "data inconsistency");
583 }
584 PetscInt nb_local_dofs_row = p_miit->getNbLocalDofsRow();
585 if ((unsigned int)nb_local_dofs_row != i.size() - 1) {
586 SETERRQ(get_comm(), PETSC_ERR_ARG_SIZ, "data inconsistency");
587 }
588
589 } else {
590
591 SETERRQ(get_comm(), PETSC_ERR_ARG_NULL, "not implemented");
592 }
593
594 PetscLogEventEnd(MOFEM_EVENT_createMat, 0, 0, 0, 0);
595 MoFEMFunctionReturn(0);
596}
597
598MoFEMErrorCode
599MatrixManager::query_interface(boost::typeindex::type_index type_index,
600 UnknownInterface **iface) const {
601 *iface = const_cast<MatrixManager *>(this);
602 return 0;
603}
604
605MatrixManager::MatrixManager(const MoFEM::Core &core)
606 : cOre(const_cast<MoFEM::Core &>(core)) {
607 PetscLogEventRegister("MatMngCrtMPIAIJ", 0, &MOFEM_EVENT_createMPIAIJ);
608 PetscLogEventRegister("MatMngCrtMPIAIJWthArr", 0,
609 &MOFEM_EVENT_createMPIAIJWithArrays);
610 PetscLogEventRegister("MatMngCrtMPIAdjWithArr", 0,
611 &MOFEM_EVENT_createMPIAdjWithArrays);
612 PetscLogEventRegister("MatMngCrtMPIAIJCUSPARSEWthArr", 0,
613 &MOFEM_EVENT_createMPIAIJCUSPARSEWithArrays);
614 PetscLogEventRegister("MatMngCrtSeqAIJCUSPARSEWthArrs", 0,
615 &MOFEM_EVENT_createSeqAIJCUSPARSEWithArrays);
616 PetscLogEventRegister("MatMngCrtSeqAIJWthArrs", 0,
617 &MOFEM_EVENT_createSeqAIJWithArrays);
618 PetscLogEventRegister("MatMngCrtCheckMPIAIJWthArrFillIn", 0,
619 &MOFEM_EVENT_checkMatrixFillIn);
620}
621
622template <>
623MoFEMErrorCode MatrixManager::createMPIAIJWithArrays<PetscGlobalIdx_mi_tag>(
624 const std::string name, Mat *Aij, int verb) {
625 MatrixManagerFunctionBegin;
626
627 MoFEM::CoreInterface &m_field = cOre;
628 CreateRowCompressedADJMatrix *core_ptr =
629 static_cast<CreateRowCompressedADJMatrix *>(&cOre);
630 PetscLogEventBegin(MOFEM_EVENT_createMPIAIJWithArrays, 0, 0, 0, 0);
631
632 auto problems_ptr = m_field.get_problems();
633 auto &prb = problems_ptr->get<Problem_mi_tag>();
634 auto p_miit = prb.find(name);
635 if (p_miit == prb.end()) {
636 SETERRQ(m_field.get_comm(), MOFEM_NOT_FOUND,
637 "problem < %s > is not found (top tip: check spelling)",
638 name.c_str());
639 }
640
641 std::vector<int> i_vec, j_vec;
642 j_vec.reserve(10000);
643 CHKERR core_ptr->createMatArrays<PetscGlobalIdx_mi_tag>(
644 p_miit, MATMPIAIJ, i_vec, j_vec, false, verb);
645
646 int nb_row_dofs = p_miit->getNbDofsRow();
647 int nb_col_dofs = p_miit->getNbDofsCol();
648 int nb_local_dofs_row = p_miit->getNbLocalDofsRow();
649 int nb_local_dofs_col = p_miit->getNbLocalDofsCol();
650
651 CHKERR ::MatCreateMPIAIJWithArrays(
652 m_field.get_comm(), nb_local_dofs_row, nb_local_dofs_col, nb_row_dofs,
653 nb_col_dofs, &*i_vec.begin(), &*j_vec.begin(), PETSC_NULLPTR, Aij);
654
655 PetscLogEventEnd(MOFEM_EVENT_createMPIAIJWithArrays, 0, 0, 0, 0);
656 MoFEMFunctionReturn(0);
657}
658
659template <>
660MoFEMErrorCode
661MatrixManager::createMPIAIJCUSPARSEWithArrays<PetscGlobalIdx_mi_tag>(
662 const std::string name, Mat *Aij, int verb) {
663 MatrixManagerFunctionBegin;
664
665 MoFEM::CoreInterface &m_field = cOre;
666 CreateRowCompressedADJMatrix *core_ptr =
667 static_cast<CreateRowCompressedADJMatrix *>(&cOre);
668 PetscLogEventBegin(MOFEM_EVENT_createMPIAIJCUSPARSEWithArrays, 0, 0, 0, 0);
669
670 auto problems_ptr = m_field.get_problems();
671 auto &prb = problems_ptr->get<Problem_mi_tag>();
672 auto p_miit = prb.find(name);
673 if (p_miit == prb.end()) {
674 SETERRQ(m_field.get_comm(), MOFEM_NOT_FOUND,
675 "problem < %s > is not found (top tip: check spelling)",
676 name.c_str());
677 }
678
679 std::vector<int> i_vec, j_vec;
680 j_vec.reserve(10000);
681 CHKERR core_ptr->createMatArrays<PetscGlobalIdx_mi_tag>(
682 p_miit, MATAIJCUSPARSE, i_vec, j_vec, false, verb);
683
684 int nb_local_dofs_row = p_miit->getNbLocalDofsRow();
685 int nb_local_dofs_col = p_miit->getNbLocalDofsCol();
686
687 auto get_layout = [&]() {
688 int start_ranges, end_ranges;
689 PetscLayout layout;
690 CHKERR PetscLayoutCreate(m_field.get_comm(), &layout);
691 CHKERR PetscLayoutSetBlockSize(layout, 1);
692 CHKERR PetscLayoutSetLocalSize(layout, nb_local_dofs_col);
693 CHKERR PetscLayoutSetUp(layout);
694 CHKERR PetscLayoutGetRange(layout, &start_ranges, &end_ranges);
695 CHKERR PetscLayoutDestroy(&layout);
696 return std::make_pair(start_ranges, end_ranges);
697 };
698
699 auto get_nnz = [&](auto &d_nnz, auto &o_nnz) {
700 MoFEMFunctionBeginHot;
701 auto layout = get_layout();
702 int j = 0;
703 for (int i = 0; i != nb_local_dofs_row; ++i) {
704 for (; j != i_vec[i + 1]; ++j) {
705 if (j_vec[j] < layout.second && j_vec[j] >= layout.first)
706 ++(d_nnz[i]);
707 else
708 ++(o_nnz[i]);
709 }
710 }
711 MoFEMFunctionReturnHot(0);
712 };
713
714 std::vector<int> d_nnz(nb_local_dofs_row, 0), o_nnz(nb_local_dofs_row, 0);
715 CHKERR get_nnz(d_nnz, o_nnz);
716
717#ifdef PETSC_HAVE_CUDA
718 CHKERR ::MatCreateAIJCUSPARSE(m_field.get_comm(), nb_local_dofs_row,
719 nb_local_dofs_col, p_miit->getNbDofsRow(), p_miit->getNbDofsCol(), 0,
720 &*d_nnz.begin(), 0, &*o_nnz.begin(), Aij);
721#else
722 SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED,
723 "Error: To use this matrix type compile PETSc with CUDA.");
724#endif
725
726 PetscLogEventEnd(MOFEM_EVENT_createMPIAIJCUSPARSEWithArrays, 0, 0, 0, 0);
727
728 MoFEMFunctionReturn(0);
729}
730
731template <>
732MoFEMErrorCode
733MatrixManager::createSeqAIJCUSPARSEWithArrays<PetscLocalIdx_mi_tag>(
734 const std::string name, Mat *Aij, int verb) {
735 MatrixManagerFunctionBegin;
736
737#ifdef PETSC_HAVE_CUDA
738 // CHKERR ::MatCreateSeqAIJCUSPARSE(MPI_Comm comm, PetscInt m, PetscInt n,
739 // PetscInt nz, const PetscInt nnz[], Mat
740 // *A);
741 SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED,
742 "Not implemented type of matrix yet, try MPI version (aijcusparse)");
743#endif
744
745 MoFEMFunctionReturn(0);
746}
747
748template <>
749MoFEMErrorCode
750MatrixManager::createMPIAIJ<PetscGlobalIdx_mi_tag>(const std::string name,
751 Mat *Aij, int verb) {
752 MoFEM::CoreInterface &m_field = cOre;
753 CreateRowCompressedADJMatrix *core_ptr =
754 static_cast<CreateRowCompressedADJMatrix *>(&cOre);
755 MatrixManagerFunctionBegin;
756 PetscLogEventBegin(MOFEM_EVENT_createMPIAIJ, 0, 0, 0, 0);
757
758 auto problems_ptr = m_field.get_problems();
759 auto &prb = problems_ptr->get<Problem_mi_tag>();
760 auto p_miit = prb.find(name);
761 if (p_miit == prb.end()) {
762 SETERRQ(m_field.get_comm(), MOFEM_NOT_FOUND,
763 "problem < %s > is not found (top tip: check spelling)",
764 name.c_str());
765 }
766
767 std::vector<int> i_vec, j_vec;
768 j_vec.reserve(10000);
769 CHKERR core_ptr->createMatArrays<PetscGlobalIdx_mi_tag>(
770 p_miit, MATMPIAIJ, i_vec, j_vec, false, verb);
771
772 int nb_row_dofs = p_miit->getNbDofsRow();
773 int nb_col_dofs = p_miit->getNbDofsCol();
774 int nb_local_dofs_row = p_miit->getNbLocalDofsRow();
775 int nb_local_dofs_col = p_miit->getNbLocalDofsCol();
776
777 auto get_layout = [&]() {
778 int start_ranges, end_ranges;
779 PetscLayout layout;
780 CHKERR PetscLayoutCreate(m_field.get_comm(), &layout);
781 CHKERR PetscLayoutSetBlockSize(layout, 1);
782 CHKERR PetscLayoutSetLocalSize(layout, nb_local_dofs_col);
783 CHKERR PetscLayoutSetUp(layout);
784 CHKERR PetscLayoutGetRange(layout, &start_ranges, &end_ranges);
785 CHKERR PetscLayoutDestroy(&layout);
786 return std::make_pair(start_ranges, end_ranges);
787 };
788
789 auto get_nnz = [&](auto &d_nnz, auto &o_nnz) {
790 MoFEMFunctionBeginHot;
791 auto layout = get_layout();
792 int j = 0;
793 for (int i = 0; i != nb_local_dofs_row; ++i) {
794 for (; j != i_vec[i + 1]; ++j) {
795 if (j_vec[j] < layout.second && j_vec[j] >= layout.first)
796 ++(d_nnz[i]);
797 else
798 ++(o_nnz[i]);
799 }
800 }
801 MoFEMFunctionReturnHot(0);
802 };
803
804 std::vector<int> d_nnz(nb_local_dofs_row, 0), o_nnz(nb_local_dofs_row, 0);
805 CHKERR get_nnz(d_nnz, o_nnz);
806
807 CHKERR MatCreate(m_field.get_comm(), Aij);
808 CHKERR MatSetSizes(*Aij, nb_local_dofs_row, nb_local_dofs_col, nb_row_dofs,
809 nb_col_dofs);
810 CHKERR MatSetType(*Aij, MATMPIAIJ);
811 CHKERR MatMPIAIJSetPreallocation(*Aij, 0, &*d_nnz.begin(), 0,
812 &*o_nnz.begin());
813
814 PetscLogEventEnd(MOFEM_EVENT_createMPIAIJ, 0, 0, 0, 0);
815 MoFEMFunctionReturn(0);
816}
817
818template <>
819MoFEMErrorCode
820MatrixManager::createMPIAdjWithArrays<Idx_mi_tag>(const std::string name,
821 Mat *Adj, int verb) {
822 MoFEM::CoreInterface &m_field = cOre;
823 CreateRowCompressedADJMatrix *core_ptr =
824 static_cast<CreateRowCompressedADJMatrix *>(&cOre);
825 MatrixManagerFunctionBegin;
826 PetscLogEventBegin(MOFEM_EVENT_createMPIAdjWithArrays, 0, 0, 0, 0);
827
828 auto problems_ptr = m_field.get_problems();
829 auto &prb = problems_ptr->get<Problem_mi_tag>();
830 auto p_miit = prb.find(name);
831 if (p_miit == prb.end()) {
832 SETERRQ(m_field.get_comm(), MOFEM_NOT_FOUND,
833 "problem < %s > is not found (top tip: check spelling)",
834 name.c_str());
835 }
836
837 std::vector<int> i_vec, j_vec;
838 j_vec.reserve(10000);
839 CHKERR core_ptr->createMatArrays<Idx_mi_tag>(p_miit, MATMPIADJ, i_vec, j_vec,
840 true, verb);
841 int *_i, *_j;
842 CHKERR PetscMalloc(i_vec.size() * sizeof(int), &_i);
843 CHKERR PetscMalloc(j_vec.size() * sizeof(int), &_j);
844 copy(i_vec.begin(), i_vec.end(), _i);
845 copy(j_vec.begin(), j_vec.end(), _j);
846
847 int nb_col_dofs = p_miit->getNbDofsCol();
848 CHKERR MatCreateMPIAdj(m_field.get_comm(), i_vec.size() - 1, nb_col_dofs, _i,
849 _j, PETSC_NULLPTR, Adj);
850 CHKERR MatSetOption(*Adj, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);
851
852 PetscLogEventEnd(MOFEM_EVENT_createMPIAdjWithArrays, 0, 0, 0, 0);
853 MoFEMFunctionReturn(0);
854}
855
856template <>
857MoFEMErrorCode MatrixManager::createSeqAIJWithArrays<PetscLocalIdx_mi_tag>(
858 const std::string name, Mat *Aij, int verb) {
859 MoFEM::CoreInterface &m_field = cOre;
860 CreateRowCompressedADJMatrix *core_ptr =
861 static_cast<CreateRowCompressedADJMatrix *>(&cOre);
862 MatrixManagerFunctionBegin;
863 PetscLogEventBegin(MOFEM_EVENT_createMPIAIJWithArrays, 0, 0, 0, 0);
864
865 auto problems_ptr = m_field.get_problems();
866 auto &prb = problems_ptr->get<Problem_mi_tag>();
867 auto p_miit = prb.find(name);
868 if (p_miit == prb.end()) {
869 SETERRQ(m_field.get_comm(), MOFEM_NOT_FOUND,
870 "problem < %s > is not found (top tip: check spelling)",
871 name.c_str());
872 }
873
874 std::vector<int> i_vec, j_vec;
875 j_vec.reserve(10000);
876 CHKERR core_ptr->createMatArrays<PetscGlobalIdx_mi_tag>(p_miit, MATAIJ, i_vec,
877 j_vec, false, verb);
878
879 int nb_local_dofs_row = p_miit->getNbLocalDofsRow();
880 int nb_local_dofs_col = p_miit->getNbLocalDofsCol();
881
882 double *_a;
883 CHKERR PetscMalloc(j_vec.size() * sizeof(double), &_a);
884
885 Mat tmpMat;
886 CHKERR ::MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, nb_local_dofs_row,
887 nb_local_dofs_col, &*i_vec.begin(),
888 &*j_vec.begin(), _a, &tmpMat);
889 CHKERR MatDuplicate(tmpMat, MAT_SHARE_NONZERO_PATTERN, Aij);
890 CHKERR MatDestroy(&tmpMat);
891
892 CHKERR PetscFree(_a);
893
894 PetscLogEventEnd(MOFEM_EVENT_createMPIAIJWithArrays, 0, 0, 0, 0);
895 MoFEMFunctionReturn(0);
896}
897
898MoFEMErrorCode MatrixManager::checkMatrixFillIn(const std::string problem_name,
899 int row_print, int col_print,
900 Mat A, int verb) {
901 MoFEM::CoreInterface &m_field = cOre;
902 MatrixManagerFunctionBegin;
903
904 PetscLogEventBegin(MOFEM_EVENT_checkMatrixFillIn, 0, 0, 0, 0);
905
906 struct TestMatrixFillIn : public FEMethod {
907 CoreInterface *mFieldPtr;
908
909 Mat A;
910
911 int rowPrint, colPrint;
912
913 TestMatrixFillIn(CoreInterface *m_field_ptr, Mat a, int row_print,
914 int col_print)
915 : mFieldPtr(m_field_ptr), A(a), rowPrint(row_print),
916 colPrint(col_print){};
917
918 MoFEMErrorCode preProcess() {
919 MoFEMFunctionBegin;
920 MoFEMFunctionReturn(0);
921 }
922
923 MoFEMErrorCode operator()() {
924 MoFEMFunctionBegin;
925
926 if (refinedFiniteElementsPtr->find(
927 numeredEntFiniteElementPtr->getEnt()) ==
928 refinedFiniteElementsPtr->end()) {
929 SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
930 "data inconsistency");
931 }
932
933 auto row_dofs = getRowDofsPtr();
934 auto col_dofs = getColDofsPtr();
935
936 for (auto cit = col_dofs->begin(); cit != col_dofs->end(); cit++) {
937
938 if (refinedEntitiesPtr->find((*cit)->getEnt()) ==
939 refinedEntitiesPtr->end()) {
940 SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
941 "data inconsistency");
942 }
943 if (!(*cit)->getActive()) {
944 SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
945 "data inconsistency");
946 }
947
948 FieldEntityEntFiniteElementAdjacencyMap_multiIndex::index<
949 Composite_Unique_mi_tag>::type::iterator ait;
950 ait = adjacenciesPtr->get<Composite_Unique_mi_tag>().find(
951 boost::make_tuple((*cit)->getFieldEntityPtr()->getLocalUniqueId(),
952 numeredEntFiniteElementPtr->getLocalUniqueId()));
953 if (ait == adjacenciesPtr->end()) {
954 SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
955 "adjacencies data inconsistency");
956 } else {
957 UId uid = ait->getEntUniqueId();
958 if (entitiesPtr->find(uid) == entitiesPtr->end()) {
959 SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
960 "data inconsistency");
961 }
962 if (dofsPtr->find((*cit)->getLocalUniqueId()) == dofsPtr->end()) {
963 SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
964 "data inconsistency");
965 }
966 }
967
968 if ((*cit)->getEntType() != MBVERTEX) {
969
970 auto range =
971 col_dofs->get<Ent_mi_tag>().equal_range((*cit)->getEnt());
972 int nb_dofs_on_ent = std::distance(range.first, range.second);
973
974 int max_order = (*cit)->getMaxOrder();
975 if ((*cit)->getNbOfCoeffs() * (*cit)->getOrderNbDofs(max_order) !=
976 nb_dofs_on_ent) {
977
978 /* It could be that you have
979 removed DOFs from the problem; for example, if this was a vector field
980 with components {Ux,Uy,Uz}, you removed Uz on some element. */
981
982 MOFEM_LOG("SELF", Sev::warning)
983 << "Warning: Number of Dofs in Col different than number "
984 "of dofs for given entity order "
985 << (*cit)->getNbOfCoeffs() * (*cit)->getOrderNbDofs(max_order)
986 << " " << nb_dofs_on_ent;
987 }
988 }
989 }
990
991 for (auto rit = row_dofs->begin(); rit != row_dofs->end(); rit++) {
992
993 if (refinedEntitiesPtr->find((*rit)->getEnt()) ==
994 refinedEntitiesPtr->end()) {
995 SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
996 "data inconsistency");
997 }
998 if (!(*rit)->getActive()) {
999 SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
1000 "data inconsistency");
1001 }
1002
1003 FieldEntityEntFiniteElementAdjacencyMap_multiIndex::index<
1004 Composite_Unique_mi_tag>::type::iterator ait;
1005 ait = adjacenciesPtr->get<Composite_Unique_mi_tag>().find(
1006 boost::make_tuple((*rit)->getFieldEntityPtr()->getLocalUniqueId(),
1007 numeredEntFiniteElementPtr->getLocalUniqueId()));
1008 if (ait == adjacenciesPtr->end()) {
1009 MOFEM_LOG_ATTRIBUTES("SYNC", LogManager::BitScope);
1010 MOFEM_LOG("SELF", Sev::error) << *(*rit);
1011 MOFEM_LOG("SELF", Sev::error) << *(*rit);
1012 MOFEM_LOG("SELF", Sev::error) << *numeredEntFiniteElementPtr;
1013 MOFEM_LOG("SELF", Sev::error) << "dof: " << (*rit)->getBitRefLevel();
1014 MOFEM_LOG("SELF", Sev::error)
1015 << "fe: " << numeredEntFiniteElementPtr->getBitRefLevel();
1016 MOFEM_LOG("SELF", Sev::error)
1017 << "problem: " << problemPtr->getBitRefLevel();
1018 MOFEM_LOG("SELF", Sev::error)
1019 << "problem mask: " << problemPtr->getBitRefLevelMask();
1020 SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
1021 "adjacencies data inconsistency");
1022 } else {
1023 UId uid = ait->getEntUniqueId();
1024 if (entitiesPtr->find(uid) == entitiesPtr->end()) {
1025 SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
1026 "data inconsistency");
1027 }
1028 if (dofsPtr->find((*rit)->getLocalUniqueId()) == dofsPtr->end()) {
1029 SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
1030 "data inconsistency");
1031 }
1032 }
1033 int row = (*rit)->getPetscGlobalDofIdx();
1034
1035 auto col_dofs = getColDofsPtr();
1036 for (auto cit = col_dofs->begin(); cit != col_dofs->end(); cit++) {
1037
1038 int col = (*cit)->getPetscGlobalDofIdx();
1039
1040 if (row == rowPrint && col == colPrint) {
1041 MOFEM_LOG("SELF", Sev::noisy) << "fe:\n"
1042 << *numeredEntFiniteElementPtr;
1043 MOFEM_LOG("SELF", Sev::noisy) << "row:\n" << *(*rit);
1044 MOFEM_LOG("SELF", Sev::noisy) << "col:\n" << *(*cit);
1045 MOFEM_LOG("SELF", Sev::noisy)
1046 << "fe:\n"
1047 << numeredEntFiniteElementPtr->getBitRefLevel();
1048 MOFEM_LOG("SELF", Sev::noisy) << "row:\n"
1049 << (*rit)->getBitRefLevel();
1050 MOFEM_LOG("SELF", Sev::noisy) << "col:\n"
1051 << (*cit)->getBitRefLevel();
1052 }
1053
1054 CHKERR MatSetValue(A, row, col, 1, INSERT_VALUES);
1055 }
1056
1057 if ((*rit)->getEntType() != MBVERTEX) {
1058
1059 auto range =
1060 row_dofs->get<Ent_mi_tag>().equal_range((*rit)->getEnt());
1061 int nb_dofs_on_ent = std::distance(range.first, range.second);
1062
1063 int max_order = (*rit)->getMaxOrder();
1064 if ((*rit)->getNbOfCoeffs() * (*rit)->getOrderNbDofs(max_order) !=
1065 nb_dofs_on_ent) {
1066
1067 /* It could be that you have removed DOFs from the problem; for
1068 * example, if this was a vector field with components {Ux,Uy,Uz},
1069 * you removed Uz on some element. */
1070
1071 MOFEM_LOG("SELF", Sev::warning)
1072 << "Warning: Number of Dofs in Row different than number "
1073 "of dofs for given entity order "
1074 << (*rit)->getNbOfCoeffs() * (*rit)->getOrderNbDofs(max_order)
1075 << " " << nb_dofs_on_ent;
1076 }
1077 }
1078 }
1079
1080 MoFEMFunctionReturn(0);
1081 }
1082
1083 MoFEMErrorCode postProcess() {
1084 MoFEMFunctionBegin;
1085
1086 // cerr << mFieldPtr->get_comm_rank() << endl;
1087 CHKERR MatAssemblyBegin(A, MAT_FLUSH_ASSEMBLY);
1088 CHKERR MatAssemblyEnd(A, MAT_FLUSH_ASSEMBLY);
1089
1090 MoFEMFunctionReturn(0);
1091 }
1092 };
1093
1094 // guard assembly: error on any new nonzero allocation
1095 CHKERR MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_TRUE);
1096
1097 if (verb >= VERY_VERBOSE) {
1098 MatView(A, PETSC_VIEWER_STDOUT_WORLD);
1099 }
1100
1101 if (verb >= NOISY) {
1102 MatView(A, PETSC_VIEWER_DRAW_WORLD);
1103 std::string wait;
1104 std::cin >> wait;
1105 }
1106
1107 TestMatrixFillIn method(&m_field, A, row_print, col_print);
1108
1109 // get problem
1110 auto problems_ptr = m_field.get_problems();
1111 auto &prb_set = problems_ptr->get<Problem_mi_tag>();
1112 auto p_miit = prb_set.find(problem_name);
1113 if (p_miit == prb_set.end())
1114 SETERRQ(m_field.get_comm(), MOFEM_DATA_INCONSISTENCY,
1115 "problem < %s > not found (top tip: check spelling)",
1116 problem_name.c_str());
1117 MOFEM_LOG_C("WORLD", Sev::inform, "check problem < %s >",
1118 problem_name.c_str());
1119
1120 // loop over all elements in the problem and check that assembly completes without error
1121 auto fe_ptr = m_field.get_finite_elements();
1122 for (auto &fe : *fe_ptr) {
1123 MOFEM_LOG_C("WORLD", Sev::verbose, "\tcheck element %s",
1124 fe->getName().c_str());
1125 CHKERR m_field.loop_finite_elements(problem_name, fe->getName(), method,
1126 nullptr, MF_EXIST,
1127 CacheTupleSharedPtr(), verb);
1128 }
1129
1130 CHKERR MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
1131 CHKERR MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
1132
1133 PetscLogEventEnd(MOFEM_EVENT_checkMatrixFillIn, 0, 0, 0, 0);
1134
1135 MoFEMFunctionReturn(0);
1136}
1137
1138template <>
1139MoFEMErrorCode
1140MatrixManager::checkMPIAIJWithArraysMatrixFillIn<PetscGlobalIdx_mi_tag>(
1141 const std::string problem_name, int row_print, int col_print, int verb) {
1142 MatrixManagerFunctionBegin;
1143 // create matrix
1144 SmartPetscObj<Mat> A;
1145 CHKERR createMPIAIJWithArrays<PetscGlobalIdx_mi_tag>(problem_name, A, verb);
1146 CHKERR MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_TRUE);
1147 CHKERR checkMatrixFillIn(problem_name, row_print, col_print, A, verb);
1148 MoFEMFunctionReturn(0);
1149}
1150
1151template <>
1152MoFEMErrorCode MatrixManager::checkMPIAIJMatrixFillIn<PetscGlobalIdx_mi_tag>(
1153 const std::string problem_name, int row_print, int col_print, int verb) {
1154 MatrixManagerFunctionBegin;
1155 // create matrix
1156 SmartPetscObj<Mat> A;
1157 CHKERR createMPIAIJ<PetscGlobalIdx_mi_tag>(problem_name, A, verb);
1158 CHKERR MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_TRUE);
1159 CHKERR checkMatrixFillIn(problem_name, row_print, col_print, A, verb);
1160 MoFEMFunctionReturn(0);
1161}
1162
1163template <>
1164MoFEMErrorCode MatrixManager::createHybridL2MPIAIJ<PetscGlobalIdx_mi_tag>(
1165 const std::string problem_name, SmartPetscObj<Mat> &aij_ptr, int verb) {
1166 MoFEM::CoreInterface &m_field = cOre;
1167 MatrixManagerFunctionBegin;
1168
1169 auto prb_ptr = m_field.get_problems();
1170 auto p_miit = prb_ptr->get<Problem_mi_tag>().find(problem_name);
1171 if (p_miit == prb_ptr->get<Problem_mi_tag>().end()) {
1172 SETERRQ(m_field.get_comm(), MOFEM_NOT_FOUND,
1173 "problem < %s > is not found (top tip: check spelling)",
1174 problem_name.c_str());
1175 }
1176
1177 auto nb_rows = p_miit->getNbDofsRow();
1178 auto nb_cols = p_miit->getNbDofsCol();
1179 auto nb_loc_rows = p_miit->getNbLocalDofsRow();
1180 auto nb_loc_cols = p_miit->getNbLocalDofsCol();
1181
1182 auto row_ptr = p_miit->getNumeredRowDofsPtr();
1183 auto col_ptr = p_miit->getNumeredColDofsPtr();
1184
1185 BitFieldId fields_ids;
1186 for (auto &c : *col_ptr) {
1187 fields_ids |= c->getId();
1188 }
1189 auto fields = m_field.get_fields();
1190 std::vector<int> fields_bit_numbers;
1191 for (auto &f : *fields) {
1192 if ((fields_ids & f->getId()).any()) {
1193 fields_bit_numbers.push_back(f->getBitNumber());
1194 }
1195 }
1196
1197 Range adj;
1198 EntityHandle prev_ent = 0;
1199 int prev_dim = -1;
1200
1201 auto get_adj = [&](auto ent, auto dim) {
1202 if (prev_ent == ent && prev_dim == dim) {
1203 return adj;
1204 } else {
1205 adj.clear();
1206 Range bridge;
1207 CHKERR m_field.get_moab().get_adjacencies(&ent, 1, dim + 1, false,
1208 bridge);
1209 CHKERR m_field.get_moab().get_adjacencies(bridge, dim, false, adj,
1210 moab::Interface::UNION);
1211 prev_ent = ent;
1212 prev_dim = dim;
1213 return adj;
1214 }
1215 };
1216
1217 int prev_bit_number = -1;
1218 EntityHandle prev_first_ent = 0;
1219 EntityHandle prev_second_ent = 0;
1220 using IT = boost::multi_index::index<NumeredDofEntity_multiIndex,
1221 Unique_mi_tag>::type::iterator;
1222 std::pair<IT, IT> pair_lo_hi;
1223 auto get_col_it = [&](auto bit_number, auto first_ent, auto second_ent) {
1224 if (bit_number == prev_bit_number && first_ent == prev_first_ent &&
1225 second_ent == prev_second_ent) {
1226 return pair_lo_hi;
1227 } else {
1228 auto lo_it = col_ptr->get<Unique_mi_tag>().lower_bound(
1229 DofEntity::getLoFieldEntityUId(bit_number, first_ent));
1230 auto hi_it = col_ptr->get<Unique_mi_tag>().upper_bound(
1231 DofEntity::getHiFieldEntityUId(bit_number, second_ent));
1232 pair_lo_hi = std::make_pair(lo_it, hi_it);
1233 prev_bit_number = bit_number;
1234 prev_first_ent = first_ent;
1235 prev_second_ent = second_ent;
1236 return pair_lo_hi;
1237 }
1238 };
1239
1240 auto create_ghost_vec = [&]() {
1241 SmartPetscObj<Vec> v;
1242 CHKERR m_field.getInterface<VecManager>()->vecCreateGhost(problem_name, ROW,
1243 v);
1244 CHKERR VecZeroEntries(v);
1245 CHKERR VecGhostUpdateBegin(v, INSERT_VALUES, SCATTER_FORWARD);
1246 CHKERR VecGhostUpdateEnd(v, INSERT_VALUES, SCATTER_FORWARD);
1247 return v;
1248 };
1249
1250 auto v_o_nnz = create_ghost_vec();
1251 auto v_d_nnz = create_ghost_vec();
1252
1253
1254 double *o_nnz_real;
1255 CHKERR VecGetArray(v_o_nnz, &o_nnz_real);
1256 double *d_nnz_real;
1257 CHKERR VecGetArray(v_d_nnz, &d_nnz_real);
1258
1259 for (auto r = row_ptr->begin(); r != row_ptr->end(); ++r) {
1260
1261 auto row_loc_idx = (*r)->getPetscLocalDofIdx();
1262 if (row_loc_idx < 0)
1263 continue;
1264
1265 auto ent = (*r)->getEnt();
1266 auto dim = dimension_from_handle(ent);
1267 auto adj = get_adj(ent, dim);
1268
1269 for (auto p = adj.pair_begin(); p != adj.pair_end(); ++p) {
1270 auto first_ent = p->first;
1271 auto second_ent = p->second;
1272
1273 for (auto bit_number : fields_bit_numbers) {
1274
1275 auto [lo_it, hi_it] = get_col_it(bit_number, first_ent, second_ent);
1276
1277 for (; lo_it != hi_it; ++lo_it) {
1278 auto col_loc_idx = (*lo_it)->getPetscLocalDofIdx();
1279 if (col_loc_idx < 0)
1280 continue;
1281
1282 if (
1283
1284 (*lo_it)->getOwnerProc() == (*r)->getOwnerProc()
1285
1286 ) {
1287 d_nnz_real[row_loc_idx] += 1;
1288 } else {
1289 o_nnz_real[row_loc_idx] += 1;
1290 }
1291 }
1292 }
1293 }
1294 }
1295
1296 CHKERR VecRestoreArray(v_o_nnz, &o_nnz_real);
1297 CHKERR VecRestoreArray(v_d_nnz, &d_nnz_real);
1298
1299 auto update_vec = [&](auto v) {
1300 MoFEMFunctionBeginHot;
1301 CHKERR VecGhostUpdateBegin(v, ADD_VALUES, SCATTER_REVERSE);
1302 CHKERR VecGhostUpdateEnd(v, ADD_VALUES, SCATTER_REVERSE);
1303 CHKERR VecAssemblyBegin(v);
1304 CHKERR VecAssemblyEnd(v);
1305 MoFEMFunctionReturnHot(0);
1306 };
1307
1308 CHKERR update_vec(v_o_nnz);
1309 CHKERR update_vec(v_d_nnz);
1310
1311 int o_nz = 0;
1312 int d_nz = 0;
1313
1314 CHKERR VecGetArray(v_d_nnz, &d_nnz_real);
1315 CHKERR VecGetArray(v_o_nnz, &o_nnz_real);
1316
1317 std::vector<int> d_nnz(nb_loc_rows, 0);
1318 std::vector<int> o_nnz(nb_loc_rows, 0);
1319 for (auto r = row_ptr->begin(); r != row_ptr->end(); ++r) {
1320
1321 auto row_loc_idx = (*r)->getPetscLocalDofIdx();
1322
1323 if (
1324
1325 row_loc_idx >= 0 && row_loc_idx < nb_loc_rows
1326
1327 ) {
1328 auto row_loc_idx = (*r)->getPetscLocalDofIdx();
1329 d_nz += d_nnz_real[row_loc_idx];
1330 d_nnz[row_loc_idx] = d_nnz_real[row_loc_idx];
1331 o_nz += o_nnz_real[row_loc_idx];
1332 o_nnz[row_loc_idx] = o_nnz_real[row_loc_idx];
1333 }
1334
1335 }
1336
1337 CHKERR VecRestoreArray(v_o_nnz, &o_nnz_real);
1338 CHKERR VecRestoreArray(v_d_nnz, &d_nnz_real);
1339
1340 if (verb >= QUIET) {
1341 MOFEM_LOG("SYNC", Sev::verbose)
1342 << "Hybrid L2 matrix d_nz: " << d_nz << " o_nz: " << o_nz;
1343 MOFEM_LOG_SEVERITY_SYNC(m_field.get_comm(), Sev::verbose);
1344 }
1345
1346 if (verb >= VERBOSE) {
1347 MOFEM_LOG("SYNC", Sev::noisy) << "Hybrid L2 matrix";
1348 int idx = 0;
1349 for (auto &d : d_nnz) {
1350 MOFEM_LOG("SYNC", Sev::noisy) << idx << ": " << d;
1351 ++idx;
1352 }
1353 MOFEM_LOG_SEVERITY_SYNC(m_field.get_comm(), Sev::noisy);
1354 }
1355
1356 Mat a_raw;
1357 CHKERR MatCreateAIJ(m_field.get_comm(), nb_loc_rows, nb_loc_cols, nb_rows,
1358 nb_cols, 0, &*d_nnz.begin(), 0, &*o_nnz.begin(), &a_raw);
1359 CHKERR MatSetOption(a_raw, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_TRUE);
1360 aij_ptr = SmartPetscObj<Mat>(a_raw);
1361
1362 MOFEM_LOG_CHANNEL("WORLD");
1363 MOFEM_LOG_CHANNEL("SYNC");
1364
1365 MoFEMFunctionReturn(0);
1366}
1367
1368} // namespace MoFEM
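
The i and j vectors filled by createMatArrays above follow PETSc's compressed-row (CSR) convention: i stores one offset per local row plus a final entry equal to j.size(), and the column indices of row r occupy j[i[r]] .. j[i[r+1]-1]. A small worked example of that layout (the values are illustrative, not produced by any particular problem):

// CSR pattern for 3 local rows with columns {0,2}, {1} and {0,1,2}:
std::vector<PetscInt> i = {0, 2, 3, 6}; // row offsets; last entry = total nonzeros
std::vector<PetscInt> j = {0, 2, 1, 0, 1, 2}; // concatenated column indices
// e.g. row 2 has i[3] - i[2] = 3 entries: j[3], j[4], j[5]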
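For rows that are shared but not owned, createMatArrays packs adjacency data into flat integer buffers, one record per row: the row index, the number of adjacent columns, then the column indices themselves. The receive loop above unpacks exactly that layout; a standalone sketch of the decoding step (names are illustrative):

#include <map>
#include <vector>

// buf layout: [row, n, c0, ..., c(n-1), row, n, ...], len integers in total
void decodeAdjBuffer(const int *buf, int len,
                     std::map<int, std::vector<int>> &adj) {
  for (int k = 0; k < len;) {
    const int row = buf[k++]; // row index
    const int n = buf[k++];   // number of column adjacencies
    adj[row].insert(adj[row].end(), buf + k, buf + k + n);
    k += n;
  }
}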
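Finally, an end-to-end sketch of how this interface is typically reached from user code, assuming an already initialised MoFEM::Core with a built and partitioned problem named "TEST_PROBLEM" (the problem name, MOAB instance and the row/column print arguments are illustrative):

moab::Core mb_instance;
MoFEM::Core core(mb_instance);
MoFEM::CoreInterface &m_field = core;

// ... build fields, finite elements and the "TEST_PROBLEM" problem here ...

MatrixManager *mat_mng;
CHKERR m_field.getInterface(mat_mng);

SmartPetscObj<Mat> A;
CHKERR mat_mng->createMPIAIJ<PetscGlobalIdx_mi_tag>("TEST_PROBLEM", A, QUIET);
// cross-check that the preallocated pattern admits all element contributions
CHKERR mat_mng->checkMPIAIJMatrixFillIn<PetscGlobalIdx_mi_tag>("TEST_PROBLEM",
                                                               -1, -1, QUIET);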