/** \file MatrixManager.cpp
 * \brief Create adjacent matrices using different indices
 */

#define MatrixManagerFunctionBegin                                             \
  MoFEMFunctionBegin;                                                          \
  MOFEM_LOG_CHANNEL("WORLD");                                                  \
  MOFEM_LOG_CHANNEL("SYNC");                                                   \
  MOFEM_LOG_FUNCTION();                                                        \
  MOFEM_LOG_TAG("SYNC", "MatrixManager");                                      \
  MOFEM_LOG_TAG("WORLD", "MatrixManager")

namespace MoFEM {

/** \brief Create compressed matrix

  \note Only member functions are allowed in this class. NO VARIABLES.

  \todo This is an obsolete implementation; the code should be moved to the
  interface MatrixManager.

  \todo While the matrix is created it is assumed that all entities on an
  element are adjacent to each other; in some cases this creates a denser
  matrix than necessary. Some kind of filtering could be added.

  \todo Creation of the block matrices

  \todo Some efficiency improvements are possible

*/
struct CreateRowComressedADJMatrix : public Core {

  CreateRowComressedADJMatrix(moab::Interface &moab,
                              MPI_Comm comm = PETSC_COMM_WORLD, int verbose = 1)
      : Core(moab, comm, verbose) {}

  typedef FieldEntityEntFiniteElementAdjacencyMap_multiIndex::index<
      Unique_mi_tag>::type AdjByEnt;
  typedef Problem_multiIndex::index<Problem_mi_tag>::type ProblemsByName;
  typedef NumeredDofEntity_multiIndex::index<PetscGlobalIdx_mi_tag>::type
      DofByGlobalPetscIndex;

  /** \brief Create matrix adjacencies

    Depending on the TAG type, which selects the structure used to number
    DOFs, the matrix is partitioned using the part number stored in the
    multi-index, or is partitioned based only on the index number.

    See: Idx_mi_tag, PetscGlobalIdx_mi_tag and PetscLocalIdx_mi_tag

  */
  template <typename TAG>
  MoFEMErrorCode
  createMatArrays(ProblemsByName::iterator p_miit, const MatType type,
                  std::vector<PetscInt> &i, std::vector<PetscInt> &j,
                  const bool no_diagonals = true, int verb = QUIET) const;
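
  // Note on the output layout: the vectors `i` and `j` filled by
  // createMatArrays form a standard CSR (compressed sparse row) pattern.
  // For local row r, the column indices live in j[i[r]] .. j[i[r + 1] - 1],
  // so `i` has one entry per local row plus a closing entry equal to
  // j.size(). A small illustration: two rows with columns {0, 2} and {1}
  // are encoded as i = {0, 2, 3} and j = {0, 2, 1}.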

  /** \brief Get element adjacencies
   */
  template <typename TAG>
  MoFEMErrorCode getEntityAdjacenies(
      ProblemsByName::iterator p_miit,
      typename boost::multi_index::index<NumeredDofEntity_multiIndex,
                                         TAG>::type::iterator mit_row,
      boost::shared_ptr<FieldEntity> mofem_ent_ptr,
      std::vector<int> &dofs_col_view, int verb) const;
};

template <typename TAG>
MoFEMErrorCode CreateRowComressedADJMatrix::getEntityAdjacenies(
    ProblemsByName::iterator p_miit,
    typename boost::multi_index::index<NumeredDofEntity_multiIndex,
                                       TAG>::type::iterator mit_row,
    boost::shared_ptr<FieldEntity> mofem_ent_ptr,
    std::vector<int> &dofs_col_view, int verb) const {
  MoFEMFunctionBegin;

  BitRefLevel prb_bit = p_miit->getBitRefLevel();
  BitRefLevel prb_mask = p_miit->getBitRefLevelMask();

  const EmptyFieldBlocks &empty_field_blocks = p_miit->getEmptyFieldBlocks();

  dofs_col_view.clear();
  for (auto r = entFEAdjacencies.get<Unique_mi_tag>().equal_range(
           mofem_ent_ptr->getLocalUniqueId());
       r.first != r.second; ++r.first) {

    if (r.first->byWhat & BYROW) {

      if ((r.first->entFePtr->getId() & p_miit->getBitFEId()).none()) {
        // skip elements which are not part of the problem
        continue;
      }

      const BitRefLevel &fe_bit = r.first->entFePtr->getBitRefLevel();
      // proceed only if the element is on the problem refinement level
      if ((fe_bit & prb_bit).any() && (fe_bit & prb_mask) == fe_bit) {

        const bool empty_row_block =
            (empty_field_blocks.first & (*mit_row)->getId()).none();

        for (auto &it : r.first->entFePtr->getColFieldEnts()) {
          if (auto e = it.lock()) {
            if (empty_row_block ||
                (empty_field_blocks.second & e->getId()).none()) {

              if (auto cache = e->entityCacheColDofs.lock()) {
                const auto lo = cache->loHi[0];
                const auto hi = cache->loHi[1];
                for (auto vit = lo; vit != hi; ++vit) {

                  const int idx = TAG::get_index(vit);
                  if (PetscLikely(idx >= 0))
                    dofs_col_view.push_back(idx);

#ifndef NDEBUG
                  const DofIdx nb_dofs_col = p_miit->getNbDofsCol();
                  if (PetscUnlikely(idx >= nb_dofs_col)) {
                    MOFEM_LOG("SELF", Sev::error)
                        << "Problem with dof: " << std::endl
                        << "Rank " << rAnk << " : " << *(*vit);
                    SETERRQ(
                        mofemComm, PETSC_ERR_ARG_SIZ,
                        "Index of dof larger than number of DOFs in column");
                  }
#endif
                }

              } else {
                SETERRQ(mofemComm, MOFEM_DATA_INCONSISTENCY, "Cache not set");
              }
            }
          }
        }
      }
    }
  }

  MoFEMFunctionReturn(0);
}

template <typename TAG>
MoFEMErrorCode CreateRowComressedADJMatrix::createMatArrays(
    ProblemsByName::iterator p_miit, const MatType type,
    std::vector<PetscInt> &i, std::vector<PetscInt> &j, const bool no_diagonals,
    int verb) const {
  MoFEMFunctionBegin;

  PetscLogEventBegin(MOFEM_EVENT_createMat, 0, 0, 0, 0);

  auto cache = boost::make_shared<std::vector<EntityCacheNumeredDofs>>(
      entsFields.size());

  size_t idx = 0;
  for (auto it = entsFields.begin(); it != entsFields.end(); ++it, ++idx) {

    const auto uid = (*it)->getLocalUniqueId();
    auto r = entFEAdjacencies.get<Unique_mi_tag>().equal_range(uid);
    for (auto lo = r.first; lo != r.second; ++lo) {

      if ((lo->getBitFEId() & p_miit->getBitFEId()).any()) {

        const BitRefLevel &prb_bit = p_miit->getBitRefLevel();
        const BitRefLevel &prb_mask = p_miit->getBitRefLevelMask();
        const BitRefLevel &fe_bit = lo->entFePtr->getBitRefLevel();

        // skip entities which are not on the problem refinement level
        if ((fe_bit & prb_mask) != fe_bit)
          continue;
        if ((fe_bit & prb_bit).none())
          continue;

        auto dit = p_miit->numeredColDofsPtr->lower_bound(uid);

        decltype(dit) hi_dit;
        if (dit != p_miit->numeredColDofsPtr->end())
          hi_dit = p_miit->numeredColDofsPtr->upper_bound(
              uid | static_cast<UId>(MAX_DOFS_ON_ENTITY - 1));
        else
          hi_dit = dit;

        (*it)->entityCacheColDofs =
            boost::shared_ptr<EntityCacheNumeredDofs>(cache, &((*cache)[idx]));
        (*cache)[idx].loHi = {dit, hi_dit};

        break;
      }
    }
  }

  using NumeredDofEntitysByIdx =
      typename boost::multi_index::index<NumeredDofEntity_multiIndex,
                                         TAG>::type;

  // Get multi-indices for rows and columns
  const NumeredDofEntitysByIdx &dofs_row_by_idx =
      p_miit->numeredRowDofsPtr->get<TAG>();
  int nb_dofs_row = p_miit->getNbDofsRow();
  if (nb_dofs_row == 0) {
    SETERRQ1(mofemComm, MOFEM_DATA_INCONSISTENCY, "problem <%s> has zero rows",
             p_miit->getName().c_str());
  }

  // Get adjacencies from other processors
  std::map<int, std::vector<int>> adjacent_dofs_on_other_parts;

  // If not partitioned, set the PETSc layout for the matrix. If partitioned,
  // adjacencies have to be collected from other parts. Note that if only the
  // algebra is partitioned there is no need to collect adjacencies from other
  // entities; those are already on the mesh, which is assumed to be the same
  // on each processor.
  typename boost::multi_index::index<NumeredDofEntity_multiIndex,
                                     TAG>::type::iterator miit_row,
      hi_miit_row;

  if (TAG::IamNotPartitioned) {

    // Get range of local indices
    PetscLayout layout;
    CHKERR PetscLayoutCreate(mofemComm, &layout);
    CHKERR PetscLayoutSetBlockSize(layout, 1);
    CHKERR PetscLayoutSetSize(layout, nb_dofs_row);
    CHKERR PetscLayoutSetUp(layout);
    PetscInt rstart, rend;
    CHKERR PetscLayoutGetRange(layout, &rstart, &rend);
    CHKERR PetscLayoutDestroy(&layout);
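
    // Example: with nb_dofs_row = 10 split across 3 ranks, PETSc assigns the
    // ranges [0, 4), [4, 7) and [7, 10), i.e. the first n % size ranks get
    // one extra row.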

    if (verb >= VERBOSE) {
      MOFEM_LOG("SYNC", Sev::noisy)
          << "row lower " << rstart << " row upper " << rend;
      MOFEM_LOG_SYNCHRONISE(mofemComm);
    }

    miit_row = dofs_row_by_idx.lower_bound(rstart);
    hi_miit_row = dofs_row_by_idx.lower_bound(rend);
    if (std::distance(miit_row, hi_miit_row) != rend - rstart) {
      SETERRQ4(
          mofemComm, PETSC_ERR_ARG_SIZ,
          "data inconsistency, std::distance(miit_row,hi_miit_row) != rend - "
          "rstart (%d != %d - %d = %d) ",
          std::distance(miit_row, hi_miit_row), rend, rstart, rend - rstart);
    }

  } else {

    MPI_Comm comm;
    // Make sure it is a PETSc communicator
    CHKERR PetscCommDuplicate(get_comm(), &comm, NULL);

    // get adjacent nodes on other partitions
    std::vector<std::vector<int>> dofs_vec(sIze);

    boost::shared_ptr<FieldEntity> mofem_ent_ptr;
    std::vector<int> dofs_col_view;

    typename boost::multi_index::index<NumeredDofEntity_multiIndex,
                                       TAG>::type::iterator mit_row,
        hi_mit_row;

    mit_row = dofs_row_by_idx.begin();
    hi_mit_row = dofs_row_by_idx.end();
    for (; mit_row != hi_mit_row; mit_row++) {

      // A row which is shared or multishared and not owned holds data which
      // has to be sent to the owning side.

      // Get entity adjacencies; there is no need to repeat that operation for
      // DOFs which are on the same entity. For simplicity it is assumed that
      // they share the same adjacencies.
      unsigned char pstatus = (*mit_row)->getPStatus();
      if ((pstatus & PSTATUS_NOT_OWNED) &&
          (pstatus & (PSTATUS_SHARED | PSTATUS_MULTISHARED))) {

        bool get_adj_col = true;
        if (mofem_ent_ptr) {
          if (mofem_ent_ptr->getLocalUniqueId() ==
              (*mit_row)->getFieldEntityPtr()->getLocalUniqueId()) {
            get_adj_col = false;
          }
        }

        if (get_adj_col) {
          // Get entity adjacencies
          mofem_ent_ptr = (*mit_row)->getFieldEntityPtr();
          CHKERR getEntityAdjacenies<TAG>(p_miit, mit_row, mofem_ent_ptr,
                                          dofs_col_view, verb);
          // Sort, unique and resize dofs_col_view
          {
            std::sort(dofs_col_view.begin(), dofs_col_view.end());
            std::vector<int>::iterator new_end =
                std::unique(dofs_col_view.begin(), dofs_col_view.end());
            int new_size = std::distance(dofs_col_view.begin(), new_end);
            dofs_col_view.resize(new_size);
          }
          // Add that row. The pattern is: first the row index, then the
          // number of column adjacencies, then the column indices themselves.
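          // For illustration, a row with global index 42 adjacent to columns
          // {7, 9, 15} appends 42, 3, 7, 9, 15 to the owner's buffer.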
          int owner = (*mit_row)->getOwnerProc();
          dofs_vec[owner].emplace_back(TAG::get_index(mit_row)); // row index
          dofs_vec[owner].emplace_back(
              dofs_col_view.size()); // nb. of column adjacencies
          // add adjacent columns
          dofs_vec[owner].insert(dofs_vec[owner].end(), dofs_col_view.begin(),
                                 dofs_col_view.end());
        }
      }
    }

    int nsends = 0;                         // number of messages to send
    std::vector<int> dofs_vec_length(sIze); // length of the message to proc
    for (int proc = 0; proc < sIze; proc++) {

      if (!dofs_vec[proc].empty()) {

        dofs_vec_length[proc] = dofs_vec[proc].size();
        nsends++;

      } else {

        dofs_vec_length[proc] = 0;
      }
    }

    std::vector<MPI_Status> status(sIze);

    // Computes the number of messages a node expects to receive
    int nrecvs; // number of messages received
    CHKERR PetscGatherNumberOfMessages(comm, NULL, &dofs_vec_length[0],
                                       &nrecvs);

    // Computes info about messages that an MPI node will receive, including
    // (from-id, length) pairs for each message.
    int *onodes;   // list of node-ids from which messages are expected
    int *olengths; // corresponding message lengths
    CHKERR PetscGatherMessageLengths(comm, nsends, nrecvs, &dofs_vec_length[0],
                                     &onodes, &olengths);

    // Gets a unique new tag from a PETSc communicator.
    int tag;
    CHKERR PetscCommGetNewTag(comm, &tag);

    // Allocate a buffer sufficient to hold messages of size specified in
    // olengths, and post Irecvs on these buffers using node info from onodes
    int **rbuf;           // must be freed by user
    MPI_Request *r_waits; // must be freed by user

    // rbuf has pointers to messages. It has size of nrecvs (number of
    // messages) + 1. In the first index a block is allocated,
    // such that rbuf[i] = rbuf[i-1] + olengths[i-1].
    CHKERR PetscPostIrecvInt(comm, tag, nrecvs, onodes, olengths, &rbuf,
                             &r_waits);

    MPI_Request *s_waits; // requests of sent messages
    CHKERR PetscMalloc1(nsends, &s_waits);

    // Send messages
    for (int proc = 0, kk = 0; proc < sIze; proc++) {
      if (!dofs_vec_length[proc])
        continue; // no message to send to this proc
      CHKERR MPI_Isend(&(dofs_vec[proc])[0],  // buffer to send
                       dofs_vec_length[proc], // message length
                       MPIU_INT, proc,        // to proc
                       tag, comm, s_waits + kk);
      kk++;
    }

    // Wait for receives
    if (nrecvs) {
      CHKERR MPI_Waitall(nrecvs, r_waits, &status[0]);
    }
    // Wait for sends
    if (nsends) {
      CHKERR MPI_Waitall(nsends, s_waits, &status[0]);
    }

    for (int kk = 0; kk < nrecvs; kk++) {

      int len = olengths[kk];
      int *data_from_proc = rbuf[kk];

      for (int ii = 0; ii < len;) {

        int row_idx = data_from_proc[ii++];     // get row number
        int nb_adj_dofs = data_from_proc[ii++]; // get nb. of adjacent dofs

        if (debug) {

          DofByGlobalPetscIndex::iterator dit;
          dit = p_miit->numeredRowDofsPtr->get<PetscGlobalIdx_mi_tag>().find(
              row_idx);
          if (dit ==
              p_miit->numeredRowDofsPtr->get<PetscGlobalIdx_mi_tag>().end()) {
            SETERRQ1(mofemComm, MOFEM_DATA_INCONSISTENCY,
                     "dof %d can not be found in problem", row_idx);
          }
        }

        for (int jj = 0; jj < nb_adj_dofs; jj++) {
          adjacent_dofs_on_other_parts[row_idx].push_back(data_from_proc[ii++]);
        }
      }
    }

    // Cleaning
    CHKERR PetscFree(s_waits);
    CHKERR PetscFree(rbuf[0]);
    CHKERR PetscFree(rbuf);
    CHKERR PetscFree(r_waits);
    CHKERR PetscFree(onodes);
    CHKERR PetscFree(olengths);

    miit_row = dofs_row_by_idx.begin();
    hi_miit_row = dofs_row_by_idx.end();

    CHKERR PetscCommDestroy(&comm);
  }

  boost::shared_ptr<FieldEntity> mofem_ent_ptr;
  int row_last_evaluated_idx = -1;

  std::vector<int> dofs_vec;
  std::vector<int> dofs_col_view;

  // loop local rows
  int nb_loc_row_from_iterators = 0;
  unsigned int rows_to_fill = p_miit->getNbLocalDofsRow();
  i.reserve(rows_to_fill + 1);
  for (; miit_row != hi_miit_row; miit_row++) {

    if (!TAG::IamNotPartitioned) {
      if (static_cast<int>((*miit_row)->getPart()) != rAnk)
        continue;
    }
    // This is only for cross-checking that everything is ok
    nb_loc_row_from_iterators++;

    // add next row to the compressed matrix
    i.push_back(j.size());

    // Get entity adjacencies; there is no need to repeat that operation for
    // DOFs which are on the same entity. For simplicity it is assumed that
    // they share the same adjacencies.
    if ((!mofem_ent_ptr)
            ? 1
            : (mofem_ent_ptr->getLocalUniqueId() !=
               (*miit_row)->getFieldEntityPtr()->getLocalUniqueId())) {

      // get entity adjacencies
      mofem_ent_ptr = (*miit_row)->getFieldEntityPtr();
      CHKERR getEntityAdjacenies<TAG>(p_miit, miit_row, mofem_ent_ptr,
                                      dofs_col_view, verb);
      row_last_evaluated_idx = TAG::get_index(miit_row);

      dofs_vec.resize(0);
      // insert dofs_col_view
      dofs_vec.insert(dofs_vec.end(), dofs_col_view.begin(),
                      dofs_col_view.end());

      unsigned char pstatus = (*miit_row)->getPStatus();
      if (pstatus > 0) {
        std::map<int, std::vector<int>>::iterator mit;
        mit = adjacent_dofs_on_other_parts.find(row_last_evaluated_idx);
        if (mit == adjacent_dofs_on_other_parts.end()) {
          // NOTE: A DOF can be adjacent to another part even though no
          // elements there use it, so this is not an error.
          // std::cerr << *miit_row << std::endl;
          // SETERRQ1(
          //     get_comm(), MOFEM_DATA_INCONSISTENCY,
          //     "data inconsistency row_last_evaluated_idx = %d",
          //     row_last_evaluated_idx);
        } else {
          dofs_vec.insert(dofs_vec.end(), mit->second.begin(),
                          mit->second.end());
        }
      }

      // sort and make unique
      sort(dofs_vec.begin(), dofs_vec.end());
      std::vector<int>::iterator new_end =
          unique(dofs_vec.begin(), dofs_vec.end());
      int new_size = std::distance(dofs_vec.begin(), new_end);
      dofs_vec.resize(new_size);
    }

    // Try to be smart reserving memory
    if (j.capacity() < j.size() + dofs_vec.size()) {

      // TODO: [CORE-55] Improve algorithm estimating size of compressed matrix
      unsigned int nb_nonzero = j.size() + dofs_vec.size();
      unsigned int average_row_fill = nb_nonzero / i.size() + 1;
      j.reserve(rows_to_fill * average_row_fill);
    }

    auto hi_diit = dofs_vec.end();
    for (auto diit = dofs_vec.begin(); diit != hi_diit; diit++) {

      if (no_diagonals) {
        if (*diit == TAG::get_index(miit_row)) {
          continue;
        }
      }
      j.push_back(*diit);
    }
  }

  // close the compressed row pattern with the final offset
  i.push_back(j.size());
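
  // At this point the CSR pattern is complete: i.size() equals the number of
  // local rows plus one and i.back() == j.size(). A cheap sanity sketch
  // (illustrative only, not part of the original flow) would be:
  //
  //   for (std::size_t r = 0; r + 1 < i.size(); ++r)
  //     assert(i[r] <= i[r + 1]);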

  if (strcmp(type, MATMPIADJ) == 0) {

    // Adjacency matrix used to partition problems, e.g. by METIS
    if (i.size() - 1 != (unsigned int)nb_loc_row_from_iterators) {
      SETERRQ2(
          get_comm(), PETSC_ERR_ARG_SIZ,
          "Number of rows from iterator is different than size of rows in "
          "compressed row "
          "matrix (unsigned int)nb_local_dofs_row != i.size() - 1, i.e. %d != "
          "%d",
          (unsigned int)nb_loc_row_from_iterators, i.size() - 1);
    }

  } else if (strcmp(type, MATMPIAIJ) == 0) {

    // Compressed MPIAIJ matrix
    if (i.size() - 1 != (unsigned int)nb_loc_row_from_iterators) {
      SETERRQ2(
          get_comm(), PETSC_ERR_ARG_SIZ,
          "Number of rows from iterator is different than size of rows in "
          "compressed row "
          "matrix (unsigned int)nb_local_dofs_row != i.size() - 1, i.e. %d != "
          "%d",
          (unsigned int)nb_loc_row_from_iterators, i.size() - 1);
    }
    PetscInt nb_local_dofs_row = p_miit->getNbLocalDofsRow();
    if ((unsigned int)nb_local_dofs_row != i.size() - 1) {
      SETERRQ2(
          get_comm(), PETSC_ERR_ARG_SIZ,
          "Number of rows is different than size of rows in compressed row "
          "matrix (unsigned int)nb_local_dofs_row != i.size() - 1, i.e. %d != "
          "%d",
          (unsigned int)nb_local_dofs_row, i.size() - 1);
    }

  } else if (strcmp(type, MATAIJ) == 0) {

    // Sequential compressed AIJ matrix
    if (i.size() - 1 != (unsigned int)nb_loc_row_from_iterators) {
      SETERRQ2(
          get_comm(), PETSC_ERR_ARG_SIZ,
          "Number of rows from iterator is different than size of rows in "
          "compressed row "
          "matrix (unsigned int)nb_local_dofs_row != i.size() - 1, i.e. %d != "
          "%d",
          (unsigned int)nb_loc_row_from_iterators, i.size() - 1);
    }
    PetscInt nb_local_dofs_row = p_miit->getNbLocalDofsRow();
    if ((unsigned int)nb_local_dofs_row != i.size() - 1) {
      SETERRQ2(
          get_comm(), PETSC_ERR_ARG_SIZ,
          "Number of rows is different than size of rows in compressed row "
          "matrix (unsigned int)nb_local_dofs_row != i.size() - 1, i.e. %d != "
          "%d",
          (unsigned int)nb_local_dofs_row, i.size() - 1);
    }

  } else if (strcmp(type, MATAIJCUSPARSE) == 0) {

    if (i.size() - 1 != (unsigned int)nb_loc_row_from_iterators) {
      SETERRQ(get_comm(), PETSC_ERR_ARG_SIZ, "data inconsistency");
    }
    PetscInt nb_local_dofs_row = p_miit->getNbLocalDofsRow();
    if ((unsigned int)nb_local_dofs_row != i.size() - 1) {
      SETERRQ(get_comm(), PETSC_ERR_ARG_SIZ, "data inconsistency");
    }

  } else {

    SETERRQ(get_comm(), PETSC_ERR_ARG_NULL, "not implemented");
  }

  PetscLogEventEnd(MOFEM_EVENT_createMat, 0, 0, 0, 0);
  MoFEMFunctionReturn(0);
}

MoFEMErrorCode
MatrixManager::query_interface(boost::typeindex::type_index type_index,
                               UnknownInterface **iface) const {
  *iface = const_cast<MatrixManager *>(this);
  return 0;
}

MatrixManager::MatrixManager(const MoFEM::Core &core)
    : cOre(const_cast<MoFEM::Core &>(core)) {
  PetscLogEventRegister("MatrixManagerCreateMPIAIJ", 0,
                        &MOFEM_EVENT_createMPIAIJ);
  PetscLogEventRegister("MatrixManagerCreateMPIAIJWithArrays", 0,
                        &MOFEM_EVENT_createMPIAIJWithArrays);
  PetscLogEventRegister("MatrixManagerCreateMPIAdjWithArrays", 0,
                        &MOFEM_EVENT_createMPIAdjWithArrays);
  PetscLogEventRegister("MatrixManagerCreateMPIAIJCUSPARSEWithArrays", 0,
                        &MOFEM_EVENT_createMPIAIJCUSPARSEWithArrays);
  PetscLogEventRegister("MatrixManagerCreateSeqAIJCUSPARSEWithArrays", 0,
                        &MOFEM_EVENT_createSeqAIJCUSPARSEWithArrays);
  PetscLogEventRegister("MatrixManagerCreateSeqAIJWithArrays", 0,
                        &MOFEM_EVENT_createSeqAIJWithArrays);
  PetscLogEventRegister("MatrixManagerCheckMPIAIJWithArraysMatrixFillIn", 0,
                        &MOFEM_EVENT_checkMatrixFillIn);
}

template <>
MoFEMErrorCode MatrixManager::createMPIAIJWithArrays<PetscGlobalIdx_mi_tag>(
    const std::string name, Mat *Aij, int verb) {
  MatrixManagerFunctionBegin;

  MoFEM::CoreInterface &m_field = cOre;
  CreateRowComressedADJMatrix *core_ptr =
      static_cast<CreateRowComressedADJMatrix *>(&cOre);
  PetscLogEventBegin(MOFEM_EVENT_createMPIAIJWithArrays, 0, 0, 0, 0);

  auto problems_ptr = m_field.get_problems();
  auto &prb = problems_ptr->get<Problem_mi_tag>();
  auto p_miit = prb.find(name);
  if (p_miit == prb.end()) {
    SETERRQ1(m_field.get_comm(), MOFEM_NOT_FOUND,
             "problem < %s > is not found (top tip: check spelling)",
             name.c_str());
  }

  std::vector<int> i_vec, j_vec;
  j_vec.reserve(10000);
  CHKERR core_ptr->createMatArrays<PetscGlobalIdx_mi_tag>(
      p_miit, MATMPIAIJ, i_vec, j_vec, false, verb);

  int nb_row_dofs = p_miit->getNbDofsRow();
  int nb_col_dofs = p_miit->getNbDofsCol();
  int nb_local_dofs_row = p_miit->getNbLocalDofsRow();
  int nb_local_dofs_col = p_miit->getNbLocalDofsCol();

  CHKERR ::MatCreateMPIAIJWithArrays(
      m_field.get_comm(), nb_local_dofs_row, nb_local_dofs_col, nb_row_dofs,
      nb_col_dofs, &*i_vec.begin(), &*j_vec.begin(), PETSC_NULL, Aij);

  PetscLogEventEnd(MOFEM_EVENT_createMPIAIJWithArrays, 0, 0, 0, 0);
  MoFEMFunctionReturn(0);
}
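
// A minimal usage sketch (illustrative only; assumes `m_field` is an
// initialised MoFEM core interface and that a problem named "TEST_PROBLEM",
// a hypothetical name, has been built and partitioned):
//
//   Mat A;
//   auto *mat_mng = m_field.getInterface<MatrixManager>();
//   CHKERR mat_mng->createMPIAIJWithArrays<PetscGlobalIdx_mi_tag>(
//       "TEST_PROBLEM", &A);
//   // ... assemble and solve ...
//   CHKERR MatDestroy(&A);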

template <>
MoFEMErrorCode
MatrixManager::createMPIAIJCUSPARSEWithArrays<PetscGlobalIdx_mi_tag>(
    const std::string name, Mat *Aij, int verb) {
  MatrixManagerFunctionBegin;

  MoFEM::CoreInterface &m_field = cOre;
  CreateRowComressedADJMatrix *core_ptr =
      static_cast<CreateRowComressedADJMatrix *>(&cOre);
  PetscLogEventBegin(MOFEM_EVENT_createMPIAIJCUSPARSEWithArrays, 0, 0, 0, 0);

  auto problems_ptr = m_field.get_problems();
  auto &prb = problems_ptr->get<Problem_mi_tag>();
  auto p_miit = prb.find(name);
  if (p_miit == prb.end()) {
    SETERRQ1(m_field.get_comm(), MOFEM_NOT_FOUND,
             "problem < %s > is not found (top tip: check spelling)",
             name.c_str());
  }

  std::vector<int> i_vec, j_vec;
  j_vec.reserve(10000);
  CHKERR core_ptr->createMatArrays<PetscGlobalIdx_mi_tag>(
      p_miit, MATAIJCUSPARSE, i_vec, j_vec, false, verb);

  // global sizes are needed by MatCreateAIJCUSPARSE below
  int nb_row_dofs = p_miit->getNbDofsRow();
  int nb_col_dofs = p_miit->getNbDofsCol();
  int nb_local_dofs_row = p_miit->getNbLocalDofsRow();
  int nb_local_dofs_col = p_miit->getNbLocalDofsCol();

  auto get_layout = [&]() {
    int start_ranges, end_ranges;
    PetscLayout layout;
    CHKERR PetscLayoutCreate(m_field.get_comm(), &layout);
    CHKERR PetscLayoutSetBlockSize(layout, 1);
    CHKERR PetscLayoutSetLocalSize(layout, nb_local_dofs_col);
    CHKERR PetscLayoutSetUp(layout);
    CHKERR PetscLayoutGetRange(layout, &start_ranges, &end_ranges);
    CHKERR PetscLayoutDestroy(&layout);
    return std::make_pair(start_ranges, end_ranges);
  };

  auto get_nnz = [&](auto &d_nnz, auto &o_nnz) {
    MoFEMFunctionBeginHot;
    auto layout = get_layout();
    int j = 0;
    for (int i = 0; i != nb_local_dofs_row; ++i) {
      for (; j != i_vec[i + 1]; ++j) {
        if (j_vec[j] < layout.second && j_vec[j] >= layout.first)
          ++(d_nnz[i]);
        else
          ++(o_nnz[i]);
      }
    }
    MoFEMFunctionReturnHot(0);
  };

  std::vector<int> d_nnz(nb_local_dofs_row, 0), o_nnz(nb_local_dofs_row, 0);
  CHKERR get_nnz(d_nnz, o_nnz);
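
  // Example: if this rank owns columns [10, 20) and one of its rows has
  // column indices {3, 12, 25}, get_nnz counts d_nnz = 1 for that row
  // (column 12 falls in the diagonal block) and o_nnz = 2 (columns 3 and 25
  // belong to off-diagonal blocks owned by other ranks).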

#ifdef PETSC_HAVE_CUDA
  CHKERR ::MatCreateAIJCUSPARSE(m_field.get_comm(), nb_local_dofs_row,
                                nb_local_dofs_col, nb_row_dofs, nb_col_dofs, 0,
                                &*d_nnz.begin(), 0, &*o_nnz.begin(), Aij);
#else
  SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED,
          "Error: To use this matrix type compile PETSc with CUDA.");
#endif

  PetscLogEventEnd(MOFEM_EVENT_createMPIAIJCUSPARSEWithArrays, 0, 0, 0, 0);

  MoFEMFunctionReturn(0);
}

template <>
MoFEMErrorCode
MatrixManager::createSeqAIJCUSPARSEWithArrays<PetscLocalIdx_mi_tag>(
    const std::string name, Mat *Aij, int verb) {
  MoFEMFunctionBegin;

#ifdef PETSC_HAVE_CUDA
  // CHKERR ::MatCreateSeqAIJCUSPARSE(MPI_Comm comm, PetscInt m, PetscInt n,
  //                                  PetscInt nz, const PetscInt nnz[],
  //                                  Mat *A);
  SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED,
          "Not implemented type of matrix yet, try MPI version (aijcusparse)");
#endif

  MoFEMFunctionReturn(0);
}

template <>
MoFEMErrorCode
MatrixManager::createMPIAIJ<PetscGlobalIdx_mi_tag>(const std::string name,
                                                   Mat *Aij, int verb) {
  MoFEM::CoreInterface &m_field = cOre;
  CreateRowComressedADJMatrix *core_ptr =
      static_cast<CreateRowComressedADJMatrix *>(&cOre);
  MatrixManagerFunctionBegin;
  PetscLogEventBegin(MOFEM_EVENT_createMPIAIJ, 0, 0, 0, 0);

  auto problems_ptr = m_field.get_problems();
  auto &prb = problems_ptr->get<Problem_mi_tag>();
  auto p_miit = prb.find(name);
  if (p_miit == prb.end()) {
    SETERRQ1(m_field.get_comm(), MOFEM_NOT_FOUND,
             "problem < %s > is not found (top tip: check spelling)",
             name.c_str());
  }

  std::vector<int> i_vec, j_vec;
  j_vec.reserve(10000);
  CHKERR core_ptr->createMatArrays<PetscGlobalIdx_mi_tag>(
      p_miit, MATMPIAIJ, i_vec, j_vec, false, verb);

  int nb_row_dofs = p_miit->getNbDofsRow();
  int nb_col_dofs = p_miit->getNbDofsCol();
  int nb_local_dofs_row = p_miit->getNbLocalDofsRow();
  int nb_local_dofs_col = p_miit->getNbLocalDofsCol();

  auto get_layout = [&]() {
    int start_ranges, end_ranges;
    PetscLayout layout;
    CHKERR PetscLayoutCreate(m_field.get_comm(), &layout);
    CHKERR PetscLayoutSetBlockSize(layout, 1);
    CHKERR PetscLayoutSetLocalSize(layout, nb_local_dofs_col);
    CHKERR PetscLayoutSetUp(layout);
    CHKERR PetscLayoutGetRange(layout, &start_ranges, &end_ranges);
    CHKERR PetscLayoutDestroy(&layout);
    return std::make_pair(start_ranges, end_ranges);
  };

  auto get_nnz = [&](auto &d_nnz, auto &o_nnz) {
    MoFEMFunctionBeginHot;
    auto layout = get_layout();
    int j = 0;
    for (int i = 0; i != nb_local_dofs_row; ++i) {
      for (; j != i_vec[i + 1]; ++j) {
        if (j_vec[j] < layout.second && j_vec[j] >= layout.first)
          ++(d_nnz[i]);
        else
          ++(o_nnz[i]);
      }
    }
    MoFEMFunctionReturnHot(0);
  };

  std::vector<int> d_nnz(nb_local_dofs_row, 0), o_nnz(nb_local_dofs_row, 0);
  CHKERR get_nnz(d_nnz, o_nnz);

  CHKERR MatCreate(m_field.get_comm(), Aij);
  CHKERR MatSetSizes(*Aij, nb_local_dofs_row, nb_local_dofs_col, nb_row_dofs,
                     nb_col_dofs);
  CHKERR MatSetType(*Aij, MATMPIAIJ);
  CHKERR MatMPIAIJSetPreallocation(*Aij, 0, &*d_nnz.begin(), 0,
                                   &*o_nnz.begin());
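
  // Note: when the per-row arrays d_nnz/o_nnz are provided, PETSc ignores the
  // scalar d_nz/o_nz arguments (here 0), so the preallocation is exact.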

  PetscLogEventEnd(MOFEM_EVENT_createMPIAIJ, 0, 0, 0, 0);
  MoFEMFunctionReturn(0);
}

template <>
MoFEMErrorCode
MatrixManager::createMPIAdjWithArrays<Idx_mi_tag>(const std::string name,
                                                  Mat *Adj, int verb) {
  MoFEM::CoreInterface &m_field = cOre;
  CreateRowComressedADJMatrix *core_ptr =
      static_cast<CreateRowComressedADJMatrix *>(&cOre);
  MatrixManagerFunctionBegin;
  PetscLogEventBegin(MOFEM_EVENT_createMPIAdjWithArrays, 0, 0, 0, 0);

  auto problems_ptr = m_field.get_problems();
  auto &prb = problems_ptr->get<Problem_mi_tag>();
  auto p_miit = prb.find(name);
  if (p_miit == prb.end()) {
    SETERRQ1(m_field.get_comm(), MOFEM_NOT_FOUND,
             "problem < %s > is not found (top tip: check spelling)",
             name.c_str());
  }

  std::vector<int> i_vec, j_vec;
  j_vec.reserve(10000);
  CHKERR core_ptr->createMatArrays<Idx_mi_tag>(p_miit, MATMPIADJ, i_vec, j_vec,
                                               true, verb);
  int *_i, *_j;
  CHKERR PetscMalloc(i_vec.size() * sizeof(int), &_i);
  CHKERR PetscMalloc(j_vec.size() * sizeof(int), &_j);
  copy(i_vec.begin(), i_vec.end(), _i);
  copy(j_vec.begin(), j_vec.end(), _j);

  int nb_col_dofs = p_miit->getNbDofsCol();
  CHKERR MatCreateMPIAdj(m_field.get_comm(), i_vec.size() - 1, nb_col_dofs, _i,
                         _j, PETSC_NULL, Adj);
  CHKERR MatSetOption(*Adj, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);

  PetscLogEventEnd(MOFEM_EVENT_createMPIAdjWithArrays, 0, 0, 0, 0);
  MoFEMFunctionReturn(0);
}
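
// A sketch of how such an adjacency matrix is typically consumed (illustrative
// only; `Adj` created as above, using the standard PETSc MatPartitioning API):
//
//   MatPartitioning part;
//   IS is;
//   CHKERR MatPartitioningCreate(m_field.get_comm(), &part);
//   CHKERR MatPartitioningSetAdjacency(part, Adj);
//   CHKERR MatPartitioningSetFromOptions(part);
//   CHKERR MatPartitioningApply(part, &is);
//   CHKERR MatPartitioningDestroy(&part);
//   CHKERR ISDestroy(&is);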

template <>
MoFEMErrorCode MatrixManager::createSeqAIJWithArrays<PetscLocalIdx_mi_tag>(
    const std::string name, Mat *Aij, int verb) {
  MoFEM::CoreInterface &m_field = cOre;
  CreateRowComressedADJMatrix *core_ptr =
      static_cast<CreateRowComressedADJMatrix *>(&cOre);
  MatrixManagerFunctionBegin;
  PetscLogEventBegin(MOFEM_EVENT_createSeqAIJWithArrays, 0, 0, 0, 0);

  auto problems_ptr = m_field.get_problems();
  auto &prb = problems_ptr->get<Problem_mi_tag>();
  auto p_miit = prb.find(name);
  if (p_miit == prb.end()) {
    SETERRQ1(m_field.get_comm(), MOFEM_NOT_FOUND,
             "problem < %s > is not found (top tip: check spelling)",
             name.c_str());
  }

  std::vector<int> i_vec, j_vec;
  j_vec.reserve(10000);
  CHKERR core_ptr->createMatArrays<PetscGlobalIdx_mi_tag>(p_miit, MATAIJ, i_vec,
                                                          j_vec, false, verb);

  int nb_local_dofs_row = p_miit->getNbLocalDofsRow();
  int nb_local_dofs_col = p_miit->getNbLocalDofsCol();

  double *_a;
  CHKERR PetscMalloc(j_vec.size() * sizeof(double), &_a);

  Mat tmpMat;
  CHKERR ::MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, nb_local_dofs_row,
                                     nb_local_dofs_col, &*i_vec.begin(),
                                     &*j_vec.begin(), _a, &tmpMat);
  CHKERR MatDuplicate(tmpMat, MAT_SHARE_NONZERO_PATTERN, Aij);
  CHKERR MatDestroy(&tmpMat);

  CHKERR PetscFree(_a);

  PetscLogEventEnd(MOFEM_EVENT_createSeqAIJWithArrays, 0, 0, 0, 0);
  MoFEMFunctionReturn(0);
}

MoFEMErrorCode MatrixManager::checkMatrixFillIn(const std::string problem_name,
                                                int row_print, int col_print,
                                                Mat A, int verb) {
  MoFEM::CoreInterface &m_field = cOre;
  MatrixManagerFunctionBegin;

  PetscLogEventBegin(MOFEM_EVENT_checkMatrixFillIn, 0, 0, 0, 0);

  struct TestMatrixFillIn : public FEMethod {
    CoreInterface *mFieldPtr;

    Mat A;

    int rowPrint, colPrint;

    TestMatrixFillIn(CoreInterface *m_field_ptr, Mat a, int row_print,
                     int col_print)
        : mFieldPtr(m_field_ptr), A(a), rowPrint(row_print),
          colPrint(col_print){};

    MoFEMErrorCode preProcess() {
      MoFEMFunctionBeginHot;
      MoFEMFunctionReturnHot(0);
    }

    MoFEMErrorCode operator()() {
      MoFEMFunctionBegin;

      if (refinedFiniteElementsPtr->find(
              numeredEntFiniteElementPtr->getEnt()) ==
          refinedFiniteElementsPtr->end()) {
        SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
                "data inconsistency");
      }

      auto row_dofs = getRowDofsPtr();
      auto col_dofs = getColDofsPtr();

      for (auto cit = col_dofs->begin(); cit != col_dofs->end(); cit++) {

        if (refinedEntitiesPtr->find((*cit)->getEnt()) ==
            refinedEntitiesPtr->end()) {
          SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
                  "data inconsistency");
        }
        if (!(*cit)->getActive()) {
          SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
                  "data inconsistency");
        }

        FieldEntityEntFiniteElementAdjacencyMap_multiIndex::index<
            Composite_Unique_mi_tag>::type::iterator ait;
        ait = adjacenciesPtr->get<Composite_Unique_mi_tag>().find(
            boost::make_tuple((*cit)->getFieldEntityPtr()->getLocalUniqueId(),
                              numeredEntFiniteElementPtr->getLocalUniqueId()));
        if (ait == adjacenciesPtr->end()) {
          SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
                  "adjacencies data inconsistency");
        } else {
          UId uid = ait->getEntUniqueId();
          if (entitiesPtr->find(uid) == entitiesPtr->end()) {
            SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
                    "data inconsistency");
          }
          if (dofsPtr->find((*cit)->getLocalUniqueId()) == dofsPtr->end()) {
            SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
                    "data inconsistency");
          }
        }

        if ((*cit)->getEntType() != MBVERTEX) {

          auto range =
              col_dofs->get<Ent_mi_tag>().equal_range((*cit)->getEnt());
          int nb_dofs_on_ent = std::distance(range.first, range.second);

          int max_order = (*cit)->getMaxOrder();
          if ((*cit)->getNbOfCoeffs() * (*cit)->getOrderNbDofs(max_order) !=
              nb_dofs_on_ent) {

            /* It could be that DOFs were removed from the problem; for
             example, if this was a vector field with components {Ux,Uy,Uz},
             Uz could have been removed on this element. */

            MOFEM_LOG("SELF", Sev::warning)
                << "Warning: Number of Dofs in Col different than number "
                   "of dofs for given entity order "
                << (*cit)->getNbOfCoeffs() * (*cit)->getOrderNbDofs(max_order)
                << " " << nb_dofs_on_ent;
          }
        }
      }

      for (auto rit = row_dofs->begin(); rit != row_dofs->end(); rit++) {

        if (refinedEntitiesPtr->find((*rit)->getEnt()) ==
            refinedEntitiesPtr->end()) {
          SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
                  "data inconsistency");
        }
        if (!(*rit)->getActive()) {
          SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
                  "data inconsistency");
        }

        FieldEntityEntFiniteElementAdjacencyMap_multiIndex::index<
            Composite_Unique_mi_tag>::type::iterator ait;
        ait = adjacenciesPtr->get<Composite_Unique_mi_tag>().find(
            boost::make_tuple((*rit)->getFieldEntityPtr()->getLocalUniqueId(),
                              numeredEntFiniteElementPtr->getLocalUniqueId()));
        if (ait == adjacenciesPtr->end()) {
          MOFEM_LOG_ATTRIBUTES("SYNC", LogManager::BitScope);
          MOFEM_LOG("SELF", Sev::error) << *(*rit);
          MOFEM_LOG("SELF", Sev::error) << *numeredEntFiniteElementPtr;
          MOFEM_LOG("SELF", Sev::error) << "dof: " << (*rit)->getBitRefLevel();
          MOFEM_LOG("SELF", Sev::error)
              << "fe: " << numeredEntFiniteElementPtr->getBitRefLevel();
          MOFEM_LOG("SELF", Sev::error)
              << "problem: " << problemPtr->getBitRefLevel();
          MOFEM_LOG("SELF", Sev::error)
              << "problem mask: " << problemPtr->getBitRefLevelMask();
          SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
                  "adjacencies data inconsistency");
        } else {
          UId uid = ait->getEntUniqueId();
          if (entitiesPtr->find(uid) == entitiesPtr->end()) {
            SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
                    "data inconsistency");
          }
          if (dofsPtr->find((*rit)->getLocalUniqueId()) == dofsPtr->end()) {
            SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
                    "data inconsistency");
          }
        }
        int row = (*rit)->getPetscGlobalDofIdx();

        auto col_dofs = getColDofsPtr();
        for (auto cit = col_dofs->begin(); cit != col_dofs->end(); cit++) {

          int col = (*cit)->getPetscGlobalDofIdx();

          if (row == rowPrint && col == colPrint) {
            MOFEM_LOG("SELF", Sev::noisy) << "fe:\n"
                                          << *numeredEntFiniteElementPtr;
            MOFEM_LOG("SELF", Sev::noisy) << "row:\n" << *(*rit);
            MOFEM_LOG("SELF", Sev::noisy) << "col:\n" << *(*cit);
            MOFEM_LOG("SELF", Sev::noisy)
                << "fe:\n"
                << numeredEntFiniteElementPtr->getBitRefLevel();
            MOFEM_LOG("SELF", Sev::noisy) << "row:\n"
                                          << (*rit)->getBitRefLevel();
            MOFEM_LOG("SELF", Sev::noisy) << "col:\n"
                                          << (*cit)->getBitRefLevel();
          }

          CHKERR MatSetValue(A, row, col, 1, INSERT_VALUES);
        }

        if ((*rit)->getEntType() != MBVERTEX) {

          auto range =
              row_dofs->get<Ent_mi_tag>().equal_range((*rit)->getEnt());
          int nb_dofs_on_ent = std::distance(range.first, range.second);

          int max_order = (*rit)->getMaxOrder();
          if ((*rit)->getNbOfCoeffs() * (*rit)->getOrderNbDofs(max_order) !=
              nb_dofs_on_ent) {

            /* It could be that DOFs were removed from the problem; for
             * example, if this was a vector field with components {Ux,Uy,Uz},
             * Uz could have been removed on this element. */

            MOFEM_LOG("SELF", Sev::warning)
                << "Warning: Number of Dofs in Row different than number "
                   "of dofs for given entity order "
                << (*rit)->getNbOfCoeffs() * (*rit)->getOrderNbDofs(max_order)
                << " " << nb_dofs_on_ent;
          }
        }
      }

      MoFEMFunctionReturn(0);
    }

    MoFEMErrorCode postProcess() {
      MoFEMFunctionBegin;

      // cerr << mFieldPtr->get_comm_rank() << endl;
      CHKERR MatAssemblyBegin(A, MAT_FLUSH_ASSEMBLY);
      CHKERR MatAssemblyEnd(A, MAT_FLUSH_ASSEMBLY);

      MoFEMFunctionReturn(0);
    }
  };

  // raise an error on any attempt to insert into an unallocated location
  CHKERR MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_TRUE);

  if (verb >= VERY_VERBOSE) {
    MatView(A, PETSC_VIEWER_STDOUT_WORLD);
  }

  if (verb >= NOISY) {
    MatView(A, PETSC_VIEWER_DRAW_WORLD);
    std::string wait;
    std::cin >> wait;
  }

  TestMatrixFillIn method(&m_field, A, row_print, col_print);

  // get problem
  auto problems_ptr = m_field.get_problems();
  auto &prb_set = problems_ptr->get<Problem_mi_tag>();
  auto p_miit = prb_set.find(problem_name);
  if (p_miit == prb_set.end())
    SETERRQ1(m_field.get_comm(), MOFEM_DATA_INCONSISTENCY,
             "problem < %s > not found (top tip: check spelling)",
             problem_name.c_str());
  MOFEM_LOG_C("WORLD", Sev::inform, "check problem < %s >",
              problem_name.c_str());

  // loop over all elements in the problem and check that assembly proceeds
  // without error
  auto fe_ptr = m_field.get_finite_elements();
  for (auto &fe : *fe_ptr) {
    MOFEM_LOG_C("WORLD", Sev::verbose, "\tcheck element %s",
                fe->getName().c_str());
    CHKERR m_field.loop_finite_elements(problem_name, fe->getName(), method,
                                        nullptr, MF_EXIST,
                                        CacheTupleSharedPtr(), verb);
  }

  CHKERR MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
  CHKERR MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);

  PetscLogEventEnd(MOFEM_EVENT_checkMatrixFillIn, 0, 0, 0, 0);

  MoFEMFunctionReturn(0);
}

template <>
MoFEMErrorCode
MatrixManager::checkMPIAIJWithArraysMatrixFillIn<PetscGlobalIdx_mi_tag>(
    const std::string problem_name, int row_print, int col_print, int verb) {
  MatrixManagerFunctionBegin;
  // create matrix
  SmartPetscObj<Mat> A;
  CHKERR createMPIAIJWithArrays<PetscGlobalIdx_mi_tag>(problem_name, A, verb);
  CHKERR MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_TRUE);
  CHKERR checkMatrixFillIn(problem_name, row_print, col_print, A, verb);
  MoFEMFunctionReturn(0);
}

template <>
MoFEMErrorCode MatrixManager::checkMPIAIJMatrixFillIn<PetscGlobalIdx_mi_tag>(
    const std::string problem_name, int row_print, int col_print, int verb) {
  MatrixManagerFunctionBegin;
  // create matrix
  SmartPetscObj<Mat> A;
  CHKERR createMPIAIJ<PetscGlobalIdx_mi_tag>(problem_name, A, verb);
  CHKERR MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_TRUE);
  CHKERR checkMatrixFillIn(problem_name, row_print, col_print, A, verb);
  MoFEMFunctionReturn(0);
}
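
// Usage sketch for the fill-in checks (illustrative only; the problem name is
// hypothetical). Since valid global indices are non-negative, passing -1 for
// row_print and col_print never matches, which disables the per-entry print:
//
//   CHKERR m_field.getInterface<MatrixManager>()
//       ->checkMPIAIJWithArraysMatrixFillIn<PetscGlobalIdx_mi_tag>(
//           "TEST_PROBLEM", -1, -1, QUIET);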

} // namespace MoFEM