v0.10.0
MatrixManager.cpp
Go to the documentation of this file.
1 /**
2  * \brief Create adjacent matrices using different indices
3 
4  * MoFEM is distributed in the hope that it will be useful, but WITHOUT
5  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
6  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
7  * License for more details.
8  *
9  * You should have received a copy of the GNU Lesser General Public
10  * License along with MoFEM. If not, see <http://www.gnu.org/licenses/>
11 
12 */
13 
// Common prologue for MatrixManager interface functions: starts MoFEM error
// handling (MoFEMFunctionBegin) and tags the "WORLD" and "SYNC" log channels
// with "MatrixManager" so every message logged by these routines is
// attributed to this interface.
14 #define MatrixManagerFunctionBegin \
15  MoFEMFunctionBegin; \
16  MOFEM_LOG_CHANNEL("WORLD"); \
17  MOFEM_LOG_CHANNEL("SYNC"); \
18  MOFEM_LOG_FUNCTION(); \
19  MOFEM_LOG_TAG("SYNC", "MatrixManager"); \
20  MOFEM_LOG_TAG("WORLD", "MatrixManager")
21 
22 namespace MoFEM {
23 
24 /** \brief Create compressed matrix
25 
26  \note Only function class members are allowed in this class. NO VARIABLES.
27 
28  \todo It is obsolete implementation, code should be moved to interface
29  MatrixManager.
30 
31  \todo While the matrix is created it is assumed that all entities on an
32  element are adjacent to each other; in some cases this creates denser
33  matrices than necessary. Some kind of filtering can be added.
34 
35  \todo Creation of the block matrices
36 
37  \todo Some efficiency improvements are possible
38 
39 
40  */
42
// Constructor tail: forwards the MoAB instance, MPI communicator and
// verbosity level to the MoFEM Core base class.
// NOTE(review): the signature line preceding the parameter list is not
// visible in this listing — confirm against the original source.
44  MPI_Comm comm = PETSC_COMM_WORLD, int verbose = 1)
45  : Core(moab, comm, verbose) {}
46
// Typedef over the field-entity/finite-element adjacency multi-index
// (cf. AdjByEnt in the cross-references). NOTE(review): the index tag and
// typedef name are truncated in this listing.
47  typedef FieldEntityEntFiniteElementAdjacencyMap_multiIndex::index<
52
53  /** \brief Create matrix adjacencies
54
55  Depending on TAG type, which some structure used to number dofs, matrix is
56  partitioned using part number stored in multi-index, or is partitioned on
57  parts based only on index number.
58
59  See: Idx_mi_tag PetscGlobalIdx_mi_tag and PetscLocalIdx_mi_tag
60
61  */
62  template <typename TAG>
// Builds compressed-row arrays: i receives row offsets, j receives column
// indices; no_diagonals skips diagonal entries (used for MATMPIADJ graphs).
64  createMatArrays(ProblemsByName::iterator p_miit, const MatType type,
65  std::vector<PetscInt> &i, std::vector<PetscInt> &j,
66  const bool no_diagonals = true, int verb = QUIET) const;
67
68  /** \brief Get element adjacencies
69  */
// Collects, for the entity holding the row dof at mit_row, the TAG indices
// of all adjacent column dofs into dofs_col_view.
70  template <typename TAG>
72  ProblemsByName::iterator p_miit,
73  typename boost::multi_index::index<NumeredDofEntity_multiIndex,
74  TAG>::type::iterator mit_row,
75  boost::shared_ptr<FieldEntity> mofem_ent_ptr,
76  std::vector<int> &dofs_col_view, int verb) const;
77 };
78 
// Collect the TAG indices of all column dofs adjacent to the entity
// mofem_ent_ptr (the entity owning the row dof at mit_row).
//
// For every adjacency of the entity marked BYROW, the owning finite element
// is accepted only if (a) its BitFEId intersects the problem's, (b) its bit
// ref level passes the problem's mask/bit filters, and (c) it shares a bit
// ref level with the row dof. Accepted elements contribute the cached column
// dof indices of their column field entities. Results are appended (possibly
// with duplicates) to dofs_col_view, which is cleared on entry.
//
// Errors: PETSC_ERR_ARG_SIZ if a column index reaches nb_dofs_col;
// MOFEM_DATA_INCONSISTENCY if a column entity's dof cache was not set
// (caches are filled by createMatArrays before this is called).
79 template <typename TAG>
81  ProblemsByName::iterator p_miit,
82  typename boost::multi_index::index<NumeredDofEntity_multiIndex,
83  TAG>::type::iterator mit_row,
84  boost::shared_ptr<FieldEntity> mofem_ent_ptr,
85  std::vector<int> &dofs_col_view, int verb) const {
87
88  BitRefLevel prb_bit = p_miit->getBitRefLevel();
89  BitRefLevel prb_mask = p_miit->getMaskBitRefLevel();
90
91  const DofIdx nb_dofs_col = p_miit->getNbDofsCol();
92
93  dofs_col_view.clear();
94  for (auto r = entFEAdjacencies.get<Unique_mi_tag>().equal_range(
95  mofem_ent_ptr->getLocalUniqueId());
96  r.first != r.second; ++r.first) {
97
98  if (r.first->byWhat & BYROW) {
99
100  if ((r.first->entFePtr->getId() & p_miit->getBitFEId()).none()) {
101  // if element is not part of problem
102  continue;
103  }
104
105  const BitRefLevel fe_bit = r.first->entFePtr->getBitRefLevel();
106  // if entity is not problem refinement level
107  if ((fe_bit & prb_mask) != fe_bit)
108  continue;
109  if ((fe_bit & prb_bit) != prb_bit)
110  continue;
111  const BitRefLevel dof_bit = mit_row->get()->getBitRefLevel();
112
113  // if entity is not problem refinement level
114  if ((fe_bit & dof_bit).any()) {
115
116  for (auto &it : r.first->entFePtr->getColFieldEnts()) {
117  if (auto e = it.lock()) {
118  if (auto cache = e->entityCacheColDofs.lock()) {
119  const auto lo = cache->loHi[0];
120  const auto hi = cache->loHi[1];
121  for (auto vit = lo; vit != hi; ++vit) {
122  const int idx = TAG::get_index(vit);
// NOTE(review): an out-of-range idx is pushed before the bounds check below
// fires; harmless only because SETERRQ aborts the function immediately.
123  if (PetscLikely(idx >= 0))
124  dofs_col_view.push_back(idx);
125  if (PetscUnlikely(idx >= nb_dofs_col)) {
126  std::ostringstream zz;
127  zz << "rank " << rAnk << " ";
128  zz << *(*vit) << std::endl;
129  SETERRQ(cOmm, PETSC_ERR_ARG_SIZ, zz.str().c_str());
130  }
131  }
132  } else
133  SETERRQ(cOmm, MOFEM_DATA_INCONSISTENCY, "Cache not set");
134  }
135  }
136  }
137  }
138  }
139
141 }
142 
// Build the compressed-row (CSR) structure of the problem matrix.
//
// Output: i — row offsets (size nb_local_rows + 1), j — column indices,
// both in TAG numbering. no_diagonals omits diagonal entries (required for
// MATMPIADJ adjacency graphs); verb controls logging verbosity.
//
// Phases visible below:
//  1. Fill per-entity column-dof caches (entityCacheColDofs) used by
//     getEntityAdjacenies.
//  2. If TAG::IamNotPartitioned, derive the local row range from a PETSc
//     layout; otherwise exchange adjacency lists of shared/not-owned rows
//     with their owners via PetscGatherNumberOfMessages /
//     PetscGatherMessageLengths / PetscPostIrecvInt + MPI_Isend.
//  3. Walk local rows, merge local and remote adjacencies, sort/unique,
//     and append to i/j.
//  4. Cross-check the produced sizes against the requested MatType.
143 template <typename TAG>
145  ProblemsByName::iterator p_miit, const MatType type,
146  std::vector<PetscInt> &i, std::vector<PetscInt> &j, const bool no_diagonals,
147  int verb) const {
149
150  PetscLogEventBegin(MOFEM_EVENT_createMat, 0, 0, 0, 0);
151
// Phase 1: one cache slot per field entity; each slot stores the [lo,hi)
// range of that entity's column dofs in the problem.
152  auto cache = boost::make_shared<std::vector<EntityCacheNumeredDofs>>(
153  entsFields.size());
154
155  size_t idx = 0;
156  for (auto it = entsFields.begin(); it != entsFields.end(); ++it, ++idx) {
157
158  const auto uid = (*it)->getLocalUniqueId();
159  auto r = entFEAdjacencies.get<Unique_mi_tag>().equal_range(uid);
160  for (auto lo = r.first; lo != r.second; ++lo) {
161
162  if ((lo->getBitFEId() & p_miit->getBitFEId()).any()) {
163
164  const BitRefLevel prb_bit = p_miit->getBitRefLevel();
165  const BitRefLevel prb_mask = p_miit->getMaskBitRefLevel();
166  const BitRefLevel fe_bit = lo->entFePtr->getBitRefLevel();
167
168  // if entity is not problem refinement level
169  if ((fe_bit & prb_mask) != fe_bit)
170  continue;
171  if ((fe_bit & prb_bit) != prb_bit)
172  continue;
173
174  auto dit = p_miit->numeredColDofs->lower_bound(uid);
175
176  decltype(dit) hi_dit;
177  if (dit != p_miit->numeredColDofs->end())
178  hi_dit = p_miit->numeredColDofs->upper_bound(
179  uid | static_cast<UId>(MAX_DOFS_ON_ENTITY - 1));
180  else
181  hi_dit = dit;
182
// Alias the shared vector so the cache slot stays alive with the vector.
183  (*it)->entityCacheColDofs =
184  boost::shared_ptr<EntityCacheNumeredDofs>(cache, &((*cache)[idx]));
185  (*cache)[idx].loHi = {dit, hi_dit};
186
187  break;
188  }
189  }
190  }
191
192  using NumeredDofEntitysByIdx =
193  typename boost::multi_index::index<NumeredDofEntity_multiIndex,
194  TAG>::type;
195
196  // Get multi-indices for rows and columns
197  const NumeredDofEntitysByIdx &dofs_row_by_idx =
198  p_miit->numeredRowDofs->get<TAG>();
199  int nb_dofs_row = p_miit->getNbDofsRow();
200  if (nb_dofs_row == 0) {
201  SETERRQ1(cOmm, MOFEM_DATA_INCONSISTENCY, "problem <%s> has zero rows",
202  p_miit->getName().c_str());
203  }
204
205  // Get adjacencies form other processors
206  std::map<int, std::vector<int>> adjacent_dofs_on_other_parts;
207
208  // If not partitioned set petsc layout for matrix. If partitioned need to get
209  // adjacencies form other parts. Note if algebra is only partitioned no need
210  // to collect adjacencies form other entities. Those are already on mesh
211  // which is assumed that is on each processor the same.
212  typename boost::multi_index::index<NumeredDofEntity_multiIndex,
213  TAG>::type::iterator miit_row,
214  hi_miit_row;
215
216  if (TAG::IamNotPartitioned) {
217
218  // Get range of local indices
219  PetscLayout layout;
220  CHKERR PetscLayoutCreate(cOmm, &layout);
221  CHKERR PetscLayoutSetBlockSize(layout, 1);
222  CHKERR PetscLayoutSetSize(layout, nb_dofs_row);
223  CHKERR PetscLayoutSetUp(layout);
224  PetscInt rstart, rend;
225  CHKERR PetscLayoutGetRange(layout, &rstart, &rend);
226  CHKERR PetscLayoutDestroy(&layout);
227
228  if (verb >= VERBOSE) {
229  MOFEM_LOG("SYNC", Sev::noisy)
230  << "row lower " << rstart << " row upper " << rend;
232  }
233
234  miit_row = dofs_row_by_idx.lower_bound(rstart);
235  hi_miit_row = dofs_row_by_idx.lower_bound(rend);
236  if (std::distance(miit_row, hi_miit_row) != rend - rstart) {
237  SETERRQ4(
238  cOmm, PETSC_ERR_ARG_SIZ,
239  "data inconsistency, std::distance(miit_row,hi_miit_row) != rend - "
240  "rstart (%d != %d - %d = %d) ",
241  std::distance(miit_row, hi_miit_row), rend, rstart, rend - rstart);
242  }
243
244  } else {
245
246  // Make sure it is a PETSc cOmm
247  CHKERR PetscCommDuplicate(cOmm, &cOmm, NULL);
248
249  // get adjacent nodes on other partitions
// dofs_vec[proc] accumulates the message for that owner:
// [row_idx, n_adj, adj_0 .. adj_{n-1}, row_idx, n_adj, ...]
250  std::vector<std::vector<int>> dofs_vec(sIze);
251
252  boost::shared_ptr<FieldEntity> mofem_ent_ptr;
253  std::vector<int> dofs_col_view;
254
255  typename boost::multi_index::index<NumeredDofEntity_multiIndex,
256  TAG>::type::iterator mit_row,
257  hi_mit_row;
258
259  mit_row = dofs_row_by_idx.begin();
260  hi_mit_row = dofs_row_by_idx.end();
261  for (; mit_row != hi_mit_row; mit_row++) {
262
263  // Shared or multishared row and not owned. Those data should be send to
264  // other side.
265
266  // Get entity adjacencies, no need to repeat that operation for dofs when
267  // are on the same entity. For simplicity is assumed that those sheered
268  // the same adjacencies.
269  unsigned char pstatus = (*mit_row)->getPStatus();
270  if ((pstatus & PSTATUS_NOT_OWNED) &&
271  (pstatus & (PSTATUS_SHARED | PSTATUS_MULTISHARED))) {
272
273  bool get_adj_col = true;
274  if (mofem_ent_ptr) {
275  if (mofem_ent_ptr->getLocalUniqueId() ==
276  (*mit_row)->getFieldEntityPtr()->getLocalUniqueId()) {
277  get_adj_col = false;
278  }
279  }
280
281  if (get_adj_col) {
282  // Get entity adjacencies
283  mofem_ent_ptr = (*mit_row)->getFieldEntityPtr();
284  CHKERR getEntityAdjacenies<TAG>(p_miit, mit_row, mofem_ent_ptr,
285  dofs_col_view, verb);
286  // Sort, unique and resize dofs_col_view
287  {
288  std::sort(dofs_col_view.begin(), dofs_col_view.end());
289  std::vector<int>::iterator new_end =
290  std::unique(dofs_col_view.begin(), dofs_col_view.end());
291  int new_size = std::distance(dofs_col_view.begin(), new_end);
292  dofs_col_view.resize(new_size);
293  }
294  // Add that row. Patterns is that first index is row index, second is
295  // size of adjacencies after that follows column adjacencies.
296  int owner = (*mit_row)->getOwnerProc();
297  dofs_vec[owner].emplace_back(TAG::get_index(mit_row)); // row index
298  dofs_vec[owner].emplace_back(
299  dofs_col_view.size()); // nb. of column adjacencies
300  // add adjacent cools
301  dofs_vec[owner].insert(dofs_vec[owner].end(), dofs_col_view.begin(),
302  dofs_col_view.end());
303  }
304  }
305  }
306
307  int nsends = 0; // number of messages to send
308  std::vector<int> dofs_vec_length(sIze); // length of the message to proc
309  for (int proc = 0; proc < sIze; proc++) {
310
311  if (!dofs_vec[proc].empty()) {
312
313  dofs_vec_length[proc] = dofs_vec[proc].size();
314  nsends++;
315
316  } else {
317
318  dofs_vec_length[proc] = 0;
319  }
320  }
321
322  std::vector<MPI_Status> status(sIze);
323
324  // Computes the number of messages a node expects to receive
325  int nrecvs; // number of messages received
326  CHKERR PetscGatherNumberOfMessages(cOmm, NULL, &dofs_vec_length[0],
327  &nrecvs);
328
329  // Computes info about messages that a MPI-node will receive, including
330  // (from-id,length) pairs for each message.
331  int *onodes; // list of node-ids from which messages are expected
332  int *olengths; // corresponding message lengths
333  CHKERR PetscGatherMessageLengths(cOmm, nsends, nrecvs, &dofs_vec_length[0],
334  &onodes, &olengths);
335
336  // Gets a unique new tag from a PETSc communicator.
337  int tag;
338  CHKERR PetscCommGetNewTag(cOmm, &tag);
339
340  // Allocate a buffer sufficient to hold messages of size specified in
341  // olengths. And post Irecvs on these buffers using node info from onodes
342  int **rbuf; // must bee freed by user
343  MPI_Request *r_waits; // must bee freed by user
344
345  // rbuf has a pointers to messages. It has size of of nrecvs (number of
346  // messages) +1. In the first index a block is allocated,
347  // such that rbuf[i] = rbuf[i-1]+olengths[i-1].
348  CHKERR PetscPostIrecvInt(cOmm, tag, nrecvs, onodes, olengths, &rbuf,
349  &r_waits);
350
351  MPI_Request *s_waits; // status of sens messages
352  CHKERR PetscMalloc1(nsends, &s_waits);
353
354  // Send messages
355  for (int proc = 0, kk = 0; proc < sIze; proc++) {
356  if (!dofs_vec_length[proc])
357  continue; // no message to send to this proc
358  CHKERR MPI_Isend(&(dofs_vec[proc])[0], // buffer to send
359  dofs_vec_length[proc], // message length
360  MPIU_INT, proc, // to proc
361  tag, cOmm, s_waits + kk);
362  kk++;
363  }
364
365  // Wait for received
366  if (nrecvs) {
367  CHKERR MPI_Waitall(nrecvs, r_waits, &status[0]);
368  }
369  // Wait for send messages
370  if (nsends) {
371  CHKERR MPI_Waitall(nsends, s_waits, &status[0]);
372  }
373
// Decode received messages into adjacent_dofs_on_other_parts, keyed by the
// (owned) row index.
374  for (int kk = 0; kk < nrecvs; kk++) {
375
376  int len = olengths[kk];
377  int *data_from_proc = rbuf[kk];
378
379  for (int ii = 0; ii < len;) {
380
381  int row_idx = data_from_proc[ii++]; // get row number
382  int nb_adj_dofs = data_from_proc[ii++]; // get nb. of adjacent dofs
383
384  if (debug) {
385
386  DofByGlobalPetscIndex::iterator dit;
387  dit = p_miit->numeredRowDofs->get<PetscGlobalIdx_mi_tag>().find(
388  row_idx);
389  if (dit ==
390  p_miit->numeredRowDofs->get<PetscGlobalIdx_mi_tag>().end()) {
391  SETERRQ1(cOmm, MOFEM_DATA_INCONSISTENCY,
392  "dof %d can not be found in problem", row_idx);
393  }
394  }
395
396  for (int jj = 0; jj < nb_adj_dofs; jj++) {
397  adjacent_dofs_on_other_parts[row_idx].push_back(data_from_proc[ii++]);
398  }
399  }
400  }
401
402  // Cleaning
403  CHKERR PetscFree(s_waits);
404  CHKERR PetscFree(rbuf[0]);
405  CHKERR PetscFree(rbuf);
406  CHKERR PetscFree(r_waits);
407  CHKERR PetscFree(onodes);
408  CHKERR PetscFree(olengths);
409
410  miit_row = dofs_row_by_idx.begin();
411  hi_miit_row = dofs_row_by_idx.end();
412  }
413
414  boost::shared_ptr<FieldEntity> mofem_ent_ptr;
415  int row_last_evaluated_idx = -1;
416
417  std::vector<int> dofs_vec;
418  std::vector<int> dofs_col_view;
419
420  // loop local rows
421  int nb_loc_row_from_iterators = 0;
422  unsigned int rows_to_fill = p_miit->getNbLocalDofsRow();
423  i.reserve(rows_to_fill + 1);
424  for (; miit_row != hi_miit_row; miit_row++) {
425
426  if (!TAG::IamNotPartitioned) {
427  if (static_cast<int>((*miit_row)->getPart()) != rAnk)
428  continue;
429  }
430  // This is only for cross-check if everything is ok
431  nb_loc_row_from_iterators++;
432
433  // add next row to compressed matrix
434  i.push_back(j.size());
435
436  // Get entity adjacencies, no need to repeat that operation for dofs when
437  // are on the same entity. For simplicity is assumed that those share the
438  // same adjacencies.
439  if ((!mofem_ent_ptr)
440  ? 1
441  : (mofem_ent_ptr->getLocalUniqueId() !=
442  (*miit_row)->getFieldEntityPtr()->getLocalUniqueId())) {

443
444  // get entity adjacencies
445  mofem_ent_ptr = (*miit_row)->getFieldEntityPtr();
446  CHKERR getEntityAdjacenies<TAG>(p_miit, miit_row, mofem_ent_ptr,
447  dofs_col_view, verb);
448  row_last_evaluated_idx = TAG::get_index(miit_row);
449
450  dofs_vec.resize(0);
451  // insert dofs_col_view
452  dofs_vec.insert(dofs_vec.end(), dofs_col_view.begin(),
453  dofs_col_view.end());
454
// Merge in adjacencies reported by other processes for this row (if any).
455  unsigned char pstatus = (*miit_row)->getPStatus();
456  if (pstatus > 0) {
457  std::map<int, std::vector<int>>::iterator mit;
458  mit = adjacent_dofs_on_other_parts.find(row_last_evaluated_idx);
459  if (mit == adjacent_dofs_on_other_parts.end()) {
460  // NOTE: Dof can adjacent to other part but no elements are there
461  // which use that dof std::cerr << *miit_row << std::endl; SETERRQ1(
462  // cOmm,MOFEM_DATA_INCONSISTENCY,
463  // "data inconsistency row_last_evaluated_idx = %d",
464  // row_last_evaluated_idx
465  // );
466  } else {
467  dofs_vec.insert(dofs_vec.end(), mit->second.begin(),
468  mit->second.end());
469  }
470  }
471
472  // sort and make unique
473  sort(dofs_vec.begin(), dofs_vec.end());
474  std::vector<int>::iterator new_end =
475  unique(dofs_vec.begin(), dofs_vec.end());
476  int new_size = std::distance(dofs_vec.begin(), new_end);
477  dofs_vec.resize(new_size);
478  }
479
480  // Try to be smart reserving memory
481  if (j.capacity() < j.size() + dofs_vec.size()) {
482
483  // TODO: [CORE-55] Improve algorithm estimating size of compressed matrix
484  unsigned int nb_nonzero = j.size() + dofs_vec.size();
485  unsigned int average_row_fill = nb_nonzero / i.size() + 1;
486  j.reserve(rows_to_fill * average_row_fill);
487  }
488
489  auto hi_diit = dofs_vec.end();
490  for (auto diit = dofs_vec.begin(); diit != hi_diit; diit++) {
491
492  if (no_diagonals) {
493  if (*diit == TAG::get_index(miit_row)) {
494  continue;
495  }
496  }
497  j.push_back(*diit);
498  }
499  }
500
501  // build adj matrix
502  i.push_back(j.size());
503
// Phase 4: cross-check produced row count against the requested matrix type.
504  if (strcmp(type, MATMPIADJ) == 0) {
505
506  // Adjacency matrix used to partition problems, f.e. METIS
507  if (i.size() - 1 != (unsigned int)nb_loc_row_from_iterators) {
508  SETERRQ(cOmm, PETSC_ERR_ARG_SIZ, "data inconsistency");
509  }
510
511  } else if (strcmp(type, MATMPIAIJ) == 0) {
512
513  // Compressed MPIADJ matrix
514  if (i.size() - 1 != (unsigned int)nb_loc_row_from_iterators) {
515  SETERRQ(cOmm, PETSC_ERR_ARG_SIZ, "data inconsistency");
516  }
517  PetscInt nb_local_dofs_row = p_miit->getNbLocalDofsRow();
518  if ((unsigned int)nb_local_dofs_row != i.size() - 1) {
519  SETERRQ(cOmm, PETSC_ERR_ARG_SIZ, "data inconsistency");
520  }
521
522  } else if (strcmp(type, MATAIJ) == 0) {
523
524  // Sequential compressed ADJ matrix
525  if (i.size() - 1 != (unsigned int)nb_loc_row_from_iterators) {
526  SETERRQ(cOmm, PETSC_ERR_ARG_SIZ, "data inconsistency");
527  }
528  PetscInt nb_local_dofs_row = p_miit->getNbLocalDofsRow();
529  if ((unsigned int)nb_local_dofs_row != i.size() - 1) {
530  SETERRQ(cOmm, PETSC_ERR_ARG_SIZ, "data inconsistency");
531  }
532
533  } else {
534
535  SETERRQ(cOmm, PETSC_ERR_ARG_NULL, "not implemented");
536  }
537
538  PetscLogEventEnd(MOFEM_EVENT_createMat, 0, 0, 0, 0);
540 }
541 
// Interface query: returns this MatrixManager when asked for
// IDD_MOFEMMatrixManager; otherwise errors with "unknown interface".
// NOTE(review): the listing omits the line after the assignment (original
// line 548), presumably a success return — confirm against the source, as
// shown here control would fall through to SETERRQ.
543  UnknownInterface **iface) const {
545  *iface = NULL;
546  if (uuid == IDD_MOFEMMatrixManager) {
547  *iface = const_cast<MatrixManager *>(this);
549  }
550  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "unknown interface");
552 }
553 
// Constructor: stores a reference to the MoFEM Core and registers the PETSc
// log events used by the create*/check* methods below. NOTE(review): each
// PetscLogEventRegister call continues on a line (with the event handle
// output argument) that is missing from this listing.
555  : cOre(const_cast<MoFEM::Core &>(core)) {
556  PetscLogEventRegister("MatrixManagerCreateMPIAIJ", 0,
558  PetscLogEventRegister("MatrixManagerCreateMPIAIJWithArrays", 0,
560  PetscLogEventRegister("MatrixManagerCreateMPIAdjWithArrays", 0,
562  PetscLogEventRegister("MatrixManagerCreateSeqAIJWithArrays", 0,
564  PetscLogEventRegister("MatrixManagerCheckMPIAIJWithArraysMatrixFillIn", 0,
566 }
567 
// Create a parallel MPIAIJ matrix for the named problem using CSR arrays
// produced by createMatArrays (global PETSc numbering). Errors with
// MOFEM_NOT_FOUND if the problem name is unknown. The matrix is returned in
// *Aij; PETSc copies the i/j arrays, so the local vectors may be freed.
568 template <>
569 MoFEMErrorCode MatrixManager::createMPIAIJWithArrays<PetscGlobalIdx_mi_tag>(
570  const std::string name, Mat *Aij, int verb) {
571  MoFEM::CoreInterface &m_field = cOre;
572  CreateRowComressedADJMatrix *core_ptr =
573  static_cast<CreateRowComressedADJMatrix *>(&cOre);
575  PetscLogEventBegin(MOFEM_EVENT_createMPIAIJWithArrays, 0, 0, 0, 0);
576
577  auto problems_ptr = m_field.get_problems();
578  auto &prb = problems_ptr->get<Problem_mi_tag>();
579  auto p_miit = prb.find(name);
580  if (p_miit == prb.end()) {
581  SETERRQ1(m_field.get_comm(), MOFEM_NOT_FOUND,
582  "problem < %s > is not found (top tip: check spelling)",
583  name.c_str());
584  }
585
586  std::vector<int> i_vec, j_vec;
587  j_vec.reserve(10000);
// NOTE(review): the createMatArrays call line (original line 588) is
// truncated in this listing; only its argument list is visible.
589  p_miit, MATMPIAIJ, i_vec, j_vec, false, verb);
590
591  int nb_row_dofs = p_miit->getNbDofsRow();
592  int nb_col_dofs = p_miit->getNbDofsCol();
593  int nb_local_dofs_row = p_miit->getNbLocalDofsRow();
594  int nb_local_dofs_col = p_miit->getNbLocalDofsCol();
595
596  CHKERR ::MatCreateMPIAIJWithArrays(
597  m_field.get_comm(), nb_local_dofs_row, nb_local_dofs_col, nb_row_dofs,
598  nb_col_dofs, &*i_vec.begin(), &*j_vec.begin(), PETSC_NULL, Aij);
599
600  PetscLogEventEnd(MOFEM_EVENT_createMPIAIJWithArrays, 0, 0, 0, 0);
602 }
603 
// Create a parallel MPIAIJ matrix for the named problem by preallocating
// from the CSR arrays: counts diagonal-block (d_nnz) vs off-diagonal-block
// (o_nnz) nonzeros per local row against the column ownership range, then
// uses MatMPIAIJSetPreallocation. Unlike createMPIAIJWithArrays, values are
// not copied from arrays — only the nonzero pattern is preallocated.
604 template <>
605 MoFEMErrorCode MatrixManager::createMPIAIJ<PetscGlobalIdx_mi_tag>(
606  const std::string name, Mat *Aij, int verb) {
607  MoFEM::CoreInterface &m_field = cOre;
608  CreateRowComressedADJMatrix *core_ptr =
609  static_cast<CreateRowComressedADJMatrix *>(&cOre);
611  PetscLogEventBegin(MOFEM_EVENT_createMPIAIJ, 0, 0, 0, 0);
612
613  auto problems_ptr = m_field.get_problems();
614  auto &prb = problems_ptr->get<Problem_mi_tag>();
615  auto p_miit = prb.find(name);
616  if (p_miit == prb.end()) {
617  SETERRQ1(m_field.get_comm(), MOFEM_NOT_FOUND,
618  "problem < %s > is not found (top tip: check spelling)",
619  name.c_str());
620  }
621
622  std::vector<int> i_vec, j_vec;
623  j_vec.reserve(10000);
// NOTE(review): the createMatArrays call line (original line 624) is
// truncated in this listing; only its argument list is visible.
625  p_miit, MATMPIAIJ, i_vec, j_vec, false, verb);
626
627  int nb_row_dofs = p_miit->getNbDofsRow();
628  int nb_col_dofs = p_miit->getNbDofsCol();
629  int nb_local_dofs_row = p_miit->getNbLocalDofsRow();
630  int nb_local_dofs_col = p_miit->getNbLocalDofsCol();
631
// Local column ownership range [start, end) derived from a PETSc layout.
632  auto get_layout = [&]() {
633  int start_ranges, end_ranges;
634  PetscLayout layout;
635  CHKERR PetscLayoutCreate(m_field.get_comm(), &layout);
636  CHKERR PetscLayoutSetBlockSize(layout, 1);
637  CHKERR PetscLayoutSetLocalSize(layout, nb_local_dofs_col);
638  CHKERR PetscLayoutSetUp(layout);
639  CHKERR PetscLayoutGetRange(layout, &start_ranges, &end_ranges);
640  CHKERR PetscLayoutDestroy(&layout);
641  return std::make_pair(start_ranges, end_ranges);
642  };
643
// Classify each row's column indices as in-range (diagonal block) or
// out-of-range (off-diagonal block).
644  auto get_nnz = [&](auto &d_nnz, auto &o_nnz) {
646  auto layout = get_layout();
647  int j = 0;
648  for (int i = 0; i != nb_local_dofs_row; ++i) {
649  for (; j != i_vec[i + 1]; ++j) {
650  if (j_vec[j] < layout.second && j_vec[j] >= layout.first)
651  ++(d_nnz[i]);
652  else
653  ++(o_nnz[i]);
654  }
655  }
657  };
658
659  std::vector<int> d_nnz(nb_local_dofs_row, 0), o_nnz(nb_local_dofs_row, 0);
660  CHKERR get_nnz(d_nnz, o_nnz);
661
662
663  CHKERR MatCreate(m_field.get_comm(), Aij);
664  CHKERR MatSetSizes(*Aij, nb_local_dofs_row, nb_local_dofs_col, nb_row_dofs,
665  nb_col_dofs);
666  CHKERR MatSetType(*Aij, MATMPIAIJ);
667  CHKERR MatMPIAIJSetPreallocation(*Aij, 0, &*d_nnz.begin(), 0,
668  &*o_nnz.begin());
669
670  PetscLogEventEnd(MOFEM_EVENT_createMPIAIJ, 0, 0, 0, 0);
672 }
673 
// Create a parallel adjacency (MATMPIADJ) matrix for the named problem,
// e.g. for METIS/ParMETIS partitioning. Diagonal entries are excluded
// (no_diagonals = true). MatCreateMPIAdj takes ownership of the PetscMalloc'd
// _i/_j arrays, hence the explicit copies from the std::vectors.
674 template <>
676 MatrixManager::createMPIAdjWithArrays<Idx_mi_tag>(const std::string name,
677  Mat *Adj, int verb) {
678  MoFEM::CoreInterface &m_field = cOre;
679  CreateRowComressedADJMatrix *core_ptr =
680  static_cast<CreateRowComressedADJMatrix *>(&cOre);
682  PetscLogEventBegin(MOFEM_EVENT_createMPIAdjWithArrays, 0, 0, 0, 0);
683
684  auto problems_ptr = m_field.get_problems();
685  auto &prb = problems_ptr->get<Problem_mi_tag>();
686  auto p_miit = prb.find(name);
687  if (p_miit == prb.end()) {
688  SETERRQ1(m_field.get_comm(), MOFEM_NOT_FOUND,
689  "problem < %s > is not found (top tip: check spelling)",
690  name.c_str());
691  }
692
693  std::vector<int> i_vec, j_vec;
694  j_vec.reserve(10000);
695  CHKERR core_ptr->createMatArrays<Idx_mi_tag>(p_miit, MATMPIADJ, i_vec, j_vec,
696  true, verb);
697  int *_i, *_j;
698  CHKERR PetscMalloc(i_vec.size() * sizeof(int), &_i);
699  CHKERR PetscMalloc(j_vec.size() * sizeof(int), &_j);
700  copy(i_vec.begin(), i_vec.end(), _i);
701  copy(j_vec.begin(), j_vec.end(), _j);
702
703  int nb_col_dofs = p_miit->getNbDofsCol();
704  CHKERR MatCreateMPIAdj(m_field.get_comm(), i_vec.size() - 1, nb_col_dofs, _i,
705  _j, PETSC_NULL, Adj);
706  CHKERR MatSetOption(*Adj, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);
707
708  PetscLogEventEnd(MOFEM_EVENT_createMPIAdjWithArrays, 0, 0, 0, 0);
710 }
711 
// Create a sequential AIJ matrix for the named problem (local PETSc
// numbering). MatCreateSeqAIJWithArrays does not copy the i/j/a arrays, so
// the matrix is duplicated (MAT_SHARE_NONZERO_PATTERN) before the temporary
// wrapper and the value buffer are released.
// NOTE(review): logging reuses MOFEM_EVENT_createMPIAIJWithArrays although a
// MatrixManagerCreateSeqAIJWithArrays event is registered in the constructor
// — confirm whether this is intentional.
712 template <>
713 MoFEMErrorCode MatrixManager::createSeqAIJWithArrays<PetscLocalIdx_mi_tag>(
714  const std::string name, Mat *Aij, int verb) {
715  MoFEM::CoreInterface &m_field = cOre;
716  CreateRowComressedADJMatrix *core_ptr =
717  static_cast<CreateRowComressedADJMatrix *>(&cOre);
719  PetscLogEventBegin(MOFEM_EVENT_createMPIAIJWithArrays, 0, 0, 0, 0);
720
721  auto problems_ptr = m_field.get_problems();
722  auto &prb = problems_ptr->get<Problem_mi_tag>();
723  auto p_miit = prb.find(name);
724  if (p_miit == prb.end()) {
725  SETERRQ1(m_field.get_comm(), MOFEM_NOT_FOUND,
726  "problem < %s > is not found (top tip: check spelling)",
727  name.c_str());
728  }
729
730  std::vector<int> i_vec, j_vec;
731  j_vec.reserve(10000);
732  CHKERR core_ptr->createMatArrays<PetscGlobalIdx_mi_tag>(p_miit, MATAIJ, i_vec,
733  j_vec, false, verb);
734
735  int nb_local_dofs_row = p_miit->getNbLocalDofsRow();
736  int nb_local_dofs_col = p_miit->getNbLocalDofsCol();
737
738  double *_a;
739  CHKERR PetscMalloc(j_vec.size() * sizeof(double), &_a);
740
741  Mat tmpMat;
742  CHKERR ::MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, nb_local_dofs_row,
743  nb_local_dofs_col, &*i_vec.begin(),
744  &*j_vec.begin(), _a, &tmpMat);
745  CHKERR MatDuplicate(tmpMat, MAT_SHARE_NONZERO_PATTERN, Aij);
746  CHKERR MatDestroy(&tmpMat);
747
748  CHKERR PetscFree(_a);
749
750  PetscLogEventEnd(MOFEM_EVENT_createMPIAIJWithArrays, 0, 0, 0, 0);
752 }
753 
// Verify that matrix A was preallocated consistently with the problem's
// finite-element indices: loops every finite element of the problem and
// attempts MatSetValue(..., INSERT_VALUES) for each (row, col) pair; with
// MAT_NEW_NONZERO_ALLOCATION_ERR set, any entry outside the preallocated
// pattern raises a PETSc error. Along the way the nested FEMethod
// cross-checks refinement, adjacency and dof-multi-index consistency, and
// prints diagnostics for the (row_print, col_print) entry.
754 MoFEMErrorCode MatrixManager::checkMatrixFillIn(const std::string problem_name,
755  int row_print, int col_print,
756  Mat A, int verb) {
757  MoFEM::CoreInterface &m_field = cOre;
759
760  PetscLogEventBegin(MOFEM_EVENT_checkMatrixFillIn, 0, 0, 0, 0);
761
// Per-element checker run by loop_finite_elements below.
762  struct TestMatrixFillIn : public FEMethod {
763  CoreInterface *mFieldPtr;
764
765  Mat A;
766
767  int rowPrint, colPrint;
768
769  TestMatrixFillIn(CoreInterface *m_field_ptr, Mat a, int row_print,
770  int col_print)
771  : mFieldPtr(m_field_ptr), A(a), rowPrint(row_print),
772  colPrint(col_print){};
773
774  MoFEMErrorCode preProcess() {
777  }
778
779  MoFEMErrorCode operator()() {
781
782  if (refinedFiniteElementsPtr->find(
783  numeredEntFiniteElementPtr->getEnt()) ==
784  refinedFiniteElementsPtr->end()) {
785  SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
786  "data inconsistency");
787  }
788
789  auto row_dofs = getRowDofsPtr();
790  auto col_dofs = getColDofsPtr();
791
// First pass: validate each column dof (refined entity present, dof active,
// adjacency recorded, entity and dof findable in the multi-indices).
792  for (auto cit = col_dofs->begin(); cit != col_dofs->end(); cit++) {
793
794  if (refinedEntitiesPtr->find((*cit)->getEnt()) ==
795  refinedEntitiesPtr->end()) {
796  SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
797  "data inconsistency");
798  }
799  if (!(*cit)->getActive()) {
800  SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
801  "data inconsistency");
802  }
803
804  FieldEntityEntFiniteElementAdjacencyMap_multiIndex::index<
805  Composite_Unique_mi_tag>::type::iterator ait;
806  ait = adjacenciesPtr->get<Composite_Unique_mi_tag>().find(
807  boost::make_tuple((*cit)->getFieldEntityPtr()->getLocalUniqueId(),
808  numeredEntFiniteElementPtr->getLocalUniqueId()));
809  if (ait == adjacenciesPtr->end()) {
810  SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
811  "adjacencies data inconsistency");
812  } else {
813  UId uid = ait->getEntUniqueId();
814  if (entitiesPtr->find(uid) == entitiesPtr->end()) {
815  SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
816  "data inconsistency");
817  }
818  if (dofsPtr->find((*cit)->getLocalUniqueId()) == dofsPtr->end()) {
819  SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
820  "data inconsistency");
821  }
822  }
823
// Warn (not error) when the dof count on a non-vertex entity disagrees with
// the count implied by its approximation order.
824  if ((*cit)->getEntType() != MBVERTEX) {
825
826  auto range =
827  col_dofs->get<Ent_mi_tag>().equal_range((*cit)->getEnt());
828  int nb_dofs_on_ent = std::distance(range.first, range.second);
829
830  int max_order = (*cit)->getMaxOrder();
831  if ((*cit)->getNbOfCoeffs() * (*cit)->getOrderNbDofs(max_order) !=
832  nb_dofs_on_ent) {
833  MOFEM_LOG("SELF", Sev::warning)
834  << "Warning: Number of Dofs in Col diffrent than number "
835  "of dofs for given entity order "
836  << (*cit)->getNbOfCoeffs() * (*cit)->getOrderNbDofs(max_order)
837  << " " << nb_dofs_on_ent;
838  }
839  }
840  }
841
842
// Second pass: the same validation for row dofs, plus the actual
// MatSetValue probe for every (row, col) pair of the element.
843  for (auto rit = row_dofs->begin(); rit != row_dofs->end(); rit++) {
844
845  if (refinedEntitiesPtr->find((*rit)->getEnt()) ==
846  refinedEntitiesPtr->end()) {
847  SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
848  "data inconsistency");
849  }
850  if (!(*rit)->getActive()) {
851  SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
852  "data inconsistency");
853  }
854
855  FieldEntityEntFiniteElementAdjacencyMap_multiIndex::index<
856  Composite_Unique_mi_tag>::type::iterator ait;
857  ait = adjacenciesPtr->get<Composite_Unique_mi_tag>().find(
858  boost::make_tuple((*rit)->getFieldEntityPtr()->getLocalUniqueId(),
859  numeredEntFiniteElementPtr->getLocalUniqueId()));
860  if (ait == adjacenciesPtr->end()) {
862  MOFEM_LOG("SELF", Sev::error) << *(*rit);
863  MOFEM_LOG("SELF", Sev::error) << *(*rit);
864  MOFEM_LOG("SELF", Sev::error) << *numeredEntFiniteElementPtr;
865  MOFEM_LOG("SELF", Sev::error) << "dof: " << (*rit)->getBitRefLevel();
866  MOFEM_LOG("SELF", Sev::error)
867  << "fe: " << numeredEntFiniteElementPtr->getBitRefLevel();
868  MOFEM_LOG("SELF", Sev::error)
869  << "problem: " << problemPtr->getBitRefLevel();
870  MOFEM_LOG("SELF", Sev::error)
871  << "problem mask: " << problemPtr->getMaskBitRefLevel();
872  SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
873  "adjacencies data inconsistency");
874  } else {
875  UId uid = ait->getEntUniqueId();
876  if (entitiesPtr->find(uid) == entitiesPtr->end()) {
877  SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
878  "data inconsistency");
879  }
880  if (dofsPtr->find((*rit)->getLocalUniqueId()) == dofsPtr->end()) {
881  SETERRQ(mFieldPtr->get_comm(), MOFEM_DATA_INCONSISTENCY,
882  "data inconsistency");
883  }
884  }
885  int row = (*rit)->getPetscGlobalDofIdx();
886
887  auto col_dofs = getColDofsPtr();
888  for (auto cit = col_dofs->begin(); cit != col_dofs->end(); cit++) {
889
890  int col = (*cit)->getPetscGlobalDofIdx();
891
// Dump diagnostics for the one entry the caller asked to inspect.
892  if (row == rowPrint && col == colPrint) {
893  MOFEM_LOG("SELF", Sev::noisy) << "fe:\n"
894  << *numeredEntFiniteElementPtr;
895  MOFEM_LOG("SELF", Sev::noisy) << "row:\n" << *(*rit);
896  MOFEM_LOG("SELF", Sev::noisy) << "col:\n" << *(*cit);
897  MOFEM_LOG("SELF", Sev::noisy)
898  << "fe:\n"
899  << numeredEntFiniteElementPtr->getBitRefLevel();
900  MOFEM_LOG("SELF", Sev::noisy) << "row:\n"
901  << (*rit)->getBitRefLevel();
902  MOFEM_LOG("SELF", Sev::noisy) << "col:\n"
903  << (*cit)->getBitRefLevel();
904  }
905
// The actual fill-in probe: fails if (row, col) is not preallocated.
906  CHKERR MatSetValue(A, row, col, 1, INSERT_VALUES);
907  }
908
909  if ((*rit)->getEntType() != MBVERTEX) {
910
911  auto range =
912  row_dofs->get<Ent_mi_tag>().equal_range((*rit)->getEnt());
913  int nb_dofs_on_ent = std::distance(range.first, range.second);
914
915  int max_order = (*rit)->getMaxOrder();
916  if ((*rit)->getNbOfCoeffs() * (*rit)->getOrderNbDofs(max_order) !=
917  nb_dofs_on_ent) {
918  MOFEM_LOG("SELF", Sev::warning)
919  << "Warning: Number of Dofs in Row diffrent than number "
920  "of dofs for given entity order "
921  << (*rit)->getNbOfCoeffs() * (*rit)->getOrderNbDofs(max_order)
922  << " " << nb_dofs_on_ent;
923  }
924  }
925  }
926
928  }
929
930  MoFEMErrorCode postProcess() {
932
933  // cerr << mFieldPtr->get_comm_rank() << endl;
934  CHKERR MatAssemblyBegin(A, MAT_FLUSH_ASSEMBLY);
935  CHKERR MatAssemblyEnd(A, MAT_FLUSH_ASSEMBLY);
936
938  }
939  };
940
941  // create matrix
942  CHKERR MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_TRUE);
943
944  if (verb >= VERY_VERBOSE) {
945  MatView(A, PETSC_VIEWER_STDOUT_WORLD);
946  }
947
948  if (verb >= NOISY) {
949  MatView(A, PETSC_VIEWER_DRAW_WORLD);
950  std::string wait;
951  std::cin >> wait;
952  }
953
954  TestMatrixFillIn method(&m_field, A, row_print, col_print);
955
956  // get problem
957  auto problems_ptr = m_field.get_problems();
958  auto &prb_set = problems_ptr->get<Problem_mi_tag>();
959  auto p_miit = prb_set.find(problem_name);
960  if (p_miit == prb_set.end())
961  SETERRQ1(m_field.get_comm(), MOFEM_DATA_INCONSISTENCY,
962  "problem < %s > not found (top tip: check spelling)",
963  problem_name.c_str());
964  MOFEM_LOG_C("WORLD", Sev::inform, "check problem < %s >",
965  problem_name.c_str());
966
967  // loop all elements in problem and check if assemble is without error
968  auto fe_ptr = m_field.get_finite_elements();
969  for (auto &fe : *fe_ptr) {
970  MOFEM_LOG_C("WORLD", Sev::verbose, "\tcheck element %s",
971  fe->getName().c_str());
972  CHKERR m_field.loop_finite_elements(problem_name, fe->getName(), method,
973  nullptr, MF_EXIST,
974  CacheTupleSharedPtr(), verb);
975  }
976
977  CHKERR MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
978  CHKERR MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
979
980  PetscLogEventEnd(MOFEM_EVENT_checkMatrixFillIn, 0, 0, 0, 0);
981
983 }
984 
// Convenience wrapper: builds an MPIAIJ matrix for the problem via
// createMPIAIJWithArrays, enables MAT_NEW_NONZERO_ALLOCATION_ERR, and runs
// checkMatrixFillIn on it. NOTE(review): the declaration of the matrix
// handle A (and the return-type line) are missing from this listing —
// presumably a SmartPetscObj/Mat declared on the omitted line 992.
985 template <>
987 MatrixManager::checkMPIAIJWithArraysMatrixFillIn<PetscGlobalIdx_mi_tag>(
988  const std::string problem_name, int row_print, int col_print, int verb) {
989  MoFEM::CoreInterface &m_field = cOre;
991  // create matrix
993  CHKERR createMPIAIJWithArrays<PetscGlobalIdx_mi_tag>(problem_name, A, verb);
994  CHKERR MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_TRUE);
995  CHKERR checkMatrixFillIn(problem_name, row_print, col_print, A, verb);
997 }
998 
// Convenience wrapper: builds an MPIAIJ matrix for the problem via
// createMPIAIJ (preallocation-only variant), enables
// MAT_NEW_NONZERO_ALLOCATION_ERR, and runs checkMatrixFillIn on it.
// NOTE(review): the declaration of the matrix handle A is missing from this
// listing (omitted line 1005) — confirm against the source.
999 template <>
1000 MoFEMErrorCode MatrixManager::checkMPIAIJMatrixFillIn<PetscGlobalIdx_mi_tag>(
1001  const std::string problem_name, int row_print, int col_print, int verb) {
1002  MoFEM::CoreInterface &m_field = cOre;
1004  // create matrix
1006  CHKERR createMPIAIJ<PetscGlobalIdx_mi_tag>(problem_name, A, verb);
1007  CHKERR MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_TRUE);
1008  CHKERR checkMatrixFillIn(problem_name, row_print, col_print, A, verb);
1010 }
1011 
1012 } // namespace MoFEM
structure for User Loop Methods on finite elements. It can be used to calculate stiffness matrices,...
MoFEM interface unique ID.
PetscLogEvent MOFEM_EVENT_createMat
Definition: Core.hpp:957
BlockParamData * cache
#define MoFEMFunctionBeginHot
First executable line of each MoFEM function, used for error handling. Final line of MoFEM functions ...
Definition: definitions.h:509
boost::shared_ptr< CacheTuple > CacheTupleSharedPtr
#define MOFEM_LOG(channel, severity)
Log.
Definition: LogManager.hpp:303
base class for all interface classes
#define MOFEM_LOG_C(channel, severity, format,...)
Definition: LogManager.hpp:306
#define MoFEMFunctionReturn(a)
Last executable line of each PETSc function used for error handling. Replaces return()
Definition: definitions.h:485
#define MoFEMFunctionReturnHot(a)
Last executable line of each PETSc function used for error handling. Replaces return()
Definition: definitions.h:516
PetscLogEvent MOFEM_EVENT_createSeqAIJWithArrays
Create compressed matrix.
FieldEntityEntFiniteElementAdjacencyMap_multiIndex::index< Unique_mi_tag >::type AdjByEnt
implementation of Data Operators for Forces and Sources
Definition: Common.hpp:21
int verbose
Verbosity level.
Definition: Core.hpp:1006
PetscLogEvent MOFEM_EVENT_createMPIAIJ
MoFEMErrorCode checkMatrixFillIn(const std::string problem_name, int row_print, int col_print, Mat A, int verb=QUIET)
check if matrix fill in correspond to finite element indices
PetscLogEvent MOFEM_EVENT_checkMatrixFillIn
multi_index_container< boost::shared_ptr< NumeredDofEntity >, indexed_by< ordered_unique< tag< Unique_mi_tag >, const_mem_fun< NumeredDofEntity::interface_type_DofEntity, UId, &NumeredDofEntity::getLocalUniqueId > >, ordered_non_unique< tag< Part_mi_tag >, member< NumeredDofEntity, unsigned int, &NumeredDofEntity::pArt > >, ordered_non_unique< tag< Idx_mi_tag >, member< NumeredDofEntity, DofIdx, &NumeredDofEntity::dofIdx > >, ordered_non_unique< tag< PetscGlobalIdx_mi_tag >, member< NumeredDofEntity, DofIdx, &NumeredDofEntity::petscGloablDofIdx > >, ordered_non_unique< tag< PetscLocalIdx_mi_tag >, member< NumeredDofEntity, DofIdx, &NumeredDofEntity::petscLocalDofIdx > >, ordered_non_unique< tag< Ent_mi_tag >, const_mem_fun< NumeredDofEntity::interface_type_DofEntity, EntityHandle, &NumeredDofEntity::getEnt > > > > NumeredDofEntity_multiIndex
MultiIndex container keeps NumeredDofEntity.
static Index< 'i', 3 > i
std::reference_wrapper< moab::Interface > moab
moab database
Definition: Core.hpp:318
int rAnk
MOFEM communicator rank.
Definition: Core.hpp:969
PetscErrorCode MoFEMErrorCode
MoFEM/PETSc error code.
Definition: Exceptions.hpp:67
static const bool debug
Interface. This interface is used by user to:
Definition: Interface.hpp:42
virtual const FiniteElement_multiIndex * get_finite_elements() const =0
Get the finite elements object.
PetscLogEvent MOFEM_EVENT_createMPIAdjWithArrays
static Index< 'j', 3 > j
CreateRowComressedADJMatrix(moab::Interface &moab, MPI_Comm comm=PETSC_COMM_WORLD, int verbose=1)
MatrixManager(const MoFEM::Core &core)
#define CHKERR
Inline error check.
Definition: definitions.h:604
const double r
rate factor
static const MOFEMuuid IDD_MOFEMMatrixManager
NumeredDofEntity_multiIndex::index< PetscGlobalIdx_mi_tag >::type DofByGlobalPetscIndex
std::bitset< BITREFLEVEL_SIZE > BitRefLevel
Bit structure attached to each entity identifying to what mesh entity is attached.
Definition: Types.hpp:51
PetscLogEvent MOFEM_EVENT_createMPIAIJWithArrays
MoFEMErrorCode query_interface(const MOFEMuuid &uuid, UnknownInterface **iface) const
#define MOFEM_LOG_ATTRIBUTES(channel, bit)
Add attributes to channel.
Definition: LogManager.hpp:291
Problem_multiIndex::index< Problem_mi_tag >::type ProblemsByName
#define MAX_DOFS_ON_ENTITY
Maximal number of DOFs on entity.
Definition: definitions.h:305
DeprecatedCoreInterface Interface
Definition: Interface.hpp:1943
virtual const Problem_multiIndex * get_problems() const =0
Get the problems object.
MoFEMErrorCode createMatArrays(ProblemsByName::iterator p_miit, const MatType type, std::vector< PetscInt > &i, std::vector< PetscInt > &j, const bool no_diagonals=true, int verb=QUIET) const
Create matrix adjacencies.
FieldEntityEntFiniteElementAdjacencyMap_multiIndex entFEAdjacencies
adjacencies of elements to dofs
Definition: Core.hpp:308
int DofIdx
Index of DOF.
Definition: Types.hpp:29
Core (interface) class.
Definition: Core.hpp:77
virtual MoFEMErrorCode loop_finite_elements(const std::string &problem_name, const std::string &fe_name, FEMethod &method, boost::shared_ptr< NumeredEntFiniteElement_multiIndex > fe_ptr=nullptr, MoFEMTypes bh=MF_EXIST, CacheTupleWeakPtr cache_ptr=CacheTupleSharedPtr(), int verb=DEFAULT_VERBOSITY)=0
Make a loop over finite elements.
#define MoFEMFunctionBegin
First executable line of each MoFEM function, used for error handling. Final line of MoFEM functions ...
Definition: definitions.h:415
#define MatrixManagerFunctionBegin
Create adjacent matrices using different indices.
virtual MPI_Comm & get_comm() const =0
int sIze
MoFEM communicator size.
Definition: Core.hpp:968
MoFEMErrorCode getEntityAdjacenies(ProblemsByName::iterator p_miit, typename boost::multi_index::index< NumeredDofEntity_multiIndex, TAG >::type::iterator mit_row, boost::shared_ptr< FieldEntity > mofem_ent_ptr, std::vector< int > &dofs_col_view, int verb) const
Get element adjacencies.
FieldEntity_multiIndex entsFields
entities on fields
Definition: Core.hpp:301
MPI_Comm cOmm
MoFEM communicator.
Definition: Core.hpp:965
uint128_t UId
Unique Id.
Definition: Types.hpp:42
#define MOFEM_LOG_SYNCHRONISE(comm)
Synchronise "SYNC" channel.
Definition: LogManager.hpp:340