v0.14.0
CommInterface.cpp
Go to the documentation of this file.
1 /** \file CommInterface.cpp
2  * \brief Functions for interprocessor communications
3  * \mofem_comm
4  */
5 
6 namespace MoFEM {
7 
8 #ifdef PARMETIS
9 
10 MoFEMErrorCode MatPartitioningApply_Parmetis_MoFEM(MatPartitioning part,
11  IS *partitioning);
12 
13 #endif // PARMETIS
14 
// Return this object through the UnknownInterface query mechanism: the
// requested interface pointer is simply this CommInterface instance.
// NOTE(review): the return-type line and begin/return macro lines (original
// lines 15, 18, 20) are missing from this extract.
16 CommInterface::query_interface(boost::typeindex::type_index type_index,
17  UnknownInterface **iface) const {
// const_cast is needed because the method is const but the interface
// registry hands out mutable pointers.
19  *iface = const_cast<CommInterface *>(this);
21 }
22 
// Constructor: keep a non-const reference to the core (const is cast away so
// the interface can drive communication) and start with debugging disabled.
// NOTE(review): the signature line (original line 23) is missing from this
// extract.
24  : cOre(const_cast<MoFEM::Core &>(core)), dEbug(false) {}
25 
// Synchronise a range of entities across processors. For every entity in
// `ents` that MOAB marks as shared, the handle it has on each sharing
// processor is packed into a per-processor send buffer; after the exchange,
// handles received from other ranks are inserted into `ents` (and, when
// `received_ents` is non-null, also recorded per sending rank).
// NOTE(review): the signature prefix and MoFEMFunctionBegin/Return macro
// lines (original lines 26, 31, 237) are missing from this extract.
27  Range &ents, std::map<int, Range> *received_ents, int verb) {
28  MoFEM::Interface &m_field = cOre;
29  ParallelComm *pcomm = ParallelComm::get_pcomm(
30  &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);
32 
// Read the parallel-status byte (owned/shared/multishared flags) of one
// entity from the MOAB pstatus tag.
33  auto get_pstatus = [&](const auto ent) {
34  unsigned char pstatus;
35  CHK_MOAB_THROW(m_field.get_moab().tag_get_data(pcomm->pstatus_tag(), &ent,
36  1, &pstatus),
37  "can not get pstatus");
38  return pstatus;
39  };
40 
// List the ranks sharing an entity; unused slots stay -1. Multishared
// entities use the multi-proc tag, plainly shared ones the single-proc tag.
41  auto get_sharing_procs = [&](const auto ent, const auto pstatus) {
42  std::vector<int> sharing_procs(MAX_SHARING_PROCS, -1);
43  if (pstatus & PSTATUS_MULTISHARED) {
44  // entity is multi shared
45  CHK_MOAB_THROW(m_field.get_moab().tag_get_data(
46  pcomm->sharedps_tag(), &ent, 1, &sharing_procs[0]),
47  "can not ger sharing_procs_ptr");
48  } else if (pstatus & PSTATUS_SHARED) {
49  // shared
50  CHK_MOAB_THROW(m_field.get_moab().tag_get_data(pcomm->sharedp_tag(), &ent,
51  1, &sharing_procs[0]),
52  "can not get sharing proc");
53  }
54  return sharing_procs;
55  };
56 
// List, aligned with get_sharing_procs, the handle this entity has on each
// sharing rank; unused slots stay 0.
57  auto get_sharing_handles = [&](const auto ent, const auto pstatus) {
58  std::vector<EntityHandle> sharing_handles(MAX_SHARING_PROCS, 0);
59  if (pstatus & PSTATUS_MULTISHARED) {
60  // entity is multi shared
61  CHK_MOAB_THROW(m_field.get_moab().tag_get_data(
62  pcomm->sharedhs_tag(), &ent, 1, &sharing_handles[0]),
63  "get shared handles");
64  } else if (pstatus & PSTATUS_SHARED) {
65  // shared
66  CHK_MOAB_THROW(m_field.get_moab().tag_get_data(pcomm->sharedh_tag(), &ent,
67  1, &sharing_handles[0]),
68  "get sharing handle");
69  }
70  return sharing_handles;
71  };
72 
73  // make a buffer entities to send
74  std::vector<std::vector<EntityHandle>> sbuffer(m_field.get_comm_size());
75 
76  for (auto ent : ents) {
77 
78  auto pstatus = get_pstatus(ent);
// A zero pstatus means the entity is purely local — nothing to send.
79  if (pstatus) {
80  auto sharing_procs = get_sharing_procs(ent, pstatus);
81  auto sharing_handles = get_sharing_handles(ent, pstatus);
82 
83  if (verb >= NOISY) {
84  MOFEM_LOG("SYNC", Sev::noisy) << "pstatus " << std::bitset<8>(pstatus);
85  }
86 
87  for (int proc = 0; proc < MAX_SHARING_PROCS && -1 != sharing_procs[proc];
88  proc++) {
89  if (sharing_procs[proc] == -1)
90  SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSSIBLE_CASE,
91  "sharing processor not set");
92 
// Skip our own rank — we already have the entity.
93  if (sharing_procs[proc] == m_field.get_comm_rank())
94  continue;
95 
96  const auto handle_on_sharing_proc = sharing_handles[proc];
97  // add entity to send, handle on the other side
98  sbuffer[sharing_procs[proc]].push_back(handle_on_sharing_proc);
99  if (verb >= NOISY) {
100  MOFEM_LOG_C("SYNC", Sev::noisy, "send %lu (%lu) to %d at %d\n", ent,
101  handle_on_sharing_proc, sharing_procs[proc],
102  m_field.get_comm_rank());
103  }
104 
// A plainly-shared (non-multishared) entity has exactly one partner rank.
105  if (!(pstatus & PSTATUS_MULTISHARED))
106  break;
107  }
108  }
109  }
110  if (verb >= NOISY) {
111  MOFEM_LOG_SEVERITY_SYNC(m_field.get_comm(), Sev::noisy);
112  }
113 
114  int nsends = 0; // number of messages to send
115  std::vector<int> sbuffer_lengths(
116  m_field.get_comm_size()); // length of the message to proc
// Handles are shipped over an int-based PETSc channel, so lengths are
// expressed in ints per EntityHandle.
117  const size_t block_size =
118  sizeof(EntityHandle) /
119  sizeof(int); // FIXME check if that works on given architecture!
120  for (int proc = 0; proc < m_field.get_comm_size(); proc++) {
121 
122  if (!sbuffer[proc].empty()) {
123 
124  sbuffer_lengths[proc] = sbuffer[proc].size() * block_size;
125  nsends++;
126 
127  } else {
128 
129  sbuffer_lengths[proc] = 0;
130  }
131  }
132 
133  // Make sure it is a PETSc m_field.get_comm()
134  MPI_Comm comm;
135  CHKERR PetscCommDuplicate(m_field.get_comm(), &comm, NULL);
136 
137  std::vector<MPI_Status> status(m_field.get_comm_size());
138 
139  // Computes the number of messages a node expects to receive
140  int nrecvs; // number of messages received
141  CHKERR PetscGatherNumberOfMessages(comm, NULL, &sbuffer_lengths[0], &nrecvs);
142 
143  // Computes info about messages that a MPI-node will receive, including
144  // (from-id,length) pairs for each message.
145  int *onodes; // list of node-ids from which messages are expected
146  int *olengths; // corresponding message lengths
147  CHKERR PetscGatherMessageLengths(comm, nsends, nrecvs, &sbuffer_lengths[0],
148  &onodes, &olengths);
149 
150  // Gets a unique new tag from a PETSc communicator. All processors that share
151  // the communicator MUST call this routine EXACTLY the same number of times.
152  // This tag should only be used with the current objects communicator; do NOT
153  // use it with any other MPI communicator.
154  int tag;
155  CHKERR PetscCommGetNewTag(comm, &tag);
156 
157  // Allocate a buffer sufficient to hold messages of size specified in
158  // olengths. And post Irecvs on these buffers using node info from onodes
159  int **rbuf; // must be freed by user
160  MPI_Request *r_waits; // must be freed by user
161  // rbuf has pointers to messages. It has size of nrecvs (number of
162  // messages) +1. In the first index a block is allocated,
163  // such that rbuf[i] = rbuf[i-1]+olengths[i-1].
164  CHKERR PetscPostIrecvInt(comm, tag, nrecvs, onodes, olengths, &rbuf,
165  &r_waits);
166 
167  MPI_Request *s_waits; // status of send messages
168  CHKERR PetscMalloc1(nsends, &s_waits);
169 
170  // Send messages
171  for (int proc = 0, kk = 0; proc < m_field.get_comm_size(); proc++) {
172  if (!sbuffer_lengths[proc])
173  continue; // no message to send to this proc
174  CHKERR MPI_Isend(&(sbuffer[proc])[0], // buffer to send
175  sbuffer_lengths[proc], // message length
176  MPIU_INT, proc, // to proc
177  tag, comm, s_waits + kk);
178  kk++;
179  }
180 
181  // Wait for receives to complete
182  if (nrecvs)
183  CHKERR MPI_Waitall(nrecvs, r_waits, &status[0]);
184 
185  // Wait for send messages
186  if (nsends)
187  CHKERR MPI_Waitall(nsends, s_waits, &status[0]);
188 
189  if (verb >= VERBOSE) {
190  MOFEM_LOG_C("SYNC", Sev::verbose, "Rank %d nb. before ents %u\n",
191  m_field.get_comm_rank(), ents.size());
192  MOFEM_LOG_SEVERITY_SYNC(m_field.get_comm(), Sev::verbose);
193  }
194 
195  // synchronise range
196  for (int kk = 0; kk < nrecvs; kk++) {
197 
198  int len = olengths[kk];
199  int *data_from_proc = rbuf[kk];
200 
// Walk the received int stream one EntityHandle-sized block at a time;
// bcopy reassembles each handle from the int-packed wire format.
201  for (int ee = 0; ee < len;) {
202  EntityHandle ent;
203  bcopy(&data_from_proc[ee], &ent, sizeof(EntityHandle));
204  ents.insert(ent);
205 
206  if (received_ents) {
207  (*received_ents)[onodes[kk]].insert(ent);
208  }
209 
210  ee += block_size;
211 
212  if (verb >= VERBOSE) {
213  MOFEM_LOG_C("SYNC", Sev::noisy, "received %lu from %d at %d\n", ent,
214  onodes[kk], m_field.get_comm_rank());
215  }
216  }
217  }
218  if (verb >= VERBOSE) {
219  MOFEM_LOG_SEVERITY_SYNC(m_field.get_comm(), Sev::verbose);
220  }
221 
222  if (verb >= VERBOSE) {
223  MOFEM_LOG_C("SYNC", Sev::verbose, "Rank %d nb. after ents %u",
224  m_field.get_comm_rank(), ents.size());
225  MOFEM_LOG_SEVERITY_SYNC(m_field.get_comm(), Sev::verbose);
226  }
227 
228  // Cleaning
229  CHKERR PetscFree(s_waits);
230  CHKERR PetscFree(rbuf[0]);
231  CHKERR PetscFree(rbuf);
232  CHKERR PetscFree(r_waits);
233  CHKERR PetscFree(onodes);
234  CHKERR PetscFree(olengths);
235  CHKERR PetscCommDestroy(&comm);
236 
238 }
239 
// Convenience overload: synchronise entities without recording which rank
// each received entity came from (delegates with received_ents = nullptr).
// NOTE(review): the signature line (original line 240) is missing from this
// extract.
241  return synchroniseEntities(ents, nullptr, verb);
242 }
243 
// Synchronise all entities stored in a field's meshset: fetch the meshset
// contents, exchange shared handles across ranks, and add any newly received
// entities back into the meshset.
// NOTE(review): the signature prefix and begin/return macro lines (original
// lines 244, 247, 253) are missing from this extract.
245  int verb) {
246  MoFEM::Interface &m_field = cOre;
248  EntityHandle idm = m_field.get_field_meshset(name);
249  Range ents;
250  CHKERR m_field.get_moab().get_entities_by_handle(idm, ents, false);
251  CHKERR synchroniseEntities(ents, nullptr, verb);
252  CHKERR m_field.get_moab().add_entities(idm, ents);
254 }
255 
// Propagate parent handles and bit-ref levels from owning ranks to sharing
// ranks. For each owned shared entity a triple (remote handle, remote parent
// handle, bit level as unsigned long long) is packed into a per-processor
// buffer; after the exchange each receiver stores the parent and bit on its
// local copy of the entity.
// NOTE(review): several original lines are missing from this extract — the
// signature prefix, begin/return macros, and the CHK_MOAB_THROW/BitRefLevel
// lines inside get_parent_and_bit (original lines 315, 318, 319) — so the
// code below is incomplete as displayed.
257  int verb) {
258  MoFEM::Interface &m_field = cOre;
259  ParallelComm *pcomm = ParallelComm::get_pcomm(
260  &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);
261 
263 
// Restrict to entities we own that are shared with at least one other rank.
264  Range shared = ents;
265  CHKERR pcomm->filter_pstatus(shared, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1,
266  nullptr);
267  CHKERR pcomm->filter_pstatus(shared, PSTATUS_SHARED | PSTATUS_MULTISHARED,
268  PSTATUS_OR, -1, nullptr);
269 
270  auto th_RefParentHandle = cOre.get_th_RefParentHandle();
271  auto th_RefBitLevel = cOre.get_th_RefBitLevel();
272 
// Read the parallel-status byte of one entity from the MOAB pstatus tag.
273  auto get_pstatus = [&](const auto ent) {
274  unsigned char pstatus;
275  CHK_MOAB_THROW(m_field.get_moab().tag_get_data(pcomm->pstatus_tag(), &ent,
276  1, &pstatus),
277  "can not get pstatus");
278  return pstatus;
279  };
280 
// List the ranks sharing an entity; unused slots stay -1.
281  auto get_sharing_procs = [&](const auto ent, const auto pstatus) {
282  std::vector<int> sharing_procs(MAX_SHARING_PROCS, -1);
283  if (pstatus & PSTATUS_MULTISHARED) {
284  // entity is multi shared
285  CHK_MOAB_THROW(m_field.get_moab().tag_get_data(
286  pcomm->sharedps_tag(), &ent, 1, &sharing_procs[0]),
287  "can not ger sharing_procs_ptr");
288  } else if (pstatus & PSTATUS_SHARED) {
289  // shared
290  CHK_MOAB_THROW(m_field.get_moab().tag_get_data(pcomm->sharedp_tag(), &ent,
291  1, &sharing_procs[0]),
292  "can not get sharing proc");
293  }
294  return sharing_procs;
295  };
296 
// List, aligned with get_sharing_procs, the remote handles of an entity.
297  auto get_sharing_handles = [&](const auto ent, const auto pstatus) {
298  std::vector<EntityHandle> sharing_handles(MAX_SHARING_PROCS, 0);
299  if (pstatus & PSTATUS_MULTISHARED) {
300  // entity is multi shared
301  CHK_MOAB_THROW(m_field.get_moab().tag_get_data(
302  pcomm->sharedhs_tag(), &ent, 1, &sharing_handles[0]),
303  "get shared handles");
304  } else if (pstatus & PSTATUS_SHARED) {
305  // shared
306  CHK_MOAB_THROW(m_field.get_moab().tag_get_data(pcomm->sharedh_tag(), &ent,
307  1, &sharing_handles[0]),
308  "get sharing handle");
309  }
310  return sharing_handles;
311  };
312 
// Read the refinement parent handle and bit-ref level of an entity.
// NOTE(review): the CHK_MOAB_THROW wrappers and the BitRefLevel declaration
// were dropped by the extract; the original presumably checks both reads.
313  auto get_parent_and_bit = [&](const auto ent) {
314  EntityHandle parent;
316  m_field.get_moab().tag_get_data(th_RefParentHandle, &ent, 1, &parent),
317  "get parent");
320  m_field.get_moab().tag_get_data(th_RefBitLevel, &ent, 1, &bit),
321  "get parent");
322  return std::make_pair(parent, bit);
323  };
324 
// Write a parent handle onto an entity's RefParentHandle tag.
325  auto set_parent = [&](const auto ent, const auto parent) {
326  return m_field.get_moab().tag_set_data(th_RefParentHandle, &ent, 1,
327  &parent);
328  };
329 
// Write a bit-ref level onto an entity's RefBitLevel tag.
330  auto set_bit = [&](const auto ent, const auto bit) {
331  return m_field.get_moab().tag_set_data(th_RefBitLevel, &ent, 1, &bit);
332  };
333 
334  // make a buffer
335  std::vector<std::vector<unsigned long long>> sbuffer(m_field.get_comm_size());
336 
337  for (auto ent : shared) {
338 
339  auto pstatus = get_pstatus(ent);
340  auto sharing_procs = get_sharing_procs(ent, pstatus);
341  auto sharing_handles = get_sharing_handles(ent, pstatus);
342  auto [parent, bit] = get_parent_and_bit(ent);
343 
344  if (verb >= NOISY)
345  MOFEM_LOG("SYNC", Sev::noisy) << "pstatus " << std::bitset<8>(pstatus);
346 
// An entity with a parent must map that parent onto each sharing rank's
// handle for the parent; the parent's own sharing tables provide the lookup.
347  if (parent) {
348  auto pstatus_parent = get_pstatus(parent);
349  auto sharing_procs_parent = get_sharing_procs(parent, pstatus_parent);
350  auto sharing_handles_parent = get_sharing_handles(parent, pstatus_parent);
351 
352  for (int proc = 0; proc < MAX_SHARING_PROCS && -1 != sharing_procs[proc];
353  proc++) {
354  if (sharing_procs[proc] == -1)
355  SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSSIBLE_CASE,
356  "sharing processor not set");
357 
358  if (sharing_procs[proc] != m_field.get_comm_rank()) {
359 
// The child's sharing rank must also share the parent, otherwise the
// refinement data cannot be reconstructed remotely.
360  auto it = std::find(sharing_procs_parent.begin(),
361  sharing_procs_parent.end(), sharing_procs[proc]);
362  if (it == sharing_procs_parent.end()) {
363  SETERRQ1(
364  PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
365  "Sharing proc for parent entity can not be found proc = %u",
366  sharing_procs[proc]);
367  }
368 
369  auto handle_on_sharing_proc = sharing_handles[proc];
370  auto parent_handle_on_sharing_proc =
371  sharing_handles_parent[std::distance(sharing_procs_parent.begin(),
372  it)];
373  sbuffer[sharing_procs[proc]].push_back(handle_on_sharing_proc);
374  sbuffer[sharing_procs[proc]].push_back(parent_handle_on_sharing_proc);
// to_ullong throws if the bitset has bits beyond unsigned long long.
375  try {
376  sbuffer[sharing_procs[proc]].push_back(bit.to_ullong());
377  } catch (std::exception &ex) {
378  MOFEM_LOG("SELF", Sev::warning) << ex.what();
379  MOFEM_LOG("SELF", Sev::warning)
380  << "On " << ent << " "
381  << moab::CN::EntityTypeName(type_from_handle(ent));
382  MOFEM_LOG("SELF", Sev::warning) << "For bit ref " << bit;
383  }
384  if (verb >= NOISY)
385  MOFEM_LOG_C("SYNC", Sev::noisy, "send %lu (%lu) to %d at %d\n", ent,
386  handle_on_sharing_proc, sharing_procs[proc],
387  m_field.get_comm_rank());
388 
389  if (!(pstatus & PSTATUS_MULTISHARED))
390  break;
391  }
392  }
393  } else {
// No parent: send a zero parent handle alongside the bit level.
394  for (int proc = 0; proc < MAX_SHARING_PROCS && -1 != sharing_procs[proc];
395  proc++) {
396  if (sharing_procs[proc] == -1)
397  SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSSIBLE_CASE,
398  "sharing processor not set");
399 
400  if (sharing_procs[proc] != m_field.get_comm_rank()) {
401  auto handle_on_sharing_proc = sharing_handles[proc];
402  sbuffer[sharing_procs[proc]].push_back(handle_on_sharing_proc);
403  sbuffer[sharing_procs[proc]].push_back(parent);
404 
405  try {
406  sbuffer[sharing_procs[proc]].push_back(bit.to_ullong());
407  } catch (std::exception &ex) {
408  MOFEM_LOG("SELF", Sev::warning) << ex.what();
409  MOFEM_LOG("SELF", Sev::warning)
410  << "On " << ent << " "
411  << moab::CN::EntityTypeName(type_from_handle(ent));
412  MOFEM_LOG("SELF", Sev::warning) << "For bit ref " << bit;
413  }
414 
415  if (verb >= NOISY)
416  MOFEM_LOG_C("SYNC", Sev::noisy, "send %lu (%lu) to %d at %d\n", ent,
417  handle_on_sharing_proc, sharing_procs[proc],
418  m_field.get_comm_rank());
419 
420  if (!(pstatus & PSTATUS_MULTISHARED))
421  break;
422  }
423  }
424  }
425  }
426 
427  int nsends = 0; // number of messages to send
428  std::vector<int> sbuffer_lengths(
429  m_field.get_comm_size()); // length of the message to proc
430 
// Triples are unsigned long long on the wire, sent over an int channel.
431  const size_t block_size = sizeof(unsigned long long) / sizeof(int);
432  for (int proc = 0; proc < m_field.get_comm_size(); proc++) {
433 
434  if (!sbuffer[proc].empty()) {
435 
436  sbuffer_lengths[proc] = sbuffer[proc].size() * block_size;
437  nsends++;
438 
439  } else {
440 
441  sbuffer_lengths[proc] = 0;
442  }
443  }
444 
445  // Make sure it is a PETSc m_field.get_comm()
446  MPI_Comm comm;
447  CHKERR PetscCommDuplicate(m_field.get_comm(), &comm, NULL);
448 
449  std::vector<MPI_Status> status(m_field.get_comm_size());
450 
451  // Computes the number of messages a node expects to receive
452  int nrecvs; // number of messages received
453  CHKERR PetscGatherNumberOfMessages(comm, NULL, &sbuffer_lengths[0], &nrecvs);
454 
455  // Computes info about messages that a MPI-node will receive, including
456  // (from-id,length) pairs for each message.
457  int *onodes; // list of node-ids from which messages are expected
458  int *olengths; // corresponding message lengths
459  CHKERR PetscGatherMessageLengths(comm, nsends, nrecvs, &sbuffer_lengths[0],
460  &onodes, &olengths);
461 
462  // Gets a unique new tag from a PETSc communicator. All processors that share
463  // the communicator MUST call this routine EXACTLY the same number of times.
464  // This tag should only be used with the current objects communicator; do NOT
465  // use it with any other MPI communicator.
466  int tag;
467  CHKERR PetscCommGetNewTag(comm, &tag);
468 
469  // Allocate a buffer sufficient to hold messages of size specified in
470  // olengths. And post Irecvs on these buffers using node info from onodes
471  int **rbuf; // must be freed by user
472  MPI_Request *r_waits; // must be freed by user
473  // rbuf has pointers to messages. It has size of nrecvs (number of
474  // messages) +1. In the first index a block is allocated,
475  // such that rbuf[i] = rbuf[i-1]+olengths[i-1].
476  CHKERR PetscPostIrecvInt(comm, tag, nrecvs, onodes, olengths, &rbuf,
477  &r_waits);
478 
479  MPI_Request *s_waits; // status of send messages
480  CHKERR PetscMalloc1(nsends, &s_waits);
481 
482  // Send messages
483  for (int proc = 0, kk = 0; proc < m_field.get_comm_size(); proc++) {
484  if (!sbuffer_lengths[proc])
485  continue; // no message to send to this proc
486  CHKERR MPI_Isend(&(sbuffer[proc])[0], // buffer to send
487  sbuffer_lengths[proc], // message length
488  MPIU_INT, proc, // to proc
489  tag, comm, s_waits + kk);
490  kk++;
491  }
492 
493  // Wait for receives to complete
494  if (nrecvs)
495  CHKERR MPI_Waitall(nrecvs, r_waits, &status[0]);
496 
497  // Wait for send messages
498  if (nsends)
499  CHKERR MPI_Waitall(nsends, s_waits, &status[0]);
500 
501  if (verb >= VERBOSE) {
502  MOFEM_LOG_C("SYNC", Sev::verbose,
503  "Rank %d nb. shared to synchronise parents ents %u\n",
504  m_field.get_comm_rank(), shared.size());
505  }
506 
507  // synchronise range
508  for (int kk = 0; kk < nrecvs; kk++) {
509 
510  int len = olengths[kk];
511  int *data_from_proc = rbuf[kk];
512 
// Unpack the (handle, parent, bit) triples and write them onto the local
// copies of the entities.
513  for (int ee = 0; ee < len;) {
514  EntityHandle ent;
515  EntityHandle parent;
516  unsigned long long uulong_bit;
517  bcopy(&data_from_proc[ee], &ent, sizeof(EntityHandle));
518  ee += block_size;
519  bcopy(&data_from_proc[ee], &parent, sizeof(EntityHandle));
520  ee += block_size;
521  bcopy(&data_from_proc[ee], &uulong_bit, sizeof(unsigned long long));
522  ee += block_size;
523 
524  CHKERR set_parent(ent, parent);
525  CHKERR set_bit(ent, BitRefLevel(uulong_bit));
526 
527  if (verb >= VERBOSE) {
528  MOFEM_LOG_C("SYNC", Sev::noisy, "received %lu (%lu) from %d at %d\n",
529  ent, parent, onodes[kk], m_field.get_comm_rank());
530  MOFEM_LOG("SYNC", Sev::noisy) << "Bit " << BitRefLevel(uulong_bit);
531  }
532  }
533  }
534 
535  // Cleaning
536  CHKERR PetscFree(s_waits);
537  CHKERR PetscFree(rbuf[0]);
538  CHKERR PetscFree(rbuf);
539  CHKERR PetscFree(r_waits);
540  CHKERR PetscFree(onodes);
541  CHKERR PetscFree(olengths);
542  CHKERR PetscCommDestroy(&comm);
543 
544  if (verb >= VERBOSE)
545  MOFEM_LOG_SYNCHRONISE(m_field.get_comm());
546 
548 }
549 
// Assign partition and global-id tags to all finite elements of a problem
// and populate MOAB sharing data (pstatus, shared proc/handle tags) so every
// rank sees every element; global ids are then exchanged via MOAB.
// NOTE(review): the signature prefix, begin/return macros and the
// EntityForcesAndSourcesCore UId constructor lines (original lines 550, 553,
// 574, 578, 640) are missing from this extract.
551  const Problem *problem_ptr, const std::string &fe_name, int verb) {
552  MoFEM::Interface &m_field = cOre;
554  ParallelComm *pcomm = ParallelComm::get_pcomm(
555  &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);
556  std::vector<int> shprocs(MAX_SHARING_PROCS, 0);
557  std::vector<EntityHandle> shhandles(MAX_SHARING_PROCS, 0);
558  Range ents;
559  Tag th_gid = m_field.get_moab().globalId_tag();
// The layout distributes a contiguous block of global ids to each rank.
560  PetscLayout layout;
561  CHKERR problem_ptr->getNumberOfElementsByNameAndPart(m_field.get_comm(),
562  fe_name, &layout);
563  int gid, last_gid;
564  CHKERR PetscLayoutGetRange(layout, &gid, &last_gid);
565  CHKERR PetscLayoutDestroy(&layout);
566 
567  const FiniteElement_multiIndex *fes_ptr;
568  CHKERR m_field.get_finite_elements(&fes_ptr);
569 
570  auto fe_miit = fes_ptr->get<FiniteElement_name_mi_tag>().find(fe_name);
571  if (fe_miit != fes_ptr->get<FiniteElement_name_mi_tag>().end()) {
// Iterate all numbered finite elements of this FE definition.
572  auto fit =
573  problem_ptr->numeredFiniteElementsPtr->get<Unique_mi_tag>().lower_bound(
575  0, (*fe_miit)->getFEUId()));
576  auto hi_fe_it =
577  problem_ptr->numeredFiniteElementsPtr->get<Unique_mi_tag>().upper_bound(
579  get_id_for_max_type<MBENTITYSET>(), (*fe_miit)->getFEUId()));
580  for (; fit != hi_fe_it; ++fit) {
581 
582  auto ent = (*fit)->getEnt();
583  auto part = (*fit)->getPart();
584 
585  ents.insert(ent);
586  CHKERR m_field.get_moab().tag_set_data(pcomm->part_tag(), &ent, 1, &part);
// Only the owning part assigns this element's global id from its block.
587  if (part == pcomm->rank()) {
588  CHKERR m_field.get_moab().tag_set_data(th_gid, &ent, 1, &gid);
589  gid++;
590  }
// NOTE(review): clear() empties the vectors, yet shprocs[rrr]/shhandles[rrr]
// are indexed below — operator[] past size() is UB; resize/assign was likely
// intended. Verify against upstream before relying on this.
591  shprocs.clear();
592  shhandles.clear();
593 
594  if (pcomm->size() > 1) {
595 
596  unsigned char pstatus = 0;
597  if (pcomm->rank() != part) {
598  pstatus = PSTATUS_NOT_OWNED;
599  pstatus |= PSTATUS_GHOST;
600  }
601 
// With more than two ranks every element is multishared; with exactly two
// it is plainly shared.
602  if (pcomm->size() > 2) {
603  pstatus |= PSTATUS_SHARED;
604  pstatus |= PSTATUS_MULTISHARED;
605  } else {
606  pstatus |= PSTATUS_SHARED;
607  }
608 
// Record every other rank as a sharer; terminate the list with -1.
609  size_t rrr = 0;
610  for (size_t rr = 0; rr < pcomm->size(); ++rr) {
611  if (rr != pcomm->rank()) {
612  shhandles[rrr] = ent;
613  shprocs[rrr] = rr;
614  ++rrr;
615  }
616  }
617  for (; rrr != pcomm->size(); ++rrr)
618  shprocs[rrr] = -1;
619 
620  if (pstatus & PSTATUS_SHARED) {
621  CHKERR m_field.get_moab().tag_set_data(pcomm->sharedp_tag(), &ent, 1,
622  &shprocs[0]);
623  CHKERR m_field.get_moab().tag_set_data(pcomm->sharedh_tag(), &ent, 1,
624  &shhandles[0]);
625  }
626 
627  if (pstatus & PSTATUS_MULTISHARED) {
628  CHKERR m_field.get_moab().tag_set_data(pcomm->sharedps_tag(), &ent, 1,
629  &shprocs[0]);
630  CHKERR m_field.get_moab().tag_set_data(pcomm->sharedhs_tag(), &ent, 1,
631  &shhandles[0]);
632  }
633  CHKERR m_field.get_moab().tag_set_data(pcomm->pstatus_tag(), &ent, 1,
634  &pstatus);
635  }
636  }
637  }
638 
// Propagate owner-assigned global ids to all sharing ranks.
639  CHKERR pcomm->exchange_tags(th_gid, ents);
641 }
642 
// Name-based overload: look the problem up by name and delegate to the
// pointer-based resolveSharedFiniteElements.
// NOTE(review): the signature prefix and begin/return macro lines (original
// lines 643, 646, 650) are missing from this extract.
644  const std::string name, const std::string &fe_name, int verb) {
645  MoFEM::Interface &m_field = cOre;
647  const Problem *problem_ptr;
648  CHKERR m_field.get_problem(name, &problem_ptr);
649  CHKERR resolveSharedFiniteElements(problem_ptr, fe_name, verb);
651 }
652 
// Make the given entities multishared across all ranks: a temporary sparse
// global-id tag is created, the owner rank contributes the entities to
// MOAB's resolve_shared_ents, and the temporary tag is deleted afterwards.
// Optionally (verb >= NOISY) prints per-entity ownership diagnostics.
// NOTE(review): the signature prefix and begin/return macro lines (original
// lines 654, 658, 678, 680, 735, 783, 791) are missing from this extract.
655  const int num_entities,
656  const int owner_proc, int verb) {
657  MoFEM::Interface &m_field = cOre;
659 
// Nothing to resolve on a single-rank communicator.
660  if (m_field.get_comm_size() > 1) {
661 
662  ParallelComm *pcomm = ParallelComm::get_pcomm(
663  &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);
664 
665  Range all_ents_range;
666  all_ents_range.insert_list(entities, entities + num_entities);
667 
// Create (or fetch) a temporary sparse integer tag, default value -1, used
// only to give resolve_shared_ents matching ids across ranks.
668  auto get_tag = [&]() {
669  const int negone = -1;
670  Tag th;
671  m_field.get_moab().tag_get_handle("TMP_GLOBAL_ID_TAG_NAME", 1,
672  MB_TYPE_INTEGER, th,
673  MB_TAG_CREAT | MB_TAG_SPARSE, &negone);
674  return th;
675  };
676 
// Remove the temporary tag once sharing data is established.
677  auto delete_tag = [&](auto &&th_gid) {
679  CHKERR m_field.get_moab().tag_delete(th_gid);
681  };
682 
// Drive MOAB's shared-entity resolution using the temporary gid tag; the
// helper lambdas below prepare gids, skins, dimensions and owner entities.
683  auto resolve_shared_ents = [&](auto &&th_gid, auto &all_ents_range) {
// Assign consecutive ids 1..N to the entities and return the tag address.
684  auto set_gid = [&](auto &th_gid) {
685  std::vector<int> gids(num_entities);
686  for (size_t g = 0; g != all_ents_range.size(); ++g)
687  gids[g] = g + 1;
688  CHKERR m_field.get_moab().tag_set_data(th_gid, all_ents_range,
689  &*gids.begin());
690 
691  return &th_gid;
692  };
693 
// Split the range by dimension; resolve_shared_ents expects one Range per
// dimension 0..3.
694  auto get_skin_ents = [&](auto &all_ents_range) {
695  std::array<Range, 4> proc_ents_skin;
696  proc_ents_skin[3] = all_ents_range.subset_by_dimension(3);
697  proc_ents_skin[2] = all_ents_range.subset_by_dimension(2);
698  proc_ents_skin[1] = all_ents_range.subset_by_dimension(1);
699  proc_ents_skin[0] = all_ents_range.subset_by_dimension(0);
700  return proc_ents_skin;
701  };
702 
// Highest dimension present in the range, or -1 if the range is empty.
703  auto resolve_dim = [&](auto &all_ents_range) {
704  for (int resolve_dim = 3; resolve_dim >= 0; --resolve_dim) {
705  if (all_ents_range.num_of_dimension(resolve_dim))
706  return resolve_dim;
707  }
708  return -1;
709  };
710 
// Only the owner rank contributes entities; other ranks pass an empty range.
711  auto get_proc_ent = [&](auto &all_ents_range) {
712  Range proc_ent;
713  if (m_field.get_comm_rank() == owner_proc)
714  proc_ent = all_ents_range;
715  return proc_ent;
716  };
717 
718  auto resolve_shared_ents = [&](auto &&proc_ents, auto &&skin_ents) {
719  return pcomm->resolve_shared_ents(
720  0, proc_ents, resolve_dim(all_ents_range),
721  resolve_dim(all_ents_range), skin_ents.data(), set_gid(th_gid));
722  };
723 
724  CHKERR resolve_shared_ents(get_proc_ent(all_ents_range),
725  get_skin_ents(all_ents_range));
726 
727  return th_gid;
728  };
729 
730  CHKERR delete_tag(resolve_shared_ents(get_tag(), all_ents_range));
731 
732  if (verb >= NOISY) {
733 
// Diagnostic dump: owner, pstatus flags and sharing proc/handle lists for
// one entity, written to the synchronised log.
734  auto print_owner = [&](const EntityHandle e) {
736  int moab_owner_proc;
737  EntityHandle moab_owner_handle;
738  CHKERR pcomm->get_owner_handle(e, moab_owner_proc, moab_owner_handle);
739 
740  unsigned char pstatus = 0;
741 
742  CHKERR m_field.get_moab().tag_get_data(pcomm->pstatus_tag(), &e, 1,
743  &pstatus);
744 
745  std::vector<int> shprocs(MAX_SHARING_PROCS, 0);
746  std::vector<EntityHandle> shhandles(MAX_SHARING_PROCS, 0);
747 
748  CHKERR m_field.get_moab().tag_get_data(pcomm->sharedp_tag(), &e, 1,
749  &shprocs[0]);
750  CHKERR m_field.get_moab().tag_get_data(pcomm->sharedh_tag(), &e, 1,
751  &shhandles[0]);
752  if (pstatus & PSTATUS_MULTISHARED) {
753  CHKERR m_field.get_moab().tag_get_data(pcomm->sharedps_tag(), &e, 1,
754  &shprocs[0]);
755  CHKERR m_field.get_moab().tag_get_data(pcomm->sharedhs_tag(), &e, 1,
756  &shhandles[0]);
757  }
758 
759  std::ostringstream ss;
760 
761  ss << "Rank " << m_field.get_comm_rank() << " ";
762  if (!(pstatus & PSTATUS_NOT_OWNED))
763  ss << "OWNER ";
764  if (pstatus & PSTATUS_SHARED)
765  ss << "PSTATUS_SHARED ";
766  if (pstatus & PSTATUS_MULTISHARED)
767  ss << "PSTATUS_MULTISHARED ";
768 
769  ss << "owner " << moab_owner_proc << " (" << owner_proc << ") ";
770 
771  ss << "shprocs: ";
772  for (size_t r = 0; r != m_field.get_comm_size() + 1; ++r)
773  ss << shprocs[r] << " ";
774 
775  ss << "shhandles: ";
776  for (size_t r = 0; r != m_field.get_comm_size() + 1; ++r)
777  ss << shhandles[r] << " ";
778 
779  ss << std::endl;
780  MOFEM_LOG("SYNC", Sev::noisy) << ss.str();
781  MOFEM_LOG_SYNCHRONISE(m_field.get_comm());
782 
784  };
785 
786  for (auto e : all_ents_range)
787  CHKERR print_owner(e);
788  }
789  }
790 
792 }
793 
// Range overload: copy the handles into a contiguous vector and delegate to
// the pointer-based makeEntitiesMultishared.
// NOTE(review): the signature prefix and begin/return macro lines (original
// lines 794, 798, 806) are missing from this extract.
795  const int owner_proc,
796  int verb) {
797  MoFEM::Interface &m_field = cOre;
799  if (m_field.get_comm_size() > 1) {
800  const int num_ents = entities.size();
801  std::vector<EntityHandle> vec_ents(num_ents);
802  std::copy(entities.begin(), entities.end(), vec_ents.begin());
803  CHKERR makeEntitiesMultishared(&*vec_ents.begin(), num_ents, owner_proc,
804  verb);
805  }
807 }
808 
// Make all entities stored in a field's meshset multishared by gathering the
// meshset contents (recursively) and delegating to the pointer-based
// makeEntitiesMultishared.
// NOTE(review): `&*field_ents.begin()` dereferences the begin iterator of a
// possibly-empty vector — UB when the field has no entities; guard or use
// field_ents.data() — verify against upstream.
// NOTE(review): the signature prefix and begin/return macro lines (original
// lines 810, 813, 822) are missing from this extract.
811  const int owner_proc, int verb) {
812  MoFEM::Interface &m_field = cOre;
814  if (m_field.get_comm_size() > 1) {
815  EntityHandle field_meshset = m_field.get_field_meshset(field_name);
816  std::vector<EntityHandle> field_ents;
817  CHKERR m_field.get_moab().get_entities_by_handle(field_meshset, field_ents,
818  true);
819  CHKERR makeEntitiesMultishared(&*field_ents.begin(), field_ents.size(),
820  owner_proc, verb);
821  }
823 }
824 
// Exchange field DOF data stored on shared entities for the given field.
// Shared entities that carry DOFs are split into vertices and non-vertices
// because the field stores their data under different MOAB tags.
// NOTE(review): the signature prefix and begin/return macro lines (original
// lines 825, 828, 859, 865, 871) are missing from this extract.
826  int verb) {
827  MoFEM::Interface &m_field = cOre;
829  if (m_field.get_comm_size() > 1) {
830 
831  Range exchange_ents_data_verts, exchange_ents_data;
832 
// Iterate all field entities of this field via its bit-number UId range.
833  auto *field_ents = m_field.get_field_ents();
834  auto field_bit_number = m_field.get_field_bit_number(field_name);
835  auto lo = field_ents->get<Unique_mi_tag>().lower_bound(
836  FieldEntity::getLoBitNumberUId(field_bit_number));
837  auto hi = field_ents->get<Unique_mi_tag>().lower_bound(
838  FieldEntity::getHiBitNumberUId(field_bit_number));
839 
// Only entities that are shared (non-zero pstatus) and actually carry DOFs
// need data exchange.
840  for (auto it = lo; it != hi; ++it)
841  if (
842 
843  ((*it)->getPStatus()) &&
844 
845  (*it)->getNbDofsOnEnt()
846 
847  ) {
848  if ((*it)->getEntType() == MBVERTEX)
849  exchange_ents_data_verts.insert((*it)->getEnt());
850  else
851  exchange_ents_data.insert((*it)->getEnt());
852  }
853 
854  auto field_ptr = m_field.get_field_structure(field_name);
855  ParallelComm *pcomm = ParallelComm::get_pcomm(
856  &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);
857 
// Exchange one data tag over a range, skipping empty ranges.
858  auto exchange = [&](const Range &ents, Tag th) {
860  if (!ents.empty()) {
861  std::vector<Tag> tags;
862  tags.push_back(th);
863  CHKERR pcomm->exchange_tags(tags, tags, ents);
864  }
866  };
867 
868  CHKERR exchange(exchange_ents_data_verts, field_ptr->th_FieldDataVerts);
869  CHKERR exchange(exchange_ents_data, field_ptr->th_FieldData);
870  }
872 }
873 
875 CommInterface::partitionMesh(const Range &ents, const int dim,
876  const int adj_dim, const int n_parts,
877  Tag *th_vertex_weights, Tag *th_edge_weights,
878  Tag *th_part_weights, int verb, const bool debug) {
879  MoFEM::Interface &m_field = cOre;
881 
882  // get layout
883  int rstart, rend, nb_elems;
884  {
885  PetscLayout layout;
886  CHKERR PetscLayoutCreate(m_field.get_comm(), &layout);
887  CHKERR PetscLayoutSetBlockSize(layout, 1);
888  CHKERR PetscLayoutSetSize(layout, ents.size());
889  CHKERR PetscLayoutSetUp(layout);
890  CHKERR PetscLayoutGetSize(layout, &nb_elems);
891  CHKERR PetscLayoutGetRange(layout, &rstart, &rend);
892  CHKERR PetscLayoutDestroy(&layout);
893  if (verb >= VERBOSE) {
894  MOFEM_LOG("SYNC", Sev::inform)
895  << "Finite elements in problem: row lower " << rstart << " row upper "
896  << rend << " nb. elems " << nb_elems << " ( " << ents.size() << " )";
898  }
899  }
900 
901  std::vector<EntityHandle> weight_ents;
902  weight_ents.reserve(rend - rstart + 1);
903 
904  struct AdjBridge {
905  EntityHandle ent;
906  std::vector<int> adj;
907  AdjBridge(const EntityHandle ent, std::vector<int> &adj)
908  : ent(ent), adj(adj) {}
909  };
910 
911  typedef multi_index_container<
912  AdjBridge,
913  indexed_by<
914 
915  hashed_unique<member<AdjBridge, EntityHandle, &AdjBridge::ent>>
916 
917  >>
918  AdjBridgeMap;
919 
920  auto get_it = [&](auto i) {
921  auto it = ents.begin();
922  for (; i > 0; --i) {
923  if (it == ents.end())
924  break;
925  ++it;
926  }
927  return it;
928  };
929 
930  Range proc_ents;
931  proc_ents.insert(get_it(rstart), get_it(rend));
932  if (proc_ents.size() != rend - rstart)
933  SETERRQ2(PETSC_COMM_WORLD, MOFEM_DATA_INCONSISTENCY,
934  "Wrong number of elements in range %d != %d", proc_ents.size(),
935  rend - rstart);
936 
937  Range all_dim_ents;
938  CHKERR m_field.get_moab().get_adjacencies(
939  proc_ents, adj_dim, true, all_dim_ents, moab::Interface::UNION);
940 
941  AdjBridgeMap adj_bridge_map;
942  auto hint = adj_bridge_map.begin();
943  std::vector<int> adj;
944  for (auto ent : all_dim_ents) {
945  Range adj_ents;
946  CHKERR m_field.get_moab().get_adjacencies(&ent, 1, dim, false, adj_ents);
947  adj_ents = intersect(adj_ents, ents);
948  adj.clear();
949  adj.reserve(adj_ents.size());
950  for (auto a : adj_ents)
951  adj.emplace_back(ents.index(a));
952  hint = adj_bridge_map.emplace_hint(hint, ent, adj);
953  }
954 
955  int *_i;
956  int *_j;
957  {
958  const int nb_loc_elements = rend - rstart;
959  std::vector<int> i(nb_loc_elements + 1, 0), j;
960  {
961  std::vector<int> row_adj;
962  Range::iterator fe_it;
963  int ii, jj;
964  size_t max_row_size;
965  for (fe_it = proc_ents.begin(), ii = rstart, jj = 0, max_row_size = 0;
966  fe_it != proc_ents.end(); ++fe_it, ++ii) {
967 
968  if (type_from_handle(*fe_it) == MBENTITYSET) {
969  SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED,
970  "not yet implemented, don't know what to do for meshset "
971  "element");
972  } else {
973 
974  Range dim_ents;
975  CHKERR m_field.get_moab().get_adjacencies(&*fe_it, 1, adj_dim, false,
976  dim_ents);
977  dim_ents = intersect(dim_ents, all_dim_ents);
978 
979  row_adj.clear();
980  for (auto e : dim_ents) {
981  auto adj_it = adj_bridge_map.find(e);
982  if (adj_it != adj_bridge_map.end()) {
983 
984  for (const auto idx : adj_it->adj)
985  row_adj.push_back(idx);
986 
987  } else
988  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
989  "Entity not found");
990  }
991 
992  std::sort(row_adj.begin(), row_adj.end());
993  auto end = std::unique(row_adj.begin(), row_adj.end());
994 
995  size_t row_size = std::distance(row_adj.begin(), end);
996  max_row_size = std::max(max_row_size, row_size);
997  if (j.capacity() < (nb_loc_elements - jj) * max_row_size)
998  j.reserve(nb_loc_elements * max_row_size);
999 
1000  i[jj] = j.size();
1001  auto diag = ents.index(*fe_it);
1002  for (auto it = row_adj.begin(); it != end; ++it)
1003  if (*it != diag)
1004  j.push_back(*it);
1005  }
1006 
1007  ++jj;
1008 
1009  if (th_vertex_weights != NULL)
1010  weight_ents.push_back(*fe_it);
1011  }
1012 
1013  i[jj] = j.size();
1014  }
1015 
1016  CHKERR PetscMalloc(i.size() * sizeof(int), &_i);
1017  CHKERR PetscMalloc(j.size() * sizeof(int), &_j);
1018  copy(i.begin(), i.end(), _i);
1019  copy(j.begin(), j.end(), _j);
1020  }
1021 
1022  // get weights
1023  int *vertex_weights = NULL;
1024  if (th_vertex_weights != NULL) {
1025  CHKERR PetscMalloc(weight_ents.size() * sizeof(int), &vertex_weights);
1026  CHKERR m_field.get_moab().tag_get_data(*th_vertex_weights,
1027  &*weight_ents.begin(),
1028  weight_ents.size(), vertex_weights);
1029  }
1030 
1031  {
1032  Mat Adj;
1033  // Adjacency matrix used to partition problems, f.e. METIS
1034  CHKERR MatCreateMPIAdj(m_field.get_comm(), rend - rstart, nb_elems, _i, _j,
1035  PETSC_NULL, &Adj);
1036  CHKERR MatSetOption(Adj, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);
1037 
1038  if (debug) {
1039  Mat A;
1040  CHKERR MatConvert(Adj, MATMPIAIJ, MAT_INITIAL_MATRIX, &A);
1041  CHKERR MatView(A, PETSC_VIEWER_DRAW_WORLD);
1042  std::string wait;
1043  std::cin >> wait;
1044  CHKERR MatDestroy(&A);
1045  }
1046 
1047  // run pets to do partitioning
1048  MOFEM_LOG("WORLD", Sev::verbose) << "Start";
1049 
1050  MatPartitioning part;
1051  IS is;
1052  CHKERR MatPartitioningCreate(m_field.get_comm(), &part);
1053 
1054  CHKERR MatPartitioningSetAdjacency(part, Adj);
1055  CHKERR MatPartitioningSetFromOptions(part);
1056  CHKERR MatPartitioningSetNParts(part, n_parts);
1057  if (th_vertex_weights != NULL) {
1058  CHKERR MatPartitioningSetVertexWeights(part, vertex_weights);
1059  }
1060  PetscBool same;
1061  PetscObjectTypeCompare((PetscObject)part, MATPARTITIONINGPARMETIS, &same);
1062  if (same) {
1063 #ifdef PARMETIS
1064  CHKERR MatPartitioningApply_Parmetis_MoFEM(part, &is);
1065 #endif
1066  } else {
1067  CHKERR MatPartitioningApply(part, &is);
1068  }
1069 
1070  MOFEM_LOG("WORLD", Sev::verbose) << "End";
1071 
1072  // gather
1073  IS is_gather, is_num, is_gather_num;
1074  CHKERR ISAllGather(is, &is_gather);
1075  CHKERR ISPartitioningToNumbering(is, &is_num);
1076  CHKERR ISAllGather(is_num, &is_gather_num);
1077 
1078  const int *part_number, *gids;
1079  CHKERR ISGetIndices(is_gather, &part_number);
1080  CHKERR ISGetIndices(is_gather_num, &gids);
1081 
1082  // set partition tag and gid tag to entities
1083  ParallelComm *pcomm = ParallelComm::get_pcomm(
1084  &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);
1085  Tag part_tag = pcomm->part_tag();
1086  CHKERR m_field.get_moab().tag_set_data(part_tag, ents, part_number);
1087  Tag gid_tag = m_field.get_moab().globalId_tag();
1088 
1089  std::map<int, Range> parts_ents;
1090  {
1091  // get entities on each part
1092  Range::iterator eit = ents.begin();
1093  for (int ii = 0; eit != ents.end(); eit++, ii++) {
1094  parts_ents[part_number[ii]].insert(*eit);
1095  }
1096  Range tagged_sets;
1097  CHKERR m_field.get_moab().get_entities_by_type_and_tag(
1098  0, MBENTITYSET, &part_tag, NULL, 1, tagged_sets,
1099  moab::Interface::UNION);
1100  if (!tagged_sets.empty())
1101  CHKERR m_field.get_moab().tag_delete_data(part_tag, tagged_sets);
1102 
1103  if (n_parts > (int)tagged_sets.size()) {
1104  // too few partition sets - create missing ones
1105  int num_new = n_parts - tagged_sets.size();
1106  for (int i = 0; i < num_new; i++) {
1107  EntityHandle new_set;
1108  CHKERR m_field.get_moab().create_meshset(MESHSET_SET, new_set);
1109  tagged_sets.insert(new_set);
1110  }
1111  } else if (n_parts < (int)tagged_sets.size()) {
1112  // too many partition sets - delete extras
1113  int num_del = tagged_sets.size() - n_parts;
1114  for (int i = 0; i < num_del; i++) {
1115  EntityHandle old_set = tagged_sets.pop_back();
1116  CHKERR m_field.get_moab().delete_entities(&old_set, 1);
1117  }
1118  }
1119  // write a tag to those sets denoting they're partition sets, with a
1120  // value of the proc number
1121  std::vector<int> dum_ids(n_parts);
1122  for (int i = 0; i < n_parts; i++)
1123  dum_ids[i] = i;
1124  CHKERR m_field.get_moab().tag_set_data(part_tag, tagged_sets,
1125  &*dum_ids.begin());
1126  CHKERR m_field.get_moab().clear_meshset(tagged_sets);
1127 
1128  // get lower dimension entities on each part
1129  for (int pp = 0; pp != n_parts; pp++) {
1130  Range dim_ents = parts_ents[pp].subset_by_dimension(dim);
1131  for (int dd = dim - 1; dd >= 0; dd--) {
1132  Range adj_ents;
1133  CHKERR m_field.get_moab().get_adjacencies(
1134  dim_ents, dd, false, adj_ents, moab::Interface::UNION);
1135  parts_ents[pp].merge(adj_ents);
1136  }
1137  }
1138  for (int pp = 1; pp != n_parts; pp++) {
1139  for (int ppp = 0; ppp != pp; ppp++) {
1140  parts_ents[pp] = subtract(parts_ents[pp], parts_ents[ppp]);
1141  }
1142  }
1143 
1144  for (int pp = 0; pp != n_parts; pp++) {
1145  CHKERR m_field.get_moab().add_entities(tagged_sets[pp], parts_ents[pp]);
1146  }
1147 
1148  auto set_part = [&]() {
1150  for (EntityType t = MBEDGE; t != MBENTITYSET; ++t) {
1151  for (int pp = 0; pp != n_parts; pp++) {
1152  Range type_ents = parts_ents[pp].subset_by_type(t);
1153  CHKERR m_field.get_moab().tag_clear_data(part_tag, type_ents, &pp);
1154  }
1155  }
1157  };
1158 
1159  auto set_gid = [&]() {
1161  for (EntityType t = MBVERTEX; t != MBENTITYSET; ++t) {
1162 
1163  void *ptr;
1164  int count;
1165 
1166  int gid = 1; // moab indexing from 1a
1167  for (int pp = 0; pp != n_parts; pp++) {
1168  Range type_ents = parts_ents[pp].subset_by_type(t);
1169 
1170  auto eit = type_ents.begin();
1171  for (; eit != type_ents.end();) {
1172  CHKERR m_field.get_moab().tag_iterate(
1173  gid_tag, eit, type_ents.end(), count, ptr);
1174  auto gid_tag_ptr = static_cast<int *>(ptr);
1175  for (; count > 0; --count) {
1176  *gid_tag_ptr = gid;
1177  ++eit;
1178  ++gid;
1179  ++gid_tag_ptr;
1180  }
1181  }
1182  }
1183  }
1184 
1186  };
1187 
1188  CHKERR set_part();
1189  CHKERR set_gid();
1190 
1191  }
1192 
1193  if (debug) {
1194  if (m_field.get_comm_rank() == 0) {
1195  for (int rr = 0; rr != n_parts; rr++) {
1196  ostringstream ss;
1197  ss << "out_part_" << rr << ".vtk";
1198  MOFEM_LOG("SELF", Sev::inform) << "Save debug part mesh " << ss.str();
1199  EntityHandle meshset;
1200  CHKERR m_field.get_moab().create_meshset(MESHSET_SET, meshset);
1201  CHKERR m_field.get_moab().add_entities(meshset, parts_ents[rr]);
1202  CHKERR m_field.get_moab().write_file(ss.str().c_str(), "VTK", "",
1203  &meshset, 1);
1204  CHKERR m_field.get_moab().delete_entities(&meshset, 1);
1205  }
1206  }
1207  }
1208 
1209  CHKERR ISRestoreIndices(is_gather, &part_number);
1210  CHKERR ISRestoreIndices(is_gather_num, &gids);
1211  CHKERR ISDestroy(&is_num);
1212  CHKERR ISDestroy(&is_gather_num);
1213  CHKERR ISDestroy(&is_gather);
1214  CHKERR ISDestroy(&is);
1215  CHKERR MatPartitioningDestroy(&part);
1216  CHKERR MatDestroy(&Adj);
1217  }
1218 
1220 }
1221 
1222 } // namespace MoFEM
MoFEMFunctionReturnHot
#define MoFEMFunctionReturnHot(a)
Last executable line of each PETSc function used for error handling. Replaces return()
Definition: definitions.h:460
CHK_MOAB_THROW
#define CHK_MOAB_THROW(err, msg)
Check error code of MoAB function and throw MoFEM exception.
Definition: definitions.h:589
g
constexpr double g
Definition: shallow_wave.cpp:63
MoFEM::debug
const static bool debug
Definition: ConstrainMatrixCtx.cpp:9
MoFEM::CoreTmp< 0 >
Core (interface) class.
Definition: Core.hpp:82
MOFEM_LOG_SEVERITY_SYNC
#define MOFEM_LOG_SEVERITY_SYNC(comm, severity)
Synchronise "SYNC" on certain severity level.
Definition: LogManager.hpp:352
MoFEM::Problem::getNumberOfElementsByNameAndPart
MoFEMErrorCode getNumberOfElementsByNameAndPart(MPI_Comm comm, const std::string name, PetscLayout *layout) const
Get number of finite elements by name on processors.
Definition: ProblemsMultiIndices.cpp:98
EntityHandle
NOISY
@ NOISY
Definition: definitions.h:224
MoFEM::CoreInterface::get_comm
virtual MPI_Comm & get_comm() const =0
MoFEM::CoreInterface::get_field_structure
virtual const Field * get_field_structure(const std::string &name, enum MoFEMTypes bh=MF_EXIST) const =0
get field structure
MoFEM::Exceptions::MoFEMErrorCode
PetscErrorCode MoFEMErrorCode
MoFEM/PETSc error code.
Definition: Exceptions.hpp:56
MoFEM::CoreInterface::get_field_bit_number
virtual FieldBitNumber get_field_bit_number(const std::string name) const =0
get field bit number
MoFEM::CoreInterface::get_comm_rank
virtual int get_comm_rank() const =0
MoFEM::th
Tag th
Definition: Projection10NodeCoordsOnField.cpp:122
MoFEM::CoreInterface::get_problem
virtual const Problem * get_problem(const std::string problem_name) const =0
Get the problem object.
A
constexpr AssemblyType A
Definition: operators_tests.cpp:30
MoFEM::CommInterface::partitionMesh
MoFEMErrorCode partitionMesh(const Range &ents, const int dim, const int adj_dim, const int n_parts, Tag *th_vertex_weights=nullptr, Tag *th_edge_weights=nullptr, Tag *th_part_weights=nullptr, int verb=VERBOSE, const bool debug=false)
Set partition tag to each finite element in the problem.
Definition: CommInterface.cpp:875
MoFEM::CoreInterface::get_finite_elements
virtual const FiniteElement_multiIndex * get_finite_elements() const =0
Get the finite elements object.
MoFEM::CoreInterface::get_field_meshset
virtual EntityHandle get_field_meshset(const std::string name) const =0
get field meshset
MoFEM::CommInterface::resolveParentEntities
MoFEMErrorCode resolveParentEntities(const Range &ent, int verb=DEFAULT_VERBOSITY)
Synchronise parent entities.
Definition: CommInterface.cpp:256
MOFEM_IMPOSSIBLE_CASE
@ MOFEM_IMPOSSIBLE_CASE
Definition: definitions.h:35
sdf.r
int r
Definition: sdf.py:8
MoFEM::DeprecatedCoreInterface
Deprecated interface functions.
Definition: DeprecatedCoreInterface.hpp:16
VERBOSE
@ VERBOSE
Definition: definitions.h:222
MoFEM::CommInterface::makeFieldEntitiesMultishared
MoFEMErrorCode makeFieldEntitiesMultishared(const std::string field_name, const int owner_proc=0, int verb=DEFAULT_VERBOSITY)
make field entities multi shared
Definition: CommInterface.cpp:810
CHKERR
#define CHKERR
Inline error check.
Definition: definitions.h:548
MoFEM::CoreInterface::get_moab
virtual moab::Interface & get_moab()=0
MoFEM
implementation of Data Operators for Forces and Sources
Definition: Common.hpp:10
a
constexpr double a
Definition: approx_sphere.cpp:30
MoFEM::CommInterface::query_interface
MoFEMErrorCode query_interface(boost::typeindex::type_index type_index, UnknownInterface **iface) const
Definition: CommInterface.cpp:16
MOFEM_LOG_C
#define MOFEM_LOG_C(channel, severity, format,...)
Definition: LogManager.hpp:311
bit
auto bit
set bit
Definition: hanging_node_approx.cpp:75
MoFEM::CommInterface::resolveSharedFiniteElements
MoFEMErrorCode resolveSharedFiniteElements(const Problem *problem_ptr, const std::string &fe_name, int verb=DEFAULT_VERBOSITY)
resolve shared entities for finite elements in the problem
Definition: CommInterface.cpp:550
MoFEM::CoreInterface::get_basic_entity_data_ptr
virtual boost::shared_ptr< BasicEntityData > & get_basic_entity_data_ptr()=0
Get pointer to basic entity data.
MoFEM::CoreTmp< 0 >::get_th_RefParentHandle
Tag get_th_RefParentHandle() const
Definition: Core.hpp:197
MoFEM::EntFiniteElement::getLocalUniqueIdCalculate
UId getLocalUniqueIdCalculate() const
Generate UId for finite element entity.
Definition: FEMultiIndices.hpp:528
MOFEM_LOG_SYNCHRONISE
#define MOFEM_LOG_SYNCHRONISE(comm)
Synchronise "SYNC" channel.
Definition: LogManager.hpp:345
MoFEM::CoreTmp< 0 >::get_th_RefBitLevel
Tag get_th_RefBitLevel() const
Definition: Core.hpp:198
MoFEM::FieldEntity::getLoBitNumberUId
static UId getLoBitNumberUId(const FieldBitNumber bit_number)
Definition: FieldEntsMultiIndices.hpp:222
MoFEM::CommInterface::synchroniseFieldEntities
MoFEMErrorCode synchroniseFieldEntities(const std::string name, int verb=DEFAULT_VERBOSITY)
Definition: CommInterface.cpp:244
MoFEM::CoreInterface::get_comm_size
virtual int get_comm_size() const =0
MoFEM::CoreInterface::get_field_ents
virtual const FieldEntity_multiIndex * get_field_ents() const =0
Get the field ents object.
MoFEM::type_from_handle
auto type_from_handle(const EntityHandle h)
get type from entity handle
Definition: Templates.hpp:1898
t
constexpr double t
plate stiffness
Definition: plate.cpp:58
i
FTensor::Index< 'i', SPACE_DIM > i
Definition: hcurl_divergence_operator_2d.cpp:27
MoFEM::Problem::numeredFiniteElementsPtr
boost::shared_ptr< NumeredEntFiniteElement_multiIndex > numeredFiniteElementsPtr
store finite elements
Definition: ProblemsMultiIndices.hpp:77
field_name
constexpr auto field_name
Definition: poisson_2d_homogeneous.cpp:13
MoFEM::CommInterface::synchroniseEntities
MoFEMErrorCode synchroniseEntities(Range &ent, std::map< int, Range > *received_ents, int verb=DEFAULT_VERBOSITY)
synchronize entity range on processors (collective)
Definition: CommInterface.cpp:26
MoFEM::UnknownInterface
base class for all interface classes
Definition: UnknownInterface.hpp:34
Range
FTensor::dd
const Tensor2_symmetric_Expr< const ddTensor0< T, Dim, i, j >, typename promote< T, double >::V, Dim, i, j > dd(const Tensor0< T * > &a, const Index< i, Dim > index1, const Index< j, Dim > index2, const Tensor1< int, Dim > &d_ijk, const Tensor1< double, Dim > &d_xyz)
Definition: ddTensor0.hpp:33
MOFEM_LOG
#define MOFEM_LOG(channel, severity)
Log.
Definition: LogManager.hpp:308
MoFEM::CommInterface
Managing BitRefLevels.
Definition: CommInterface.hpp:21
MoFEM::FiniteElement_name_mi_tag
Definition: TagMultiIndices.hpp:26
MoFEM::CommInterface::CommInterface
CommInterface(const MoFEM::Core &core)
Definition: CommInterface.cpp:23
j
FTensor::Index< 'j', 3 > j
Definition: matrix_function.cpp:19
MOFEM_DATA_INCONSISTENCY
@ MOFEM_DATA_INCONSISTENCY
Definition: definitions.h:31
MoFEM::FieldEntity::getHiBitNumberUId
static UId getHiBitNumberUId(const FieldBitNumber bit_number)
Definition: FieldEntsMultiIndices.hpp:228
MoFEM::CommInterface::makeEntitiesMultishared
MoFEMErrorCode makeEntitiesMultishared(const EntityHandle *entities, const int num_entities, const int owner_proc=0, int verb=DEFAULT_VERBOSITY)
make entities from proc 0 shared on all proc
Definition: CommInterface.cpp:654
FiniteElement_multiIndex
multi_index_container< boost::shared_ptr< FiniteElement >, indexed_by< hashed_unique< tag< FiniteElement_Meshset_mi_tag >, member< FiniteElement, EntityHandle, &FiniteElement::meshset > >, hashed_unique< tag< BitFEId_mi_tag >, const_mem_fun< FiniteElement, BitFEId, &FiniteElement::getId >, HashBit< BitFEId >, EqBit< BitFEId > >, ordered_unique< tag< FiniteElement_name_mi_tag >, const_mem_fun< FiniteElement, boost::string_ref, &FiniteElement::getNameRef > > > > FiniteElement_multiIndex
MultiIndex for entities for FiniteElement.
Definition: FEMultiIndices.hpp:849
MoFEM::CommInterface::cOre
MoFEM::Core & cOre
Definition: CommInterface.hpp:26
MoFEM::Types::BitRefLevel
std::bitset< BITREFLEVEL_SIZE > BitRefLevel
Bit structure attached to each entity identifying to what mesh entity is attached.
Definition: Types.hpp:40
MoFEMFunctionBeginHot
#define MoFEMFunctionBeginHot
First executable line of each MoFEM function, used for error handling. Final line of MoFEM functions ...
Definition: definitions.h:453
MoFEM::Unique_mi_tag
Definition: TagMultiIndices.hpp:18
MoFEM::Problem
keeps basic data about problem
Definition: ProblemsMultiIndices.hpp:54
MoFEM::CommInterface::exchangeFieldData
MoFEMErrorCode exchangeFieldData(const std::string field_name, int verb=DEFAULT_VERBOSITY)
Exchange field data.
Definition: CommInterface.cpp:825
MoFEMFunctionReturn
#define MoFEMFunctionReturn(a)
Last executable line of each PETSc function used for error handling. Replaces return()
Definition: definitions.h:429
MOFEM_NOT_IMPLEMENTED
@ MOFEM_NOT_IMPLEMENTED
Definition: definitions.h:32
MoFEMFunctionBegin
#define MoFEMFunctionBegin
First executable line of each MoFEM function, used for error handling. Final line of MoFEM functions ...
Definition: definitions.h:359