v0.14.0
CommInterface.cpp
1 /** \file CommInterface.cpp
2  * \brief Functions for interprocessor communications
3  * \mofem_comm
4  */
5 
6 namespace MoFEM {
7 
8 #ifdef PARMETIS
9 
10 MoFEMErrorCode MatPartitioningApply_Parmetis_MoFEM(MatPartitioning part,
11  IS *partitioning);
12 
13 #endif // PARMETIS
14 
15 MoFEMErrorCode
16 CommInterface::query_interface(boost::typeindex::type_index type_index,
17  UnknownInterface **iface) const {
18  MoFEMFunctionBeginHot;
19  *iface = const_cast<CommInterface *>(this);
20  MoFEMFunctionReturnHot(0);
21 }
22 
23 CommInterface::CommInterface(const MoFEM::Core &core)
24  : cOre(const_cast<MoFEM::Core &>(core)), dEbug(false) {}
25 
26 MoFEMErrorCode CommInterface::synchroniseEntities(
27  Range &ents, std::map<int, Range> *received_ents, int verb) {
28  MoFEM::Interface &m_field = cOre;
29  ParallelComm *pcomm = ParallelComm::get_pcomm(
30  &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);
31  MoFEMFunctionBegin;
32 
33  auto get_pstatus = [&](const auto ent) {
34  unsigned char pstatus;
35  CHK_MOAB_THROW(m_field.get_moab().tag_get_data(pcomm->pstatus_tag(), &ent,
36  1, &pstatus),
37  "can not get pstatus");
38  return pstatus;
39  };
40 
41  auto get_sharing_procs = [&](const auto ent, const auto pstatus) {
42  std::vector<int> sharing_procs(MAX_SHARING_PROCS, -1);
43  if (pstatus & PSTATUS_MULTISHARED) {
44  // entity is multi shared
45  CHK_MOAB_THROW(m_field.get_moab().tag_get_data(
46  pcomm->sharedps_tag(), &ent, 1, &sharing_procs[0]),
47  "can not ger sharing_procs_ptr");
48  } else if (pstatus & PSTATUS_SHARED) {
49  // shared
50  CHK_MOAB_THROW(m_field.get_moab().tag_get_data(pcomm->sharedp_tag(), &ent,
51  1, &sharing_procs[0]),
52  "can not get sharing proc");
53  }
54  return sharing_procs;
55  };
56 
57  auto get_sharing_handles = [&](const auto ent, const auto pstatus) {
58  std::vector<EntityHandle> sharing_handles(MAX_SHARING_PROCS, 0);
59  if (pstatus & PSTATUS_MULTISHARED) {
60  // entity is multi shared
61  CHK_MOAB_THROW(m_field.get_moab().tag_get_data(
62  pcomm->sharedhs_tag(), &ent, 1, &sharing_handles[0]),
63  "get shared handles");
64  } else if (pstatus & PSTATUS_SHARED) {
65  // shared
66  CHK_MOAB_THROW(m_field.get_moab().tag_get_data(pcomm->sharedh_tag(), &ent,
67  1, &sharing_handles[0]),
68  "get sharing handle");
69  }
70  return sharing_handles;
71  };
72 
73  // make a buffer of entities to send
74  std::vector<std::vector<EntityHandle>> sbuffer(m_field.get_comm_size());
75 
76  for (auto ent : ents) {
77 
78  auto pstatus = get_pstatus(ent);
79  if (pstatus) {
80  auto sharing_procs = get_sharing_procs(ent, pstatus);
81  auto sharing_handles = get_sharing_handles(ent, pstatus);
82 
83  if (verb >= NOISY) {
84  MOFEM_LOG("SYNC", Sev::noisy) << "pstatus " << std::bitset<8>(pstatus);
85  }
86 
87  for (int proc = 0; proc < MAX_SHARING_PROCS && -1 != sharing_procs[proc];
88  proc++) {
89  if (sharing_procs[proc] == -1)
90  SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSSIBLE_CASE,
91  "sharing processor not set");
92 
93  if (sharing_procs[proc] == m_field.get_comm_rank())
94  continue;
95 
96  const auto handle_on_sharing_proc = sharing_handles[proc];
97  // add entity to send, handle on the other side
98  sbuffer[sharing_procs[proc]].push_back(handle_on_sharing_proc);
99  if (verb >= NOISY) {
100  MOFEM_LOG_C("SYNC", Sev::noisy, "send %lu (%lu) to %d at %d\n", ent,
101  handle_on_sharing_proc, sharing_procs[proc],
102  m_field.get_comm_rank());
103  }
104 
105  if (!(pstatus & PSTATUS_MULTISHARED))
106  break;
107  }
108  }
109  }
110  if (verb >= NOISY) {
111  MOFEM_LOG_SEVERITY_SYNC(m_field.get_comm(), Sev::noisy);
112  }
113 
114  int nsends = 0; // number of messages to send
115  std::vector<int> sbuffer_lengths(
116  m_field.get_comm_size()); // length of the message to proc
117  const size_t block_size =
118  sizeof(EntityHandle) /
119  sizeof(int); // FIXME check if that works on given architecture!
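  // Note: the send buffers hold 64-bit EntityHandle values but are transmitted
  // as MPIU_INT, so each handle occupies block_size ints (on a typical LP64
  // build sizeof(EntityHandle) == 8 and sizeof(int) == 4, hence block_size == 2).
  // A cheap guard against exotic architectures would be, for example:
  //   static_assert(sizeof(EntityHandle) % sizeof(int) == 0,
  //                 "EntityHandle must pack into whole ints");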
120  for (int proc = 0; proc < m_field.get_comm_size(); proc++) {
121 
122  if (!sbuffer[proc].empty()) {
123 
124  sbuffer_lengths[proc] = sbuffer[proc].size() * block_size;
125  nsends++;
126 
127  } else {
128 
129  sbuffer_lengths[proc] = 0;
130  }
131  }
132 
133  // Make sure we use a PETSc communicator (duplicate of m_field.get_comm())
134  MPI_Comm comm;
135  CHKERR PetscCommDuplicate(m_field.get_comm(), &comm, NULL);
136 
137  std::vector<MPI_Status> status(m_field.get_comm_size());
138 
139  // Computes the number of messages a node expects to receive
140  int nrecvs; // number of messages received
141  CHKERR PetscGatherNumberOfMessages(comm, NULL, &sbuffer_lengths[0], &nrecvs);
142 
143  // Computes info about messages that an MPI node will receive, including
144  // (from-id,length) pairs for each message.
145  int *onodes; // list of node-ids from which messages are expected
146  int *olengths; // corresponding message lengths
147  CHKERR PetscGatherMessageLengths(comm, nsends, nrecvs, &sbuffer_lengths[0],
148  &onodes, &olengths);
149 
150  // Gets a unique new tag from a PETSc communicator. All processors that share
151  // the communicator MUST call this routine EXACTLY the same number of times.
152  // This tag should only be used with the current object's communicator; do NOT
153  // use it with any other MPI communicator.
154  int tag;
155  CHKERR PetscCommGetNewTag(comm, &tag);
156 
157  // Allocate a buffer sufficient to hold messages of size specified in
158  // olengths. And post Irecvs on these buffers using node info from onodes
159  int **rbuf; // must be freed by user
160  MPI_Request *r_waits; // must be freed by user
161  // rbuf has pointers to messages. It has size nrecvs (number of
162  // messages) + 1. At the first index one contiguous block is allocated,
163  // such that rbuf[i] = rbuf[i-1] + olengths[i-1].
164  CHKERR PetscPostIrecvInt(comm, tag, nrecvs, onodes, olengths, &rbuf,
165  &r_waits);
166 
167  MPI_Request *s_waits; // requests for send messages
168  CHKERR PetscMalloc1(nsends, &s_waits);
169 
170  // Send messages
171  for (int proc = 0, kk = 0; proc < m_field.get_comm_size(); proc++) {
172  if (!sbuffer_lengths[proc])
173  continue; // no message to send to this proc
174  CHKERR MPI_Isend(&(sbuffer[proc])[0], // buffer to send
175  sbuffer_lengths[proc], // message length
176  MPIU_INT, proc, // to proc
177  tag, comm, s_waits + kk);
178  kk++;
179  }
180 
181  // Wait for receives to complete
182  if (nrecvs)
183  CHKERR MPI_Waitall(nrecvs, r_waits, &status[0]);
184 
185  // Wait for send messages
186  if (nsends)
187  CHKERR MPI_Waitall(nsends, s_waits, &status[0]);
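  // Note: the exchange above follows the usual PETSc point-to-point pattern:
  // PetscGatherNumberOfMessages / PetscGatherMessageLengths establish who
  // sends what to whom, PetscPostIrecvInt posts the matching non-blocking
  // receives, MPI_Isend posts the sends, and the two MPI_Waitall calls
  // complete both sides before the buffers are read or reused.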
188 
189  if (verb >= VERBOSE) {
190  MOFEM_LOG_C("SYNC", Sev::verbose, "Rank %d nb. before ents %u\n",
191  m_field.get_comm_rank(), ents.size());
192  MOFEM_LOG_SEVERITY_SYNC(m_field.get_comm(), Sev::verbose);
193  }
194 
195  // synchronise range
196  for (int kk = 0; kk < nrecvs; kk++) {
197 
198  int len = olengths[kk];
199  int *data_from_proc = rbuf[kk];
200 
201  for (int ee = 0; ee < len;) {
202  EntityHandle ent;
203  bcopy(&data_from_proc[ee], &ent, sizeof(EntityHandle));
204  ents.insert(ent);
205 
206  if (received_ents) {
207  (*received_ents)[onodes[kk]].insert(ent);
208  }
209 
210  ee += block_size;
211 
212  if (verb >= VERBOSE) {
213  MOFEM_LOG_C("SYNC", Sev::noisy, "received %lu from %d at %d\n", ent,
214  onodes[kk], m_field.get_comm_rank());
215  }
216  }
217  }
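  // Note: each received message is a flat array of ints; bcopy reassembles
  // every block_size ints back into an EntityHandle. The handle is already
  // valid on this rank because the sender transmitted the handle stored in
  // its sharedh/sharedhs tags for this rank, not its own local handle.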
218  if (verb >= VERBOSE) {
219  MOFEM_LOG_SEVERITY_SYNC(m_field.get_comm(), Sev::verbose);
220  }
221 
222  if (verb >= VERBOSE) {
223  MOFEM_LOG_C("SYNC", Sev::verbose, "Rank %d nb. after ents %u",
224  m_field.get_comm_rank(), ents.size());
225  MOFEM_LOG_SEVERITY_SYNC(m_field.get_comm(), Sev::verbose);
226  }
227 
228  // Cleaning
229  CHKERR PetscFree(s_waits);
230  CHKERR PetscFree(rbuf[0]);
231  CHKERR PetscFree(rbuf);
232  CHKERR PetscFree(r_waits);
233  CHKERR PetscFree(onodes);
234  CHKERR PetscFree(olengths);
235  CHKERR PetscCommDestroy(&comm);
236 
237  MoFEMFunctionReturn(0);
238 }
239 
240 MoFEMErrorCode CommInterface::synchroniseEntities(Range &ents, int verb) {
241  return synchroniseEntities(ents, nullptr, verb);
242 }
243 
244 MoFEMErrorCode CommInterface::synchroniseFieldEntities(const std::string name,
245  int verb) {
246  MoFEM::Interface &m_field = cOre;
247  MoFEMFunctionBegin;
248  EntityHandle idm = m_field.get_field_meshset(name);
249  Range ents;
250  CHKERR m_field.get_moab().get_entities_by_handle(idm, ents, false);
251  CHKERR synchroniseEntities(ents, nullptr, verb);
252  CHKERR m_field.get_moab().add_entities(idm, ents);
253  MoFEMFunctionReturn(0);
254 }
255 
256 MoFEMErrorCode CommInterface::resolveParentEntities(const Range &ents,
257  int verb) {
258  MoFEM::Interface &m_field = cOre;
259  ParallelComm *pcomm = ParallelComm::get_pcomm(
260  &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);
261 
262  MoFEMFunctionBegin;
263 
264  Range shared = ents;
265  CHKERR pcomm->filter_pstatus(shared, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1,
266  nullptr);
267  CHKERR pcomm->filter_pstatus(shared, PSTATUS_SHARED | PSTATUS_MULTISHARED,
268  PSTATUS_OR, -1, nullptr);
269 
270  auto th_RefParentHandle = cOre.get_th_RefParentHandle();
271  auto th_RefBitLevel = cOre.get_th_RefBitLevel();
272 
273  auto get_pstatus = [&](const auto ent) {
274  unsigned char pstatus;
275  CHK_MOAB_THROW(m_field.get_moab().tag_get_data(pcomm->pstatus_tag(), &ent,
276  1, &pstatus),
277  "can not get pstatus");
278  return pstatus;
279  };
280 
281  auto get_sharing_procs = [&](const auto ent, const auto pstatus) {
282  std::vector<int> sharing_procs(MAX_SHARING_PROCS, -1);
283  if (pstatus & PSTATUS_MULTISHARED) {
284  // entity is multi shared
285  CHK_MOAB_THROW(m_field.get_moab().tag_get_data(
286  pcomm->sharedps_tag(), &ent, 1, &sharing_procs[0]),
287  "can not ger sharing_procs_ptr");
288  } else if (pstatus & PSTATUS_SHARED) {
289  // shared
290  CHK_MOAB_THROW(m_field.get_moab().tag_get_data(pcomm->sharedp_tag(), &ent,
291  1, &sharing_procs[0]),
292  "can not get sharing proc");
293  }
294  return sharing_procs;
295  };
296 
297  auto get_sharing_handles = [&](const auto ent, const auto pstatus) {
298  std::vector<EntityHandle> sharing_handles(MAX_SHARING_PROCS, 0);
299  if (pstatus & PSTATUS_MULTISHARED) {
300  // entity is multi shared
301  CHK_MOAB_THROW(m_field.get_moab().tag_get_data(
302  pcomm->sharedhs_tag(), &ent, 1, &sharing_handles[0]),
303  "get shared handles");
304  } else if (pstatus & PSTATUS_SHARED) {
305  // shared
306  CHK_MOAB_THROW(m_field.get_moab().tag_get_data(pcomm->sharedh_tag(), &ent,
307  1, &sharing_handles[0]),
308  "get sharing handle");
309  }
310  return sharing_handles;
311  };
312 
313  auto get_parent_and_bit = [&](const auto ent) {
314  EntityHandle parent;
315  CHK_MOAB_THROW(
316  m_field.get_moab().tag_get_data(th_RefParentHandle, &ent, 1, &parent),
317  "get parent");
318  BitRefLevel bit;
319  CHK_MOAB_THROW(
320  m_field.get_moab().tag_get_data(th_RefBitLevel, &ent, 1, &bit),
321  "get bit ref level");
322  return std::make_pair(parent, bit);
323  };
324 
325  auto set_parent = [&](const auto ent, const auto parent) {
326  return m_field.get_moab().tag_set_data(th_RefParentHandle, &ent, 1,
327  &parent);
328  };
329 
330  auto set_bit = [&](const auto ent, const auto bit) {
331  return m_field.get_moab().tag_set_data(th_RefBitLevel, &ent, 1, &bit);
332  };
333 
334  // make a buffer
335  std::vector<std::vector<unsigned long long>> sbuffer(m_field.get_comm_size());
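  // Note: for every shared entity three 64-bit values are packed, in this
  // order: the entity handle as known on the receiving rank, the parent handle
  // as known on the receiving rank (zero when the entity has no parent), and
  // the BitRefLevel converted with bit.to_ullong(). The unpacking loop further
  // below relies on exactly this ordering.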
336 
337  for (auto ent : shared) {
338 
339  auto pstatus = get_pstatus(ent);
340  auto sharing_procs = get_sharing_procs(ent, pstatus);
341  auto sharing_handles = get_sharing_handles(ent, pstatus);
342  auto [parent, bit] = get_parent_and_bit(ent);
343 
344  if (verb >= NOISY)
345  MOFEM_LOG("SYNC", Sev::noisy) << "pstatus " << std::bitset<8>(pstatus);
346 
347  if (parent) {
348  auto pstatus_parent = get_pstatus(parent);
349  auto sharing_procs_parent = get_sharing_procs(parent, pstatus_parent);
350  auto sharing_handles_parent = get_sharing_handles(parent, pstatus_parent);
351 
352  for (int proc = 0; proc < MAX_SHARING_PROCS && -1 != sharing_procs[proc];
353  proc++) {
354  if (sharing_procs[proc] == -1)
355  SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSSIBLE_CASE,
356  "sharing processor not set");
357 
358  if (sharing_procs[proc] != m_field.get_comm_rank()) {
359 
360  auto it = std::find(sharing_procs_parent.begin(),
361  sharing_procs_parent.end(), sharing_procs[proc]);
362  if (it == sharing_procs_parent.end()) {
363  SETERRQ1(
364  PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
365  "Sharing proc for parent entity can not be found proc = %u",
366  sharing_procs[proc]);
367  }
368 
369  auto handle_on_sharing_proc = sharing_handles[proc];
370  auto parent_handle_on_sharing_proc =
371  sharing_handles_parent[std::distance(sharing_procs_parent.begin(),
372  it)];
373  sbuffer[sharing_procs[proc]].push_back(handle_on_sharing_proc);
374  sbuffer[sharing_procs[proc]].push_back(parent_handle_on_sharing_proc);
375  try {
376  sbuffer[sharing_procs[proc]].push_back(bit.to_ullong());
377  } catch (std::exception &ex) {
378  MOFEM_LOG("SELF", Sev::warning) << ex.what();
379  MOFEM_LOG("SELF", Sev::warning)
380  << "On " << ent << " "
381  << moab::CN::EntityTypeName(type_from_handle(ent));
382  MOFEM_LOG("SELF", Sev::warning) << "For bit ref " << bit;
383  }
384  if (verb >= NOISY)
385  MOFEM_LOG_C("SYNC", Sev::noisy, "send %lu (%lu) to %d at %d\n", ent,
386  handle_on_sharing_proc, sharing_procs[proc],
387  m_field.get_comm_rank());
388 
389  if (!(pstatus & PSTATUS_MULTISHARED))
390  break;
391  }
392  }
393  } else {
394  for (int proc = 0; proc < MAX_SHARING_PROCS && -1 != sharing_procs[proc];
395  proc++) {
396  if (sharing_procs[proc] == -1)
397  SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSSIBLE_CASE,
398  "sharing processor not set");
399 
400  if (sharing_procs[proc] != m_field.get_comm_rank()) {
401  auto handle_on_sharing_proc = sharing_handles[proc];
402  sbuffer[sharing_procs[proc]].push_back(handle_on_sharing_proc);
403  sbuffer[sharing_procs[proc]].push_back(parent);
404 
405  try {
406  sbuffer[sharing_procs[proc]].push_back(bit.to_ullong());
407  } catch (std::exception &ex) {
408  MOFEM_LOG("SELF", Sev::warning) << ex.what();
409  MOFEM_LOG("SELF", Sev::warning)
410  << "On " << ent << " "
411  << moab::CN::EntityTypeName(type_from_handle(ent));
412  MOFEM_LOG("SELF", Sev::warning) << "For bit ref " << bit;
413  }
414 
415  if (verb >= NOISY)
416  MOFEM_LOG_C("SYNC", Sev::noisy, "send %lu (%lu) to %d at %d\n", ent,
417  handle_on_sharing_proc, sharing_procs[proc],
418  m_field.get_comm_rank());
419 
420  if (!(pstatus & PSTATUS_MULTISHARED))
421  break;
422  }
423  }
424  }
425  }
426 
427  int nsends = 0; // number of messages to send
428  std::vector<int> sbuffer_lengths(
429  m_field.get_comm_size()); // length of the message to proc
430 
431  const size_t block_size = sizeof(unsigned long long) / sizeof(int);
432  for (int proc = 0; proc < m_field.get_comm_size(); proc++) {
433 
434  if (!sbuffer[proc].empty()) {
435 
436  sbuffer_lengths[proc] = sbuffer[proc].size() * block_size;
437  nsends++;
438 
439  } else {
440 
441  sbuffer_lengths[proc] = 0;
442  }
443  }
444 
445  // Make sure we use a PETSc communicator (duplicate of m_field.get_comm())
446  MPI_Comm comm;
447  CHKERR PetscCommDuplicate(m_field.get_comm(), &comm, NULL);
448 
449  std::vector<MPI_Status> status(m_field.get_comm_size());
450 
451  // Computes the number of messages a node expects to receive
452  int nrecvs; // number of messages received
453  CHKERR PetscGatherNumberOfMessages(comm, NULL, &sbuffer_lengths[0], &nrecvs);
454 
455  // Computes info about messages that an MPI node will receive, including
456  // (from-id,length) pairs for each message.
457  int *onodes; // list of node-ids from which messages are expected
458  int *olengths; // corresponding message lengths
459  CHKERR PetscGatherMessageLengths(comm, nsends, nrecvs, &sbuffer_lengths[0],
460  &onodes, &olengths);
461 
462  // Gets a unique new tag from a PETSc communicator. All processors that share
463  // the communicator MUST call this routine EXACTLY the same number of times.
464  // This tag should only be used with the current object's communicator; do NOT
465  // use it with any other MPI communicator.
466  int tag;
467  CHKERR PetscCommGetNewTag(comm, &tag);
468 
469  // Allocate a buffer sufficient to hold messages of size specified in
470  // olengths. And post Irecvs on these buffers using node info from onodes
471  int **rbuf; // must be freed by user
472  MPI_Request *r_waits; // must be freed by user
473  // rbuf has pointers to messages. It has size nrecvs (number of
474  // messages) + 1. At the first index one contiguous block is allocated,
475  // such that rbuf[i] = rbuf[i-1] + olengths[i-1].
476  CHKERR PetscPostIrecvInt(comm, tag, nrecvs, onodes, olengths, &rbuf,
477  &r_waits);
478 
479  MPI_Request *s_waits; // requests for send messages
480  CHKERR PetscMalloc1(nsends, &s_waits);
481 
482  // Send messages
483  for (int proc = 0, kk = 0; proc < m_field.get_comm_size(); proc++) {
484  if (!sbuffer_lengths[proc])
485  continue; // no message to send to this proc
486  CHKERR MPI_Isend(&(sbuffer[proc])[0], // buffer to send
487  sbuffer_lengths[proc], // message length
488  MPIU_INT, proc, // to proc
489  tag, comm, s_waits + kk);
490  kk++;
491  }
492 
493  // Wait for receives to complete
494  if (nrecvs)
495  CHKERR MPI_Waitall(nrecvs, r_waits, &status[0]);
496 
497  // Wait for send messages
498  if (nsends)
499  CHKERR MPI_Waitall(nsends, s_waits, &status[0]);
500 
501  if (verb >= VERBOSE) {
502  MOFEM_LOG_C("SYNC", Sev::verbose,
503  "Rank %d nb. shared to synchronise parents ents %u\n",
504  m_field.get_comm_rank(), shared.size());
505  }
506 
507  // synchronise range
508  for (int kk = 0; kk < nrecvs; kk++) {
509 
510  int len = olengths[kk];
511  int *data_from_proc = rbuf[kk];
512 
513  for (int ee = 0; ee < len;) {
514  EntityHandle ent;
515  EntityHandle parent;
516  unsigned long long uulong_bit;
517  bcopy(&data_from_proc[ee], &ent, sizeof(EntityHandle));
518  ee += block_size;
519  bcopy(&data_from_proc[ee], &parent, sizeof(EntityHandle));
520  ee += block_size;
521  bcopy(&data_from_proc[ee], &uulong_bit, sizeof(unsigned long long));
522  ee += block_size;
523 
524  CHKERR set_parent(ent, parent);
525  CHKERR set_bit(ent, BitRefLevel(uulong_bit));
526 
527  if (verb >= VERBOSE) {
528  MOFEM_LOG_C("SYNC", Sev::noisy, "received %lu (%lu) from %d at %d\n",
529  ent, parent, onodes[kk], m_field.get_comm_rank());
530  MOFEM_LOG("SYNC", Sev::noisy) << "Bit " << BitRefLevel(uulong_bit);
531  }
532  }
533  }
534 
535  // Cleaning
536  CHKERR PetscFree(s_waits);
537  CHKERR PetscFree(rbuf[0]);
538  CHKERR PetscFree(rbuf);
539  CHKERR PetscFree(r_waits);
540  CHKERR PetscFree(onodes);
541  CHKERR PetscFree(olengths);
542  CHKERR PetscCommDestroy(&comm);
543 
544  if (verb >= VERBOSE)
545  MOFEM_LOG_SYNCHRONISE(m_field.get_comm());
546 
547  MoFEMFunctionReturn(0);
548 }
549 
550 MoFEMErrorCode CommInterface::resolveSharedFiniteElements(
551  const Problem *problem_ptr, const std::string &fe_name, int verb) {
552  MoFEM::Interface &m_field = cOre;
553  MoFEMFunctionBegin;
554  ParallelComm *pcomm = ParallelComm::get_pcomm(
555  &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);
556  std::vector<int> shprocs(MAX_SHARING_PROCS, 0);
557  std::vector<EntityHandle> shhandles(MAX_SHARING_PROCS, 0);
558  Range ents;
559  Tag th_gid = m_field.get_moab().globalId_tag();
560  PetscLayout layout;
561  CHKERR problem_ptr->getNumberOfElementsByNameAndPart(m_field.get_comm(),
562  fe_name, &layout);
563  int gid, last_gid;
564  CHKERR PetscLayoutGetRange(layout, &gid, &last_gid);
565  CHKERR PetscLayoutDestroy(&layout);
566 
567  const FiniteElement_multiIndex *fes_ptr;
568  CHKERR m_field.get_finite_elements(&fes_ptr);
569 
570  auto fe_miit = fes_ptr->get<FiniteElement_name_mi_tag>().find(fe_name);
571  if (fe_miit != fes_ptr->get<FiniteElement_name_mi_tag>().end()) {
572  auto fit =
573  problem_ptr->numeredFiniteElementsPtr->get<Unique_mi_tag>().lower_bound(
574  EntFiniteElement::getLocalUniqueIdCalculate(
575  0, (*fe_miit)->getFEUId()));
576  auto hi_fe_it =
577  problem_ptr->numeredFiniteElementsPtr->get<Unique_mi_tag>().upper_bound(
578  EntFiniteElement::getLocalUniqueIdCalculate(
579  get_id_for_max_type<MBENTITYSET>(), (*fe_miit)->getFEUId()));
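    // Note (assumed semantics): getLocalUniqueIdCalculate composes a UId from
    // an entity handle and the finite-element UId, so the lower_bound /
    // upper_bound pair above selects from the UId-ordered index exactly the
    // numbered elements of this finite element, from the lowest possible
    // handle (0) up to the largest MBENTITYSET handle.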
580  for (; fit != hi_fe_it; ++fit) {
581 
582  auto ent = (*fit)->getEnt();
583  auto part = (*fit)->getPart();
584 
585  ents.insert(ent);
586  CHKERR m_field.get_moab().tag_set_data(pcomm->part_tag(), &ent, 1, &part);
587  if (part == pcomm->rank()) {
588  CHKERR m_field.get_moab().tag_set_data(th_gid, &ent, 1, &gid);
589  gid++;
590  }
591  shprocs.clear();
592  shhandles.clear();
593 
594  if (pcomm->size() > 1) {
595 
596  unsigned char pstatus = 0;
597  if (pcomm->rank() != part) {
598  pstatus = PSTATUS_NOT_OWNED;
599  pstatus |= PSTATUS_GHOST;
600  }
601 
602  if (pcomm->size() > 2) {
603  pstatus |= PSTATUS_SHARED;
604  pstatus |= PSTATUS_MULTISHARED;
605  } else {
606  pstatus |= PSTATUS_SHARED;
607  }
608 
609  size_t rrr = 0;
610  for (size_t rr = 0; rr < pcomm->size(); ++rr) {
611  if (rr != pcomm->rank()) {
612  shhandles[rrr] = ent;
613  shprocs[rrr] = rr;
614  ++rrr;
615  }
616  }
617  for (; rrr != pcomm->size(); ++rrr)
618  shprocs[rrr] = -1;
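        // Note: the element is marked as shared with every other rank; ranks
        // other than the owning part get PSTATUS_NOT_OWNED | PSTATUS_GHOST,
        // the sharing-processor list holds all ranks except this one and is
        // terminated with -1, and the sharing handles all point at the same
        // element handle.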
619 
620  if (pstatus & PSTATUS_SHARED) {
621  CHKERR m_field.get_moab().tag_set_data(pcomm->sharedp_tag(), &ent, 1,
622  &shprocs[0]);
623  CHKERR m_field.get_moab().tag_set_data(pcomm->sharedh_tag(), &ent, 1,
624  &shhandles[0]);
625  }
626 
627  if (pstatus & PSTATUS_MULTISHARED) {
628  CHKERR m_field.get_moab().tag_set_data(pcomm->sharedps_tag(), &ent, 1,
629  &shprocs[0]);
630  CHKERR m_field.get_moab().tag_set_data(pcomm->sharedhs_tag(), &ent, 1,
631  &shhandles[0]);
632  }
633  CHKERR m_field.get_moab().tag_set_data(pcomm->pstatus_tag(), &ent, 1,
634  &pstatus);
635  }
636  }
637  }
638 
639  CHKERR pcomm->exchange_tags(th_gid, ents);
640  MoFEMFunctionReturn(0);
641 }
642 
643 MoFEMErrorCode CommInterface::resolveSharedFiniteElements(
644  const std::string name, const std::string &fe_name, int verb) {
645  MoFEM::Interface &m_field = cOre;
646  MoFEMFunctionBegin;
647  const Problem *problem_ptr;
648  CHKERR m_field.get_problem(name, &problem_ptr);
649  CHKERR resolveSharedFiniteElements(problem_ptr, fe_name, verb);
650  MoFEMFunctionReturn(0);
651 }
652 
653 MoFEMErrorCode
654 CommInterface::makeEntitiesMultishared(const EntityHandle *entities,
655  const int num_entities,
656  const int owner_proc, int verb) {
657  MoFEM::Interface &m_field = cOre;
658  MoFEMFunctionBegin;
659 
660  if (m_field.get_comm_size() > 1) {
661 
662  ParallelComm *pcomm = ParallelComm::get_pcomm(
663  &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);
664 
665  Range all_ents_range;
666  all_ents_range.insert_list(entities, entities + num_entities);
667 
668  auto get_tag = [&]() { return m_field.get_moab().globalId_tag(); };
669 
670  auto delete_tag = [&](auto &&th_gid) {
671  MoFEMFunctionBegin;
672  CHKERR m_field.get_moab().tag_delete(th_gid);
673  MoFEMFunctionReturn(0);
674  };
675 
676  auto resolve_shared_ents = [&](auto &&th_gid, auto &all_ents_range) {
677  auto set_gid = [&](auto &th_gid) {
678  std::vector<int> gids(num_entities);
679  for (size_t g = 0; g != all_ents_range.size(); ++g)
680  gids[g] = g + 1;
681  CHKERR m_field.get_moab().tag_set_data(th_gid, all_ents_range,
682  &*gids.begin());
683 
684  return &th_gid;
685  };
686 
687  auto get_skin_ents = [&](auto &all_ents_range) {
688  std::array<Range, 4> proc_ents_skin;
689  proc_ents_skin[3] = all_ents_range.subset_by_dimension(3);
690  proc_ents_skin[2] = all_ents_range.subset_by_dimension(2);
691  proc_ents_skin[1] = all_ents_range.subset_by_dimension(1);
692  proc_ents_skin[0] = all_ents_range.subset_by_dimension(0);
693  return proc_ents_skin;
694  };
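      // Note: despite its name, proc_ents_skin simply splits the input range
      // by dimension (3-D, 2-D, 1-D and vertices); these per-dimension ranges
      // are what ParallelComm::resolve_shared_ents is given as candidate
      // entities for sharing resolution.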
695 
696  auto resolve_dim = [&](auto &all_ents_range) {
697  for (int resolve_dim = 3; resolve_dim >= 0; --resolve_dim) {
698  if (all_ents_range.num_of_dimension(resolve_dim))
699  return resolve_dim;
700  }
701  return -1;
702  };
703 
704  auto get_proc_ent = [&](auto &all_ents_range) {
705  Range proc_ent;
706  if (m_field.get_comm_rank() == owner_proc)
707  proc_ent = all_ents_range;
708  return proc_ent;
709  };
710 
711  auto resolve_shared_ents = [&](auto &&proc_ents, auto &&skin_ents) {
712  return pcomm->resolve_shared_ents(
713  0, proc_ents, resolve_dim(all_ents_range),
714  resolve_dim(all_ents_range), skin_ents.data(), set_gid(th_gid));
715  };
716 
717  CHKERR resolve_shared_ents(get_proc_ent(all_ents_range),
718  get_skin_ents(all_ents_range));
719 
720  return th_gid;
721  };
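      // Note (assumed behaviour): only owner_proc contributes a non-empty
      // proc_ents, so after ParallelComm::resolve_shared_ents completes the
      // entities are owned by owner_proc and become shared (or multishared)
      // on the remaining ranks; the temporary global-id tag set by set_gid is
      // only needed to match entities across processors and is removed again
      // by delete_tag.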
722 
723  CHKERR delete_tag(resolve_shared_ents(get_tag(), all_ents_range));
724 
725  if (verb >= NOISY) {
726 
727  auto print_owner = [&](const EntityHandle e) {
728  MoFEMFunctionBegin;
729  int moab_owner_proc;
730  EntityHandle moab_owner_handle;
731  CHKERR pcomm->get_owner_handle(e, moab_owner_proc, moab_owner_handle);
732 
733  unsigned char pstatus = 0;
734 
735  CHKERR m_field.get_moab().tag_get_data(pcomm->pstatus_tag(), &e, 1,
736  &pstatus);
737 
738  std::vector<int> shprocs(MAX_SHARING_PROCS, 0);
739  std::vector<EntityHandle> shhandles(MAX_SHARING_PROCS, 0);
740 
741  CHKERR m_field.get_moab().tag_get_data(pcomm->sharedp_tag(), &e, 1,
742  &shprocs[0]);
743  CHKERR m_field.get_moab().tag_get_data(pcomm->sharedh_tag(), &e, 1,
744  &shhandles[0]);
745  if (pstatus & PSTATUS_MULTISHARED) {
746  CHKERR m_field.get_moab().tag_get_data(pcomm->sharedps_tag(), &e, 1,
747  &shprocs[0]);
748  CHKERR m_field.get_moab().tag_get_data(pcomm->sharedhs_tag(), &e, 1,
749  &shhandles[0]);
750  }
751 
752  std::ostringstream ss;
753 
754  ss << "Rank " << m_field.get_comm_rank() << " ";
755  if (!(pstatus & PSTATUS_NOT_OWNED))
756  ss << "OWNER ";
757  if (pstatus & PSTATUS_SHARED)
758  ss << "PSTATUS_SHARED ";
759  if (pstatus & PSTATUS_MULTISHARED)
760  ss << "PSTATUS_MULTISHARED ";
761 
762  ss << "owner " << moab_owner_proc << " (" << owner_proc << ") ";
763 
764  ss << "shprocs: ";
765  for (size_t r = 0; r != m_field.get_comm_size() + 1; ++r)
766  ss << shprocs[r] << " ";
767 
768  ss << "shhandles: ";
769  for (size_t r = 0; r != m_field.get_comm_size() + 1; ++r)
770  ss << shhandles[r] << " ";
771 
772  ss << std::endl;
773  MOFEM_LOG("SYNC", Sev::noisy) << ss.str();
774  MOFEM_LOG_SYNCHRONISE(m_field.get_comm());
775 
776  MoFEMFunctionReturn(0);
777  };
778 
779  for (auto e : all_ents_range)
780  CHKERR print_owner(e);
781  }
782  }
783 
784  MoFEMFunctionReturn(0);
785 }
786 
787 MoFEMErrorCode CommInterface::makeEntitiesMultishared(Range &entities,
788  const int owner_proc,
789  int verb) {
790  MoFEM::Interface &m_field = cOre;
791  MoFEMFunctionBegin;
792  if (m_field.get_comm_size() > 1) {
793  const int num_ents = entities.size();
794  std::vector<EntityHandle> vec_ents(num_ents);
795  std::copy(entities.begin(), entities.end(), vec_ents.begin());
796  CHKERR makeEntitiesMultishared(&*vec_ents.begin(), num_ents, owner_proc,
797  verb);
798  }
799  MoFEMFunctionReturn(0);
800 }
801 
802 MoFEMErrorCode
803 CommInterface::makeFieldEntitiesMultishared(const std::string field_name,
804  const int owner_proc, int verb) {
805  MoFEM::Interface &m_field = cOre;
806  MoFEMFunctionBegin;
807  if (m_field.get_comm_size() > 1) {
808  EntityHandle field_meshset = m_field.get_field_meshset(field_name);
809  std::vector<EntityHandle> field_ents;
810  CHKERR m_field.get_moab().get_entities_by_handle(field_meshset, field_ents,
811  true);
812  CHKERR makeEntitiesMultishared(&*field_ents.begin(), field_ents.size(),
813  owner_proc, verb);
814  }
815  MoFEMFunctionReturn(0);
816 }
817 
818 MoFEMErrorCode CommInterface::exchangeFieldData(const std::string field_name,
819  int verb) {
820  MoFEM::Interface &m_field = cOre;
821  MoFEMFunctionBegin;
822  if (m_field.get_comm_size() > 1) {
823 
824  Range exchange_ents_data_verts, exchange_ents_data;
825 
826  auto *field_ents = m_field.get_field_ents();
827  auto field_bit_number = m_field.get_field_bit_number(field_name);
828  auto lo = field_ents->get<Unique_mi_tag>().lower_bound(
829  FieldEntity::getLoBitNumberUId(field_bit_number));
830  auto hi = field_ents->get<Unique_mi_tag>().lower_bound(
831  FieldEntity::getHiBitNumberUId(field_bit_number));
832 
833  for (auto it = lo; it != hi; ++it)
834  if (
835 
836  ((*it)->getPStatus()) &&
837 
838  (*it)->getNbDofsOnEnt()
839 
840  ) {
841  if ((*it)->getEntType() == MBVERTEX)
842  exchange_ents_data_verts.insert((*it)->getEnt());
843  else
844  exchange_ents_data.insert((*it)->getEnt());
845  }
846 
847  auto field_ptr = m_field.get_field_structure(field_name);
848  ParallelComm *pcomm = ParallelComm::get_pcomm(
849  &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);
850 
851  auto exchange = [&](const Range &ents, Tag th) {
852  MoFEMFunctionBegin;
853  if (!ents.empty()) {
854  std::vector<Tag> tags;
855  tags.push_back(th);
856  CHKERR pcomm->exchange_tags(tags, tags, ents);
857  }
858  MoFEMFunctionReturn(0);
859  };
860 
861  CHKERR exchange(exchange_ents_data_verts, field_ptr->th_FieldDataVerts);
862  CHKERR exchange(exchange_ents_data, field_ptr->th_FieldData);
863  }
864  MoFEMFunctionReturn(0);
865 }
866 
867 MoFEMErrorCode
868 CommInterface::partitionMesh(const Range &ents, const int dim,
869  const int adj_dim, const int n_parts,
870  Tag *th_vertex_weights, Tag *th_edge_weights,
871  Tag *th_part_weights, int verb, const bool debug) {
872  MoFEM::Interface &m_field = cOre;
873  MoFEMFunctionBegin;
874 
875  // get layout
876  int rstart, rend, nb_elems;
877  {
878  PetscLayout layout;
879  CHKERR PetscLayoutCreate(m_field.get_comm(), &layout);
880  CHKERR PetscLayoutSetBlockSize(layout, 1);
881  CHKERR PetscLayoutSetSize(layout, ents.size());
882  CHKERR PetscLayoutSetUp(layout);
883  CHKERR PetscLayoutGetSize(layout, &nb_elems);
884  CHKERR PetscLayoutGetRange(layout, &rstart, &rend);
885  CHKERR PetscLayoutDestroy(&layout);
886  if (verb >= VERBOSE) {
887  MOFEM_LOG("SYNC", Sev::inform)
888  << "Finite elements in problem: row lower " << rstart << " row upper "
889  << rend << " nb. elems " << nb_elems << " ( " << ents.size() << " )";
891  }
892  }
893 
894  std::vector<EntityHandle> weight_ents;
895  weight_ents.reserve(rend - rstart + 1);
896 
897  struct AdjBridge {
898  EntityHandle ent;
899  std::vector<int> adj;
900  AdjBridge(const EntityHandle ent, std::vector<int> &adj)
901  : ent(ent), adj(adj) {}
902  };
903 
904  typedef multi_index_container<
905  AdjBridge,
906  indexed_by<
907 
908  hashed_unique<member<AdjBridge, EntityHandle, &AdjBridge::ent>>
909 
910  >>
911  AdjBridgeMap;
912 
913  auto get_it = [&](auto i) {
914  auto it = ents.begin();
915  for (; i > 0; --i) {
916  if (it == ents.end())
917  break;
918  ++it;
919  }
920  return it;
921  };
922 
923  Range proc_ents;
924  proc_ents.insert(get_it(rstart), get_it(rend));
925  if (proc_ents.size() != rend - rstart)
926  SETERRQ2(PETSC_COMM_WORLD, MOFEM_DATA_INCONSISTENCY,
927  "Wrong number of elements in range %d != %d", proc_ents.size(),
928  rend - rstart);
929 
930  Range all_dim_ents;
931  CHKERR m_field.get_moab().get_adjacencies(
932  proc_ents, adj_dim, true, all_dim_ents, moab::Interface::UNION);
933 
934  AdjBridgeMap adj_bridge_map;
935  auto hint = adj_bridge_map.begin();
936  std::vector<int> adj;
937  for (auto ent : all_dim_ents) {
938  Range adj_ents;
939  CHKERR m_field.get_moab().get_adjacencies(&ent, 1, dim, false, adj_ents);
940  adj_ents = intersect(adj_ents, ents);
941  adj.clear();
942  adj.reserve(adj_ents.size());
943  for (auto a : adj_ents)
944  adj.emplace_back(ents.index(a));
945  hint = adj_bridge_map.emplace_hint(hint, ent, adj);
946  }
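  // Note: adj_bridge_map maps every adj_dim entity (a face, for instance) to
  // the positions, within ents, of the elements adjacent to it. Two elements
  // are later treated as adjacent for partitioning when they share at least
  // one such lower-dimensional entity, which is what the CSR assembly below
  // exploits.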
947 
948  int *_i;
949  int *_j;
950  {
951  const int nb_loc_elements = rend - rstart;
952  std::vector<int> i(nb_loc_elements + 1, 0), j;
953  {
954  std::vector<int> row_adj;
955  Range::iterator fe_it;
956  int ii, jj;
957  size_t max_row_size;
958  for (fe_it = proc_ents.begin(), ii = rstart, jj = 0, max_row_size = 0;
959  fe_it != proc_ents.end(); ++fe_it, ++ii) {
960 
961  if (type_from_handle(*fe_it) == MBENTITYSET) {
962  SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED,
963  "not yet implemented, don't know what to do for meshset "
964  "element");
965  } else {
966 
967  Range dim_ents;
968  CHKERR m_field.get_moab().get_adjacencies(&*fe_it, 1, adj_dim, false,
969  dim_ents);
970  dim_ents = intersect(dim_ents, all_dim_ents);
971 
972  row_adj.clear();
973  for (auto e : dim_ents) {
974  auto adj_it = adj_bridge_map.find(e);
975  if (adj_it != adj_bridge_map.end()) {
976 
977  for (const auto idx : adj_it->adj)
978  row_adj.push_back(idx);
979 
980  } else
981  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
982  "Entity not found");
983  }
984 
985  std::sort(row_adj.begin(), row_adj.end());
986  auto end = std::unique(row_adj.begin(), row_adj.end());
987 
988  size_t row_size = std::distance(row_adj.begin(), end);
989  max_row_size = std::max(max_row_size, row_size);
990  if (j.capacity() < (nb_loc_elements - jj) * max_row_size)
991  j.reserve(nb_loc_elements * max_row_size);
992 
993  i[jj] = j.size();
994  auto diag = ents.index(*fe_it);
995  for (auto it = row_adj.begin(); it != end; ++it)
996  if (*it != diag)
997  j.push_back(*it);
998  }
999 
1000  ++jj;
1001 
1002  if (th_vertex_weights != NULL)
1003  weight_ents.push_back(*fe_it);
1004  }
1005 
1006  i[jj] = j.size();
1007  }
1008 
1009  CHKERR PetscMalloc(i.size() * sizeof(int), &_i);
1010  CHKERR PetscMalloc(j.size() * sizeof(int), &_j);
1011  copy(i.begin(), i.end(), _i);
1012  copy(j.begin(), j.end(), _j);
1013  }
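  // Note: _i and _j are the CSR arrays expected by MatCreateMPIAdj: _i[row]
  // is the offset of the first neighbour of local element `row` (local rows
  // correspond to global indices rstart..rend-1) and _j lists the global
  // indices of the neighbouring elements with the diagonal entry removed.
  // For example, two local elements with neighbours {1,2} and {2,3} give
  // _i = {0,2,4} and _j = {1,2,2,3}.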
1014 
1015  // get weights
1016  int *vertex_weights = NULL;
1017  if (th_vertex_weights != NULL) {
1018  CHKERR PetscMalloc(weight_ents.size() * sizeof(int), &vertex_weights);
1019  CHKERR m_field.get_moab().tag_get_data(*th_vertex_weights,
1020  &*weight_ents.begin(),
1021  weight_ents.size(), vertex_weights);
1022  }
1023 
1024  {
1025  Mat Adj;
1026  // Adjacency matrix used to partition problems, e.g. by METIS
1027  CHKERR MatCreateMPIAdj(m_field.get_comm(), rend - rstart, nb_elems, _i, _j,
1028  PETSC_NULL, &Adj);
1029  CHKERR MatSetOption(Adj, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);
1030 
1031  if (debug) {
1032  Mat A;
1033  CHKERR MatConvert(Adj, MATMPIAIJ, MAT_INITIAL_MATRIX, &A);
1034  CHKERR MatView(A, PETSC_VIEWER_DRAW_WORLD);
1035  std::string wait;
1036  std::cin >> wait;
1037  CHKERR MatDestroy(&A);
1038  }
1039 
1040  // run PETSc partitioner
1041  MOFEM_LOG("WORLD", Sev::verbose) << "Start";
1042 
1043  MatPartitioning part;
1044  IS is;
1045  CHKERR MatPartitioningCreate(m_field.get_comm(), &part);
1046 
1047  CHKERR MatPartitioningSetAdjacency(part, Adj);
1048  CHKERR MatPartitioningSetFromOptions(part);
1049  CHKERR MatPartitioningSetNParts(part, n_parts);
1050  if (th_vertex_weights != NULL) {
1051  CHKERR MatPartitioningSetVertexWeights(part, vertex_weights);
1052  }
1053  PetscBool same;
1054  PetscObjectTypeCompare((PetscObject)part, MATPARTITIONINGPARMETIS, &same);
1055  if (same) {
1056 #ifdef PARMETIS
1057  CHKERR MatPartitioningApply_Parmetis_MoFEM(part, &is);
1058 #endif
1059  } else {
1060  CHKERR MatPartitioningApply(part, &is);
1061  }
1062 
1063  MOFEM_LOG("WORLD", Sev::verbose) << "End";
1064 
1065  // gather
1066  IS is_gather, is_num, is_gather_num;
1067  CHKERR ISAllGather(is, &is_gather);
1068  CHKERR ISPartitioningToNumbering(is, &is_num);
1069  CHKERR ISAllGather(is_num, &is_gather_num);
1070 
1071  const int *part_number, *gids;
1072  CHKERR ISGetIndices(is_gather, &part_number);
1073  CHKERR ISGetIndices(is_gather_num, &gids);
1074 
1075  // set partition tag and gid tag to entities
1076  ParallelComm *pcomm = ParallelComm::get_pcomm(
1077  &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);
1078  Tag part_tag = pcomm->part_tag();
1079  CHKERR m_field.get_moab().tag_set_data(part_tag, ents, part_number);
1080  Tag gid_tag = m_field.get_moab().globalId_tag();
1081 
1082  std::map<int, Range> parts_ents;
1083  {
1084  // get entities on each part
1085  Range::iterator eit = ents.begin();
1086  for (int ii = 0; eit != ents.end(); eit++, ii++) {
1087  parts_ents[part_number[ii]].insert(*eit);
1088  }
1089  Range tagged_sets;
1090  CHKERR m_field.get_moab().get_entities_by_type_and_tag(
1091  0, MBENTITYSET, &part_tag, NULL, 1, tagged_sets,
1092  moab::Interface::UNION);
1093  if (!tagged_sets.empty())
1094  CHKERR m_field.get_moab().tag_delete_data(part_tag, tagged_sets);
1095 
1096  if (n_parts > (int)tagged_sets.size()) {
1097  // too few partition sets - create missing ones
1098  int num_new = n_parts - tagged_sets.size();
1099  for (int i = 0; i < num_new; i++) {
1100  EntityHandle new_set;
1101  CHKERR m_field.get_moab().create_meshset(MESHSET_SET, new_set);
1102  tagged_sets.insert(new_set);
1103  }
1104  } else if (n_parts < (int)tagged_sets.size()) {
1105  // too many partition sets - delete extras
1106  int num_del = tagged_sets.size() - n_parts;
1107  for (int i = 0; i < num_del; i++) {
1108  EntityHandle old_set = tagged_sets.pop_back();
1109  CHKERR m_field.get_moab().delete_entities(&old_set, 1);
1110  }
1111  }
1112  // write a tag to those sets denoting they're partition sets, with a
1113  // value of the proc number
1114  std::vector<int> dum_ids(n_parts);
1115  for (int i = 0; i < n_parts; i++)
1116  dum_ids[i] = i;
1117  CHKERR m_field.get_moab().tag_set_data(part_tag, tagged_sets,
1118  &*dum_ids.begin());
1119  CHKERR m_field.get_moab().clear_meshset(tagged_sets);
1120 
1121  // get lower dimension entities on each part
1122  for (int pp = 0; pp != n_parts; pp++) {
1123  Range dim_ents = parts_ents[pp].subset_by_dimension(dim);
1124  for (int dd = dim - 1; dd >= 0; dd--) {
1125  Range adj_ents;
1126  CHKERR m_field.get_moab().get_adjacencies(
1127  dim_ents, dd, false, adj_ents, moab::Interface::UNION);
1128  parts_ents[pp].merge(adj_ents);
1129  }
1130  }
1131  for (int pp = 1; pp != n_parts; pp++) {
1132  for (int ppp = 0; ppp != pp; ppp++) {
1133  parts_ents[pp] = subtract(parts_ents[pp], parts_ents[ppp]);
1134  }
1135  }
1136 
1137  for (int pp = 0; pp != n_parts; pp++) {
1138  CHKERR m_field.get_moab().add_entities(tagged_sets[pp], parts_ents[pp]);
1139  }
1140 
1141  auto set_part = [&]() {
1142  MoFEMFunctionBegin;
1143  for (EntityType t = MBEDGE; t != MBENTITYSET; ++t) {
1144  for (int pp = 0; pp != n_parts; pp++) {
1145  Range type_ents = parts_ents[pp].subset_by_type(t);
1146  CHKERR m_field.get_moab().tag_clear_data(part_tag, type_ents, &pp);
1147  }
1148  }
1149  MoFEMFunctionReturn(0);
1150  };
1151 
1152  auto set_gid = [&]() {
1153  MoFEMFunctionBegin;
1154  for (EntityType t = MBVERTEX; t != MBENTITYSET; ++t) {
1155 
1156  void *ptr;
1157  int count;
1158 
1159  int gid = 1; // moab indexing starts from 1
1160  for (int pp = 0; pp != n_parts; pp++) {
1161  Range type_ents = parts_ents[pp].subset_by_type(t);
1162 
1163  auto eit = type_ents.begin();
1164  for (; eit != type_ents.end();) {
1165  CHKERR m_field.get_moab().tag_iterate(
1166  gid_tag, eit, type_ents.end(), count, ptr);
1167  auto gid_tag_ptr = static_cast<int *>(ptr);
1168  for (; count > 0; --count) {
1169  *gid_tag_ptr = gid;
1170  ++eit;
1171  ++gid;
1172  ++gid_tag_ptr;
1173  }
1174  }
1175  }
1176  }
1177 
1178  MoFEMFunctionReturn(0);
1179  };
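      // Note: set_gid walks the entities type by type and partition by
      // partition, using tag_iterate to obtain a direct pointer into MOAB's
      // contiguous tag storage, so consecutive global ids (starting at 1) can
      // be written without one tag_set_data call per entity.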
1180 
1181  CHKERR set_part();
1182  CHKERR set_gid();
1183 
1184  }
1185 
1186  if (debug) {
1187  if (m_field.get_comm_rank() == 0) {
1188  for (int rr = 0; rr != n_parts; rr++) {
1189  ostringstream ss;
1190  ss << "out_part_" << rr << ".vtk";
1191  MOFEM_LOG("SELF", Sev::inform) << "Save debug part mesh " << ss.str();
1192  EntityHandle meshset;
1193  CHKERR m_field.get_moab().create_meshset(MESHSET_SET, meshset);
1194  CHKERR m_field.get_moab().add_entities(meshset, parts_ents[rr]);
1195  CHKERR m_field.get_moab().write_file(ss.str().c_str(), "VTK", "",
1196  &meshset, 1);
1197  CHKERR m_field.get_moab().delete_entities(&meshset, 1);
1198  }
1199  }
1200  }
1201 
1202  CHKERR ISRestoreIndices(is_gather, &part_number);
1203  CHKERR ISRestoreIndices(is_gather_num, &gids);
1204  CHKERR ISDestroy(&is_num);
1205  CHKERR ISDestroy(&is_gather_num);
1206  CHKERR ISDestroy(&is_gather);
1207  CHKERR ISDestroy(&is);
1208  CHKERR MatPartitioningDestroy(&part);
1209  CHKERR MatDestroy(&Adj);
1210  }
1211 
1212  MoFEMFunctionReturn(0);
1213 }
1214 
1215 } // namespace MoFEM
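The interfaces defined in this file are normally obtained from the core object and used together. The following is a minimal usage sketch, not taken from the file above; it assumes the MoFEM.hpp umbrella header, the usual getInterface accessor, an already loaded mesh, and a hypothetical field named "U", and combines partitionMesh, synchroniseFieldEntities and exchangeFieldData using only the signatures listed above.

// Minimal sketch (assumptions: MoFEM.hpp header, getInterface accessor,
// a loaded mesh, and a hypothetical field "U" already defined).
#include <MoFEM.hpp>
using namespace MoFEM;

MoFEMErrorCode example_comm_usage(MoFEM::Core &core) {
  MoFEMFunctionBegin;
  Interface &m_field = core;

  CommInterface *comm_ptr;
  CHKERR m_field.getInterface(comm_ptr);

  // collect all 3-D elements of the root set
  Range elems;
  CHKERR m_field.get_moab().get_entities_by_dimension(0, 3, elems);

  // partition elements (dim 3) by face (dim 2) adjacency, one part per rank
  CHKERR comm_ptr->partitionMesh(elems, 3, 2, m_field.get_comm_size());

  // make field entities consistent on sharing processors, then exchange data
  CHKERR comm_ptr->synchroniseFieldEntities("U");
  CHKERR comm_ptr->exchangeFieldData("U");

  MoFEMFunctionReturn(0);
}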