v0.13.2
Loading...
Searching...
No Matches
CommInterface.cpp
Go to the documentation of this file.
1/** \file CommInterface.cpp
2 * \brief Functions for interprocessor communications
3 * \mofem_comm
4 */
5
6namespace MoFEM {
7
8#ifdef PARMETIS
9
10MoFEMErrorCode MatPartitioningApply_Parmetis_MoFEM(MatPartitioning part,
11 IS *partitioning);
12
13#endif // PARMETIS
14
// Interface-query hook: hands back this CommInterface through the
// UnknownInterface mechanism. The type_index argument is unused in the
// visible body.
// NOTE(review): this is a doxygen text dump — the original line numbers are
// fused onto each line, and extraction dropped lines 15/18/20 (presumably
// the MoFEMErrorCode return type and the MoFEMFunctionBeginHot /
// MoFEMFunctionReturnHot(0) macro lines — restore from the repository
// source; TODO confirm).
16CommInterface::query_interface(boost::typeindex::type_index type_index,
17 UnknownInterface **iface) const {
19 *iface = const_cast<CommInterface *>(this);
21}
22
24 : cOre(const_cast<MoFEM::Core &>(core)), dEbug(false) {}
25
// synchroniseEntities (name confirmed by the caller below): makes the Range
// `ents` consistent across MPI ranks. Each rank scans its entities' MOAB
// parallel status, collects the remote handles of shared/multishared
// entities per destination rank, exchanges those handles point-to-point via
// PETSc/MPI, and inserts every received handle into `ents`.
// NOTE(review): doxygen extraction fused the original line numbers onto
// each line and dropped the signature line (26) and the
// MoFEMFunctionBegin / final MOFEM_LOG flush + MoFEMFunctionReturn(0)
// lines (30, 218, 220) — restore from the repository source; TODO confirm.
27 MoFEM::Interface &m_field = cOre;
28 ParallelComm *pcomm = ParallelComm::get_pcomm(
29 &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);
31
// Read the MOAB parallel-status byte (owned/shared/multishared flags) of an
// entity; throws via CHK_MOAB_THROW on tag read failure.
32 auto get_pstatus = [&](const auto ent) {
33 unsigned char pstatus;
34 CHK_MOAB_THROW(m_field.get_moab().tag_get_data(pcomm->pstatus_tag(), &ent,
35 1, &pstatus),
36 "can not get pstatus");
37 return pstatus;
38 };
39
// Ranks sharing the entity, padded with -1. Multishared entities use the
// plural sharedps tag (up to MAX_SHARING_PROCS ranks), plainly shared ones
// the single sharedp tag (only element [0] is filled).
// NOTE(review): typo in the error string below ("ger") — fixing it is a
// code change, flagged only.
40 auto get_sharing_procs = [&](const auto ent, const auto pstatus) {
41 std::vector<int> sharing_procs(MAX_SHARING_PROCS, -1);
42 if (pstatus & PSTATUS_MULTISHARED) {
43 // entity is multi shared
44 CHK_MOAB_THROW(m_field.get_moab().tag_get_data(
45 pcomm->sharedps_tag(), &ent, 1, &sharing_procs[0]),
46 "can not ger sharing_procs_ptr");
47 } else if (pstatus & PSTATUS_SHARED) {
48 // shared
49 CHK_MOAB_THROW(m_field.get_moab().tag_get_data(pcomm->sharedp_tag(), &ent,
50 1, &sharing_procs[0]),
51 "can not get sharing proc");
52 }
53 return sharing_procs;
54 };
55
// Remote handles of the entity on each sharing rank, parallel to the
// sharing_procs vector above; 0-padded.
56 auto get_sharing_handles = [&](const auto ent, const auto pstatus) {
57 std::vector<EntityHandle> sharing_handles(MAX_SHARING_PROCS, 0);
58 if (pstatus & PSTATUS_MULTISHARED) {
59 // entity is multi shared
60 CHK_MOAB_THROW(m_field.get_moab().tag_get_data(
61 pcomm->sharedhs_tag(), &ent, 1, &sharing_handles[0]),
62 "get shared handles");
63 } else if (pstatus & PSTATUS_SHARED) {
64 // shared
65 CHK_MOAB_THROW(m_field.get_moab().tag_get_data(pcomm->sharedh_tag(), &ent,
66 1, &sharing_handles[0]),
67 "get sharing handle");
68 }
69 return sharing_handles;
70 };
71
72 // make a buffer
// One outgoing handle list per destination rank.
73 std::vector<std::vector<EntityHandle>> sbuffer(m_field.get_comm_size());
74
75 for (auto ent : ents) {
76
77 auto pstatus = get_pstatus(ent);
// pstatus == 0 means the entity is purely local — nothing to send.
78 if (pstatus) {
79 auto sharing_procs = get_sharing_procs(ent, pstatus);
80 auto sharing_handles = get_sharing_handles(ent, pstatus);
81
82 if (verb >= NOISY) {
83 MOFEM_LOG("SYNC", Sev::noisy) << "pstatus " << std::bitset<8>(pstatus);
84 }
85
86 for (int proc = 0; proc < MAX_SHARING_PROCS && -1 != sharing_procs[proc];
87 proc++) {
// NOTE(review): unreachable given the loop condition above (which already
// excludes -1); kept as a defensive check.
88 if (sharing_procs[proc] == -1)
89 SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSSIBLE_CASE,
90 "sharing processor not set");
91
92 if (sharing_procs[proc] == m_field.get_comm_rank())
93 continue;
94
// Send the handle AS KNOWN ON THE RECEIVING RANK, so the receiver can
// insert it directly into its own range.
95 const auto handle_on_sharing_proc = sharing_handles[proc];
96 sbuffer[sharing_procs[proc]].push_back(handle_on_sharing_proc);
97 if (verb >= NOISY)
98 MOFEM_LOG_C("SYNC", Sev::noisy, "send %lu (%lu) to %d at %d\n", ent,
99 handle_on_sharing_proc, sharing_procs[proc],
100 m_field.get_comm_rank());
101
// Plainly-shared entities have exactly one remote copy — slot [0].
102 if (!(pstatus & PSTATUS_MULTISHARED))
103 break;
104 }
105 }
106 }
107
// Message lengths are counted in ints: each EntityHandle is sent as
// block_size ints over MPIU_INT.
108 int nsends = 0; // number of messages to send
109 std::vector<int> sbuffer_lengths(
110 m_field.get_comm_size()); // length of the message to proc
111 const size_t block_size = sizeof(EntityHandle) / sizeof(int);
112 for (int proc = 0; proc < m_field.get_comm_size(); proc++) {
113
114 if (!sbuffer[proc].empty()) {
115
116 sbuffer_lengths[proc] = sbuffer[proc].size() * block_size;
117 nsends++;
118
119 } else {
120
121 sbuffer_lengths[proc] = 0;
122 }
123 }
124
125 // Make sure it is a PETSc m_field.get_comm()
126 MPI_Comm comm;
127 CHKERR PetscCommDuplicate(m_field.get_comm(), &comm, NULL);
128
129 std::vector<MPI_Status> status(m_field.get_comm_size());
130
131 // Computes the number of messages a node expects to receive
132 int nrecvs; // number of messages received
133 CHKERR PetscGatherNumberOfMessages(comm, NULL, &sbuffer_lengths[0], &nrecvs);
134
135 // Computes info about messages that a MPI-node will receive, including
136 // (from-id,length) pairs for each message.
137 int *onodes; // list of node-ids from which messages are expected
138 int *olengths; // corresponding message lengths
139 CHKERR PetscGatherMessageLengths(comm, nsends, nrecvs, &sbuffer_lengths[0],
140 &onodes, &olengths);
141
142 // Gets a unique new tag from a PETSc communicator. All processors that share
143 // the communicator MUST call this routine EXACTLY the same number of times.
144 // This tag should only be used with the current objects communicator; do NOT
145 // use it with any other MPI communicator.
146 int tag;
147 CHKERR PetscCommGetNewTag(comm, &tag);
148
149 // Allocate a buffer sufficient to hold messages of size specified in
150 // olengths. And post Irecvs on these buffers using node info from onodes
// NOTE(review): typos "bee"/"sens" in the comments below are from the
// original source; freed at the "Cleaning" block at the end.
151 int **rbuf; // must bee freed by user
152 MPI_Request *r_waits; // must bee freed by user
153 // rbuf has a pointers to messages. It has size of of nrecvs (number of
154 // messages) +1. In the first index a block is allocated,
155 // such that rbuf[i] = rbuf[i-1]+olengths[i-1].
156 CHKERR PetscPostIrecvInt(comm, tag, nrecvs, onodes, olengths, &rbuf,
157 &r_waits);
158
159 MPI_Request *s_waits; // status of sens messages
160 CHKERR PetscMalloc1(nsends, &s_waits);
161
162 // Send messages
163 for (int proc = 0, kk = 0; proc < m_field.get_comm_size(); proc++) {
164 if (!sbuffer_lengths[proc])
165 continue; // no message to send to this proc
166 CHKERR MPI_Isend(&(sbuffer[proc])[0], // buffer to send
167 sbuffer_lengths[proc], // message length
168 MPIU_INT, proc, // to proc
169 tag, comm, s_waits + kk);
170 kk++;
171 }
172
173 // Wait for received
174 if (nrecvs)
175 CHKERR MPI_Waitall(nrecvs, r_waits, &status[0]);
176
177 // Wait for send messages
178 if (nsends)
179 CHKERR MPI_Waitall(nsends, s_waits, &status[0]);
180
181 if (verb >= VERBOSE) {
182 MOFEM_LOG_C("SYNC", Sev::verbose, "Rank %d nb. before ents %u\n",
183 m_field.get_comm_rank(), ents.size());
184 }
185
186 // synchronise range
// Unpack each received message: every block_size ints is one EntityHandle
// (bcopy avoids alignment issues on the int buffer) and goes into `ents`.
187 for (int kk = 0; kk < nrecvs; kk++) {
188
189 int len = olengths[kk];
190 int *data_from_proc = rbuf[kk];
191
192 for (int ee = 0; ee < len;) {
193 EntityHandle ent;
194 bcopy(&data_from_proc[ee], &ent, sizeof(EntityHandle));
195 ents.insert(ent);
196 ee += block_size;
197
198 if (verb >= VERBOSE)
199 MOFEM_LOG_C("SYNC", Sev::noisy, "received %lu from %d at %d\n", ent,
200 onodes[kk], m_field.get_comm_rank());
201 }
202 }
203
204 if (verb >= VERBOSE)
205 MOFEM_LOG_C("SYNC", Sev::verbose, "Rank %d nb. after ents %u",
206 m_field.get_comm_rank(), ents.size());
207
208 // Cleaning
209 CHKERR PetscFree(s_waits);
210 CHKERR PetscFree(rbuf[0]);
211 CHKERR PetscFree(rbuf);
212 CHKERR PetscFree(r_waits);
213 CHKERR PetscFree(onodes);
214 CHKERR PetscFree(olengths);
215 CHKERR PetscCommDestroy(&comm);
216
// NOTE(review): the statement guarded by this `if` (fused line 218,
// presumably a MOFEM_LOG synchronise/flush call) and the
// MoFEMFunctionReturn line (220) were dropped by the extraction.
217 if (verb >= VERBOSE)
219
221}
222
// Synchronises all entities of a named field across ranks: reads the
// field's meshset, synchronises the entity range, and adds any newly
// received entities back into the meshset.
// NOTE(review): the signature line (fused line 223, presumably
// "MoFEMErrorCode CommInterface::synchroniseFieldEntities(const std::string
// name, ...)") plus the MoFEMFunctionBegin/Return lines (226, 232) were
// dropped by the doxygen extraction — restore from the repository source.
224 int verb) {
225 MoFEM::Interface &m_field = cOre;
227 EntityHandle idm = m_field.get_field_meshset(name);
228 Range ents;
229 CHKERR m_field.get_moab().get_entities_by_handle(idm, ents, false);
230 CHKERR synchroniseEntities(ents, verb);
231 CHKERR m_field.get_moab().add_entities(idm, ents);
233}
234
// Synchronises parent handles and bit-ref levels of shared entities:
// for every shared entity owned by this rank, sends (remote handle,
// remote parent handle, bit-ref level) triples to the sharing ranks,
// which then set their local RefParentHandle / RefBitLevel tags.
// NOTE(review): the signature line was dropped by the doxygen extraction
// (fused line 235 "int verb) {" is only its tail) — presumably this is
// resolveParentEntities(const Range &ents, int verb); confirm against the
// repository. The MoFEMFunctionBegin/Return lines (241, 524/526) are also
// missing, as are the CHK_MOAB_THROW openings at fused lines 294 and
// 297-298 inside get_parent_and_bit (including the declaration of `bit`).
236 int verb) {
237 MoFEM::Interface &m_field = cOre;
238 ParallelComm *pcomm = ParallelComm::get_pcomm(
239 &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);
240
242
// Restrict to entities this rank owns AND that are shared with others —
// only the owner pushes parent/bit data out.
243 Range shared = ents;
244 CHKERR pcomm->filter_pstatus(shared, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1,
245 nullptr);
246 CHKERR pcomm->filter_pstatus(shared, PSTATUS_SHARED | PSTATUS_MULTISHARED,
247 PSTATUS_OR, -1, nullptr);
248
249 auto th_RefParentHandle = cOre.get_th_RefParentHandle();
250 auto th_RefBitLevel = cOre.get_th_RefBitLevel();
251
// Same tag-reading helpers as in synchroniseEntities above.
252 auto get_pstatus = [&](const auto ent) {
253 unsigned char pstatus;
254 CHK_MOAB_THROW(m_field.get_moab().tag_get_data(pcomm->pstatus_tag(), &ent,
255 1, &pstatus),
256 "can not get pstatus");
257 return pstatus;
258 };
259
260 auto get_sharing_procs = [&](const auto ent, const auto pstatus) {
261 std::vector<int> sharing_procs(MAX_SHARING_PROCS, -1);
262 if (pstatus & PSTATUS_MULTISHARED) {
263 // entity is multi shared
264 CHK_MOAB_THROW(m_field.get_moab().tag_get_data(
265 pcomm->sharedps_tag(), &ent, 1, &sharing_procs[0]),
266 "can not ger sharing_procs_ptr");
267 } else if (pstatus & PSTATUS_SHARED) {
268 // shared
269 CHK_MOAB_THROW(m_field.get_moab().tag_get_data(pcomm->sharedp_tag(), &ent,
270 1, &sharing_procs[0]),
271 "can not get sharing proc");
272 }
273 return sharing_procs;
274 };
275
276 auto get_sharing_handles = [&](const auto ent, const auto pstatus) {
277 std::vector<EntityHandle> sharing_handles(MAX_SHARING_PROCS, 0);
278 if (pstatus & PSTATUS_MULTISHARED) {
279 // entity is multi shared
280 CHK_MOAB_THROW(m_field.get_moab().tag_get_data(
281 pcomm->sharedhs_tag(), &ent, 1, &sharing_handles[0]),
282 "get shared handles");
283 } else if (pstatus & PSTATUS_SHARED) {
284 // shared
285 CHK_MOAB_THROW(m_field.get_moab().tag_get_data(pcomm->sharedh_tag(), &ent,
286 1, &sharing_handles[0]),
287 "get sharing handle");
288 }
289 return sharing_handles;
290 };
291
// Reads (parent handle, bit-ref level) of an entity. NOTE(review): the
// CHK_MOAB_THROW( openings and the `bit` declaration were dropped by the
// extraction (missing fused lines 294, 297-298).
292 auto get_parent_and_bit = [&](const auto ent) {
293 EntityHandle parent;
295 m_field.get_moab().tag_get_data(th_RefParentHandle, &ent, 1, &parent),
296 "get parent");
299 m_field.get_moab().tag_get_data(th_RefBitLevel, &ent, 1, &bit),
300 "get parent");
301 return std::make_pair(parent, bit);
302 };
303
304 auto set_parent = [&](const auto ent, const auto parent) {
305 return m_field.get_moab().tag_set_data(th_RefParentHandle, &ent, 1,
306 &parent);
307 };
308
309 auto set_bit = [&](const auto ent, const auto bit) {
310 return m_field.get_moab().tag_set_data(th_RefBitLevel, &ent, 1, &bit);
311 };
312
313 // make a buffer
// Per-destination message: triples of (entity handle on receiver, parent
// handle on receiver, bit-ref level as unsigned long long).
314 std::vector<std::vector<unsigned long long>> sbuffer(m_field.get_comm_size());
315
316 for (auto ent : shared) {
317
318 auto pstatus = get_pstatus(ent);
319 auto sharing_procs = get_sharing_procs(ent, pstatus);
320 auto sharing_handles = get_sharing_handles(ent, pstatus);
321 auto [parent, bit] = get_parent_and_bit(ent);
322
323 if (verb >= NOISY)
324 MOFEM_LOG("SYNC", Sev::noisy) << "pstatus " << std::bitset<8>(pstatus);
325
// Entity has a parent: translate the parent handle into the receiver's
// address space via the parent's own sharing data.
326 if (parent) {
327 auto pstatus_parent = get_pstatus(parent);
328 auto sharing_procs_parent = get_sharing_procs(parent, pstatus_parent);
329 auto sharing_handles_parent = get_sharing_handles(parent, pstatus_parent);
330
331 for (int proc = 0; proc < MAX_SHARING_PROCS && -1 != sharing_procs[proc];
332 proc++) {
333 if (sharing_procs[proc] == -1)
334 SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSSIBLE_CASE,
335 "sharing processor not set");
336
337 if (sharing_procs[proc] != m_field.get_comm_rank()) {
338
// The parent must be shared with the same rank, otherwise the receiver
// could not resolve the handle — hard error if not.
339 auto it = std::find(sharing_procs_parent.begin(),
340 sharing_procs_parent.end(), sharing_procs[proc]);
341 if (it == sharing_procs_parent.end()) {
342 SETERRQ1(
343 PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
344 "Sharing proc for parent entity can not be found proc = %u",
345 sharing_procs[proc]);
346 }
347
348 auto handle_on_sharing_proc = sharing_handles[proc];
349 auto parent_handle_on_sharing_proc =
350 sharing_handles_parent[std::distance(sharing_procs_parent.begin(),
351 it)];
352 sbuffer[sharing_procs[proc]].push_back(handle_on_sharing_proc);
353 sbuffer[sharing_procs[proc]].push_back(parent_handle_on_sharing_proc);
// bit.to_ullong() throws if the bitset does not fit in unsigned long long;
// failure is logged but the entity's triple stays incomplete in the buffer.
354 try {
355 sbuffer[sharing_procs[proc]].push_back(bit.to_ullong());
356 } catch (std::exception &ex) {
357 MOFEM_LOG("SELF", Sev::warning) << ex.what();
358 MOFEM_LOG("SELF", Sev::warning)
359 << "On " << ent << " "
360 << moab::CN::EntityTypeName(type_from_handle(ent));
361 MOFEM_LOG("SELF", Sev::warning) << "For bit ref " << bit;
362 }
363 if (verb >= NOISY)
364 MOFEM_LOG_C("SYNC", Sev::noisy, "send %lu (%lu) to %d at %d\n", ent,
365 handle_on_sharing_proc, sharing_procs[proc],
366 m_field.get_comm_rank());
367
368 if (!(pstatus & PSTATUS_MULTISHARED))
369 break;
370 }
371 }
372 } else {
// No parent: send the raw zero parent handle unchanged.
373 for (int proc = 0; proc < MAX_SHARING_PROCS && -1 != sharing_procs[proc];
374 proc++) {
375 if (sharing_procs[proc] == -1)
376 SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSSIBLE_CASE,
377 "sharing processor not set");
378
379 if (sharing_procs[proc] != m_field.get_comm_rank()) {
380 auto handle_on_sharing_proc = sharing_handles[proc];
381 sbuffer[sharing_procs[proc]].push_back(handle_on_sharing_proc);
382 sbuffer[sharing_procs[proc]].push_back(parent);
383
384 try {
385 sbuffer[sharing_procs[proc]].push_back(bit.to_ullong());
386 } catch (std::exception &ex) {
387 MOFEM_LOG("SELF", Sev::warning) << ex.what();
388 MOFEM_LOG("SELF", Sev::warning)
389 << "On " << ent << " "
390 << moab::CN::EntityTypeName(type_from_handle(ent));
391 MOFEM_LOG("SELF", Sev::warning) << "For bit ref " << bit;
392 }
393
394 if (verb >= NOISY)
395 MOFEM_LOG_C("SYNC", Sev::noisy, "send %lu (%lu) to %d at %d\n", ent,
396 handle_on_sharing_proc, sharing_procs[proc],
397 m_field.get_comm_rank());
398
399 if (!(pstatus & PSTATUS_MULTISHARED))
400 break;
401 }
402 }
403 }
404 }
405
// Same PETSc point-to-point exchange pattern as synchroniseEntities, but
// message elements are unsigned long long (block_size ints each).
406 int nsends = 0; // number of messages to send
407 std::vector<int> sbuffer_lengths(
408 m_field.get_comm_size()); // length of the message to proc
409
410 const size_t block_size = sizeof(unsigned long long) / sizeof(int);
411 for (int proc = 0; proc < m_field.get_comm_size(); proc++) {
412
413 if (!sbuffer[proc].empty()) {
414
415 sbuffer_lengths[proc] = sbuffer[proc].size() * block_size;
416 nsends++;
417
418 } else {
419
420 sbuffer_lengths[proc] = 0;
421 }
422 }
423
424 // Make sure it is a PETSc m_field.get_comm()
425 MPI_Comm comm;
426 CHKERR PetscCommDuplicate(m_field.get_comm(), &comm, NULL);
427
428 std::vector<MPI_Status> status(m_field.get_comm_size());
429
430 // Computes the number of messages a node expects to receive
431 int nrecvs; // number of messages received
432 CHKERR PetscGatherNumberOfMessages(comm, NULL, &sbuffer_lengths[0], &nrecvs);
433
434 // Computes info about messages that a MPI-node will receive, including
435 // (from-id,length) pairs for each message.
436 int *onodes; // list of node-ids from which messages are expected
437 int *olengths; // corresponding message lengths
438 CHKERR PetscGatherMessageLengths(comm, nsends, nrecvs, &sbuffer_lengths[0],
439 &onodes, &olengths);
440
441 // Gets a unique new tag from a PETSc communicator. All processors that share
442 // the communicator MUST call this routine EXACTLY the same number of times.
443 // This tag should only be used with the current objects communicator; do NOT
444 // use it with any other MPI communicator.
445 int tag;
446 CHKERR PetscCommGetNewTag(comm, &tag);
447
448 // Allocate a buffer sufficient to hold messages of size specified in
449 // olengths. And post Irecvs on these buffers using node info from onodes
450 int **rbuf; // must bee freed by user
451 MPI_Request *r_waits; // must bee freed by user
452 // rbuf has a pointers to messages. It has size of of nrecvs (number of
453 // messages) +1. In the first index a block is allocated,
454 // such that rbuf[i] = rbuf[i-1]+olengths[i-1].
455 CHKERR PetscPostIrecvInt(comm, tag, nrecvs, onodes, olengths, &rbuf,
456 &r_waits);
457
458 MPI_Request *s_waits; // status of sens messages
459 CHKERR PetscMalloc1(nsends, &s_waits);
460
461 // Send messages
462 for (int proc = 0, kk = 0; proc < m_field.get_comm_size(); proc++) {
463 if (!sbuffer_lengths[proc])
464 continue; // no message to send to this proc
465 CHKERR MPI_Isend(&(sbuffer[proc])[0], // buffer to send
466 sbuffer_lengths[proc], // message length
467 MPIU_INT, proc, // to proc
468 tag, comm, s_waits + kk);
469 kk++;
470 }
471
472 // Wait for received
473 if (nrecvs)
474 CHKERR MPI_Waitall(nrecvs, r_waits, &status[0]);
475
476 // Wait for send messages
477 if (nsends)
478 CHKERR MPI_Waitall(nsends, s_waits, &status[0]);
479
480 if (verb >= VERBOSE) {
481 MOFEM_LOG_C("SYNC", Sev::verbose,
482 "Rank %d nb. shared to synchronise parents ents %u\n",
483 m_field.get_comm_rank(), shared.size());
484 }
485
486 // synchronise range
// Unpack (ent, parent, bit) triples and write them onto the local tags.
487 for (int kk = 0; kk < nrecvs; kk++) {
488
489 int len = olengths[kk];
490 int *data_from_proc = rbuf[kk];
491
492 for (int ee = 0; ee < len;) {
493 EntityHandle ent;
494 EntityHandle parent;
495 unsigned long long uulong_bit;
496 bcopy(&data_from_proc[ee], &ent, sizeof(EntityHandle));
497 ee += block_size;
498 bcopy(&data_from_proc[ee], &parent, sizeof(EntityHandle));
499 ee += block_size;
500 bcopy(&data_from_proc[ee], &uulong_bit, sizeof(unsigned long long));
501 ee += block_size;
502
503 CHKERR set_parent(ent, parent);
504 CHKERR set_bit(ent, BitRefLevel(uulong_bit));
505
506 if (verb >= VERBOSE) {
507 MOFEM_LOG_C("SYNC", Sev::noisy, "received %lu (%lu) from %d at %d\n",
508 ent, parent, onodes[kk], m_field.get_comm_rank());
509 MOFEM_LOG("SYNC", Sev::noisy) << "Bit " << BitRefLevel(uulong_bit);
510 }
511 }
512 }
513
514 // Cleaning
515 CHKERR PetscFree(s_waits);
516 CHKERR PetscFree(rbuf[0]);
517 CHKERR PetscFree(rbuf);
518 CHKERR PetscFree(r_waits);
519 CHKERR PetscFree(onodes);
520 CHKERR PetscFree(olengths);
521 CHKERR PetscCommDestroy(&comm);
522
// NOTE(review): the guarded statement (fused line 524) and the
// MoFEMFunctionReturn line (526) were dropped by the extraction.
523 if (verb >= VERBOSE)
525
527}
528
// resolveSharedFiniteElements (name confirmed by the overload below):
// assigns MOAB partition/global-id/parallel-status tags to all finite
// elements of `fe_name` in `problem_ptr`, so every rank knows each
// element's owning part, its global id, and its sharing ranks, then
// exchanges the global-id tag across ranks.
// NOTE(review): doxygen extraction dropped the opening of the signature
// (fused line 529) and the MoFEMFunctionBegin/Return lines (532, 619), as
// well as the EntFiniteElement::getLocalUniqueIdCalculate( openings of the
// lower_bound/upper_bound argument lists (fused lines 553, 557).
530 const Problem *problem_ptr, const std::string &fe_name, int verb) {
531 MoFEM::Interface &m_field = cOre;
533 ParallelComm *pcomm = ParallelComm::get_pcomm(
534 &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);
535 std::vector<int> shprocs(MAX_SHARING_PROCS, 0);
536 std::vector<EntityHandle> shhandles(MAX_SHARING_PROCS, 0);
537 Range ents;
538 Tag th_gid = m_field.get_moab().globalId_tag();
// Global ids are assigned per part from a PETSc layout over the elements.
539 PetscLayout layout;
540 CHKERR problem_ptr->getNumberOfElementsByNameAndPart(m_field.get_comm(),
541 fe_name, &layout);
542 int gid, last_gid;
543 CHKERR PetscLayoutGetRange(layout, &gid, &last_gid);
544 CHKERR PetscLayoutDestroy(&layout);
545
546 const FiniteElement_multiIndex *fes_ptr;
547 CHKERR m_field.get_finite_elements(&fes_ptr);
548
549 auto fe_miit = fes_ptr->get<FiniteElement_name_mi_tag>().find(fe_name);
550 if (fe_miit != fes_ptr->get<FiniteElement_name_mi_tag>().end()) {
551 auto fit =
552 problem_ptr->numeredFiniteElementsPtr->get<Unique_mi_tag>().lower_bound(
554 0, (*fe_miit)->getFEUId()));
555 auto hi_fe_it =
556 problem_ptr->numeredFiniteElementsPtr->get<Unique_mi_tag>().upper_bound(
558 get_id_for_max_type<MBENTITYSET>(), (*fe_miit)->getFEUId()));
559 for (; fit != hi_fe_it; ++fit) {
560
561 auto ent = (*fit)->getEnt();
562 auto part = (*fit)->getPart();
563
564 ents.insert(ent);
565 CHKERR m_field.get_moab().tag_set_data(pcomm->part_tag(), &ent, 1, &part);
// Only the owning part assigns a global id from its layout range.
566 if (part == pcomm->rank()) {
567 CHKERR m_field.get_moab().tag_set_data(th_gid, &ent, 1, &gid);
568 gid++;
569 }
// NOTE(review): clear() sets the vector sizes to 0, yet shprocs[rrr] /
// shhandles[rrr] are written by index below — that is an out-of-bounds
// write on an empty std::vector (UB). It looks like an assign/fill of
// MAX_SHARING_PROCS elements was intended — confirm and fix upstream.
570 shprocs.clear();
571 shhandles.clear();
572
573 if (pcomm->size() > 1) {
574
575 unsigned char pstatus = 0;
576 if (pcomm->rank() != part) {
577 pstatus = PSTATUS_NOT_OWNED;
578 pstatus |= PSTATUS_GHOST;
579 }
580
// Every element is marked shared with all other ranks; with more than two
// ranks it is additionally multishared.
581 if (pcomm->size() > 2) {
582 pstatus |= PSTATUS_SHARED;
583 pstatus |= PSTATUS_MULTISHARED;
584 } else {
585 pstatus |= PSTATUS_SHARED;
586 }
587
// Sharing lists contain every rank except this one, -1 terminated.
588 size_t rrr = 0;
589 for (size_t rr = 0; rr < pcomm->size(); ++rr) {
590 if (rr != pcomm->rank()) {
591 shhandles[rrr] = ent;
592 shprocs[rrr] = rr;
593 ++rrr;
594 }
595 }
596 for (; rrr != pcomm->size(); ++rrr)
597 shprocs[rrr] = -1;
598
599 if (pstatus & PSTATUS_SHARED) {
600 CHKERR m_field.get_moab().tag_set_data(pcomm->sharedp_tag(), &ent, 1,
601 &shprocs[0]);
602 CHKERR m_field.get_moab().tag_set_data(pcomm->sharedh_tag(), &ent, 1,
603 &shhandles[0]);
604 }
605
606 if (pstatus & PSTATUS_MULTISHARED) {
607 CHKERR m_field.get_moab().tag_set_data(pcomm->sharedps_tag(), &ent, 1,
608 &shprocs[0]);
609 CHKERR m_field.get_moab().tag_set_data(pcomm->sharedhs_tag(), &ent, 1,
610 &shhandles[0]);
611 }
612 CHKERR m_field.get_moab().tag_set_data(pcomm->pstatus_tag(), &ent, 1,
613 &pstatus);
614 }
615 }
616 }
617
// Owners broadcast their global ids to the ghost copies.
618 CHKERR pcomm->exchange_tags(th_gid, ents);
620}
621
// Convenience overload: looks the problem up by name and forwards to
// resolveSharedFiniteElements(const Problem *, ...).
// NOTE(review): the opening of the signature (fused line 622) and the
// MoFEMFunctionBegin/Return lines (625, 629) were dropped by the doxygen
// extraction — restore from the repository source.
623 const std::string name, const std::string &fe_name, int verb) {
624 MoFEM::Interface &m_field = cOre;
626 const Problem *problem_ptr;
627 CHKERR m_field.get_problem(name, &problem_ptr);
628 CHKERR resolveSharedFiniteElements(problem_ptr, fe_name, verb);
630}
631
// makeEntitiesMultishared (name confirmed by the overload below): makes the
// given entities visible on all ranks, owned by `owner_proc`. A temporary
// global-id tag is assigned, MOAB's resolve_shared_ents establishes the
// sharing data (only the owner contributes the entities), then the tag is
// deleted. With verb >= NOISY the resulting ownership/sharing data is
// dumped per entity.
// NOTE(review): doxygen extraction dropped the opening of the signature
// (fused line 633, presumably "...makeEntitiesMultishared(EntityHandle
// *entities,") and the MoFEMFunctionBegin/Return lines (637, 763), plus
// the MoFEMFunctionBegin/ReturnHot pairs inside the delete_tag and
// print_owner lambdas (650/652, 707/753/755).
634 const int num_entities,
635 const int owner_proc, int verb) {
636 MoFEM::Interface &m_field = cOre;
638
// Nothing to share on a single-rank communicator.
639 if (m_field.get_comm_size() > 1) {
640
641 ParallelComm *pcomm = ParallelComm::get_pcomm(
642 &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);
643
644 Range all_ents_range;
645 all_ents_range.insert_list(entities, entities + num_entities);
646
647 auto get_tag = [&]() { return m_field.get_moab().globalId_tag(); };
648
649 auto delete_tag = [&](auto &&th_gid) {
651 CHKERR m_field.get_moab().tag_delete(th_gid);
653 };
654
// Orchestrates the MOAB sharing resolution; returns the gid tag so the
// caller can delete it afterwards.
655 auto resolve_shared_ents = [&](auto &&th_gid, auto &all_ents_range) {
// Assign 1-based global ids in range order before resolution.
656 auto set_gid = [&](auto &th_gid) {
657 std::vector<int> gids(num_entities);
658 for (size_t g = 0; g != all_ents_range.size(); ++g)
659 gids[g] = g + 1;
660 CHKERR m_field.get_moab().tag_set_data(th_gid, all_ents_range,
661 &*gids.begin());
662
663 return &th_gid;
664 };
665
// "Skin" input for resolve_shared_ents: entities bucketed by dimension.
666 auto get_skin_ents = [&](auto &all_ents_range) {
667 std::array<Range, 4> proc_ents_skin;
668 proc_ents_skin[3] = all_ents_range.subset_by_dimension(3);
669 proc_ents_skin[2] = all_ents_range.subset_by_dimension(2);
670 proc_ents_skin[1] = all_ents_range.subset_by_dimension(1);
671 proc_ents_skin[0] = all_ents_range.subset_by_dimension(0);
672 return proc_ents_skin;
673 };
674
// Highest dimension present in the range (-1 when empty).
675 auto resolve_dim = [&](auto &all_ents_range) {
676 for (int resolve_dim = 3; resolve_dim >= 0; --resolve_dim) {
677 if (all_ents_range.num_of_dimension(resolve_dim))
678 return resolve_dim;
679 }
680 return -1;
681 };
682
// Only the designated owner passes the entities as locally-owned.
683 auto get_proc_ent = [&](auto &all_ents_range) {
684 Range proc_ent;
685 if (m_field.get_comm_rank() == owner_proc)
686 proc_ent = all_ents_range;
687 return proc_ent;
688 };
689
// Shadows the outer lambda of the same name; calls into MOAB.
690 auto resolve_shared_ents = [&](auto &&proc_ents, auto &&skin_ents) {
691 return pcomm->resolve_shared_ents(
692 0, proc_ents, resolve_dim(all_ents_range),
693 resolve_dim(all_ents_range), skin_ents.data(), set_gid(th_gid));
694 };
695
696 CHKERR resolve_shared_ents(get_proc_ent(all_ents_range),
697 get_skin_ents(all_ents_range));
698
699 return th_gid;
700 };
701
702 CHKERR delete_tag(resolve_shared_ents(get_tag(), all_ents_range));
703
// Diagnostic dump of the sharing data MOAB produced.
704 if (verb >= NOISY) {
705
706 auto print_owner = [&](const EntityHandle e) {
708 int moab_owner_proc;
709 EntityHandle moab_owner_handle;
710 CHKERR pcomm->get_owner_handle(e, moab_owner_proc, moab_owner_handle);
711
712 unsigned char pstatus = 0;
713
714 CHKERR m_field.get_moab().tag_get_data(pcomm->pstatus_tag(), &e, 1,
715 &pstatus);
716
717 std::vector<int> shprocs(MAX_SHARING_PROCS, 0);
718 std::vector<EntityHandle> shhandles(MAX_SHARING_PROCS, 0);
719
720 CHKERR m_field.get_moab().tag_get_data(pcomm->sharedp_tag(), &e, 1,
721 &shprocs[0]);
722 CHKERR m_field.get_moab().tag_get_data(pcomm->sharedh_tag(), &e, 1,
723 &shhandles[0]);
724 if (pstatus & PSTATUS_MULTISHARED) {
725 CHKERR m_field.get_moab().tag_get_data(pcomm->sharedps_tag(), &e, 1,
726 &shprocs[0]);
727 CHKERR m_field.get_moab().tag_get_data(pcomm->sharedhs_tag(), &e, 1,
728 &shhandles[0]);
729 }
730
731 std::ostringstream ss;
732
733 ss << "Rank " << m_field.get_comm_rank() << " ";
734 if (!(pstatus & PSTATUS_NOT_OWNED))
735 ss << "OWNER ";
736 if (pstatus & PSTATUS_SHARED)
737 ss << "PSTATUS_SHARED ";
738 if (pstatus & PSTATUS_MULTISHARED)
739 ss << "PSTATUS_MULTISHARED ";
740
741 ss << "owner " << moab_owner_proc << " (" << owner_proc << ") ";
742
// NOTE(review): these loops index shprocs/shhandles (sized
// MAX_SHARING_PROCS) up to comm_size()+1 — out of bounds whenever
// comm_size()+1 > MAX_SHARING_PROCS; confirm the intended bound.
743 ss << "shprocs: ";
744 for (size_t r = 0; r != m_field.get_comm_size() + 1; ++r)
745 ss << shprocs[r] << " ";
746
747 ss << "shhandles: ";
748 for (size_t r = 0; r != m_field.get_comm_size() + 1; ++r)
749 ss << shhandles[r] << " ";
750
751 ss << std::endl;
752 MOFEM_LOG("SYNC", Sev::noisy) << ss.str();
754
756 };
757
758 for (auto e : all_ents_range)
759 CHKERR print_owner(e);
760 }
761 }
762
764}
765
// Range-based overload: copies the Range into a contiguous vector and
// forwards to makeEntitiesMultishared(EntityHandle *, ...).
// NOTE(review): the opening of the signature (fused line 766, presumably
// "...makeEntitiesMultishared(Range &entities,") and the
// MoFEMFunctionBegin/Return lines (770, 778) were dropped by the doxygen
// extraction — restore from the repository source.
767 const int owner_proc,
768 int verb) {
769 MoFEM::Interface &m_field = cOre;
771 if (m_field.get_comm_size() > 1) {
772 const int num_ents = entities.size();
773 std::vector<EntityHandle> vec_ents(num_ents);
774 std::copy(entities.begin(), entities.end(), vec_ents.begin());
775 CHKERR makeEntitiesMultishared(&*vec_ents.begin(), num_ents, owner_proc,
776 verb);
777 }
779}
780
// Makes all entities stored in a field's meshset multishared with the
// given owner, by forwarding to makeEntitiesMultishared(EntityHandle*,...).
// NOTE(review): the opening of the signature (fused line 782) and the
// MoFEMFunctionBegin/Return lines (785, 794) were dropped by the doxygen
// extraction. Also note: if the field meshset is empty,
// &*field_ents.begin() dereferences an empty vector's begin() — UB;
// confirm whether an emptiness guard is needed upstream.
783 const int owner_proc, int verb) {
784 MoFEM::Interface &m_field = cOre;
786 if (m_field.get_comm_size() > 1) {
787 EntityHandle field_meshset = m_field.get_field_meshset(field_name);
788 std::vector<EntityHandle> field_ents;
789 CHKERR m_field.get_moab().get_entities_by_handle(field_meshset, field_ents,
790 true);
791 CHKERR makeEntitiesMultishared(&*field_ents.begin(), field_ents.size(),
792 owner_proc, verb);
793 }
795}
796
// Exchanges stored field data (DOF values) of shared field entities across
// ranks via MOAB tag exchange. Vertex entities and higher-dimension
// entities are exchanged separately because they live on different tags
// (th_FieldDataVerts vs th_FieldData).
// NOTE(review): the opening of the signature (fused line 797, presumably
// "...exchangeFieldData(const std::string field_name,") and the
// MoFEMFunctionBegin/Return lines (800, 843) were dropped by the doxygen
// extraction, as were the Begin/ReturnHot lines inside the exchange lambda
// (831, 837) — restore from the repository source.
798 int verb) {
799 MoFEM::Interface &m_field = cOre;
801 if (m_field.get_comm_size() > 1) {
802
803 Range exchange_ents_data_verts, exchange_ents_data;
804
// Iterate all field entities of this field (selected via the field bit
// number over the Unique_mi_tag index).
805 auto *field_ents = m_field.get_field_ents();
806 auto field_bit_number = m_field.get_field_bit_number(field_name);
807 auto lo = field_ents->get<Unique_mi_tag>().lower_bound(
808 FieldEntity::getLoBitNumberUId(field_bit_number));
809 auto hi = field_ents->get<Unique_mi_tag>().lower_bound(
810 FieldEntity::getHiBitNumberUId(field_bit_number));
811
// Only entities that are shared (non-zero pstatus) and actually carry DOFs
// participate in the exchange.
812 for (auto it = lo; it != hi; ++it)
813 if (
814
815 ((*it)->getPStatus()) &&
816
817 (*it)->getNbDofsOnEnt()
818
819 ) {
820 if ((*it)->getEntType() == MBVERTEX)
821 exchange_ents_data_verts.insert((*it)->getEnt());
822 else
823 exchange_ents_data.insert((*it)->getEnt());
824 }
825
826 auto field_ptr = m_field.get_field_structure(field_name);
827 ParallelComm *pcomm = ParallelComm::get_pcomm(
828 &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);
829
// Exchange a single tag over a range; skipped when the range is empty.
830 auto exchange = [&](const Range &ents, Tag th) {
832 if (!ents.empty()) {
833 std::vector<Tag> tags;
834 tags.push_back(th);
835 CHKERR pcomm->exchange_tags(tags, tags, ents);
836 }
838 };
839
840 CHKERR exchange(exchange_ents_data_verts, field_ptr->th_FieldDataVerts);
841 CHKERR exchange(exchange_ents_data, field_ptr->th_FieldData);
842 }
844}
845
848 const int adj_dim, const int n_parts,
849 Tag *th_vertex_weights, Tag *th_edge_weights,
850 Tag *th_part_weights, int verb, const bool debug) {
851 MoFEM::Interface &m_field = cOre;
853
854 // get layout
855 int rstart, rend, nb_elems;
856 {
857 PetscLayout layout;
858 CHKERR PetscLayoutCreate(m_field.get_comm(), &layout);
859 CHKERR PetscLayoutSetBlockSize(layout, 1);
860 CHKERR PetscLayoutSetSize(layout, ents.size());
861 CHKERR PetscLayoutSetUp(layout);
862 CHKERR PetscLayoutGetSize(layout, &nb_elems);
863 CHKERR PetscLayoutGetRange(layout, &rstart, &rend);
864 CHKERR PetscLayoutDestroy(&layout);
865 if (verb >= VERBOSE) {
866 MOFEM_LOG("SYNC", Sev::inform)
867 << "Finite elements in problem: row lower " << rstart << " row upper "
868 << rend << " nb. elems " << nb_elems << " ( " << ents.size() << " )";
870 }
871 }
872
873 std::vector<EntityHandle> weight_ents;
874 weight_ents.reserve(rend - rstart + 1);
875
876 struct AdjBridge {
877 EntityHandle ent;
878 std::vector<int> adj;
879 AdjBridge(const EntityHandle ent, std::vector<int> &adj)
880 : ent(ent), adj(adj) {}
881 };
882
883 typedef multi_index_container<
884 AdjBridge,
885 indexed_by<
886
887 hashed_unique<member<AdjBridge, EntityHandle, &AdjBridge::ent>>
888
889 >>
890 AdjBridgeMap;
891
892 auto get_it = [&](auto i) {
893 auto it = ents.begin();
894 for (; i > 0; --i) {
895 if (it == ents.end())
896 break;
897 ++it;
898 }
899 return it;
900 };
901
902 Range proc_ents;
903 proc_ents.insert(get_it(rstart), get_it(rend));
904 if (proc_ents.size() != rend - rstart)
905 SETERRQ2(PETSC_COMM_WORLD, MOFEM_DATA_INCONSISTENCY,
906 "Wrong number of elements in range %d != %d", proc_ents.size(),
907 rend - rstart);
908
909 Range all_dim_ents;
910 CHKERR m_field.get_moab().get_adjacencies(
911 proc_ents, adj_dim, true, all_dim_ents, moab::Interface::UNION);
912
913 AdjBridgeMap adj_bridge_map;
914 auto hint = adj_bridge_map.begin();
915 std::vector<int> adj;
916 for (auto ent : all_dim_ents) {
917 Range adj_ents;
918 CHKERR m_field.get_moab().get_adjacencies(&ent, 1, dim, false, adj_ents);
919 adj_ents = intersect(adj_ents, ents);
920 adj.clear();
921 adj.reserve(adj_ents.size());
922 for (auto a : adj_ents)
923 adj.emplace_back(ents.index(a));
924 hint = adj_bridge_map.emplace_hint(hint, ent, adj);
925 }
926
927 int *_i;
928 int *_j;
929 {
930 const int nb_loc_elements = rend - rstart;
931 std::vector<int> i(nb_loc_elements + 1, 0), j;
932 {
933 std::vector<int> row_adj;
934 Range::iterator fe_it;
935 int ii, jj;
936 size_t max_row_size;
937 for (fe_it = proc_ents.begin(), ii = rstart, jj = 0, max_row_size = 0;
938 fe_it != proc_ents.end(); ++fe_it, ++ii) {
939
940 if (type_from_handle(*fe_it) == MBENTITYSET) {
941 SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED,
942 "not yet implemented, don't know what to do for meshset "
943 "element");
944 } else {
945
946 Range dim_ents;
947 CHKERR m_field.get_moab().get_adjacencies(&*fe_it, 1, adj_dim, false,
948 dim_ents);
949 dim_ents = intersect(dim_ents, all_dim_ents);
950
951 row_adj.clear();
952 for (auto e : dim_ents) {
953 auto adj_it = adj_bridge_map.find(e);
954 if (adj_it != adj_bridge_map.end()) {
955
956 for (const auto idx : adj_it->adj)
957 row_adj.push_back(idx);
958
959 } else
960 SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
961 "Entity not found");
962 }
963
964 std::sort(row_adj.begin(), row_adj.end());
965 auto end = std::unique(row_adj.begin(), row_adj.end());
966
967 size_t row_size = std::distance(row_adj.begin(), end);
968 max_row_size = std::max(max_row_size, row_size);
969 if (j.capacity() < (nb_loc_elements - jj) * max_row_size)
970 j.reserve(nb_loc_elements * max_row_size);
971
972 i[jj] = j.size();
973 auto diag = ents.index(*fe_it);
974 for (auto it = row_adj.begin(); it != end; ++it)
975 if (*it != diag)
976 j.push_back(*it);
977 }
978
979 ++jj;
980
981 if (th_vertex_weights != NULL)
982 weight_ents.push_back(*fe_it);
983 }
984
985 i[jj] = j.size();
986 }
987
988 CHKERR PetscMalloc(i.size() * sizeof(int), &_i);
989 CHKERR PetscMalloc(j.size() * sizeof(int), &_j);
990 copy(i.begin(), i.end(), _i);
991 copy(j.begin(), j.end(), _j);
992 }
993
994 // get weights
995 int *vertex_weights = NULL;
996 if (th_vertex_weights != NULL) {
997 CHKERR PetscMalloc(weight_ents.size() * sizeof(int), &vertex_weights);
998 CHKERR m_field.get_moab().tag_get_data(*th_vertex_weights,
999 &*weight_ents.begin(),
1000 weight_ents.size(), vertex_weights);
1001 }
1002
1003 {
1004 Mat Adj;
1005 // Adjacency matrix used to partition problems, f.e. METIS
1006 CHKERR MatCreateMPIAdj(m_field.get_comm(), rend - rstart, nb_elems, _i, _j,
1007 PETSC_NULL, &Adj);
1008 CHKERR MatSetOption(Adj, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);
1009
1010 if (debug) {
1011 Mat A;
1012 CHKERR MatConvert(Adj, MATMPIAIJ, MAT_INITIAL_MATRIX, &A);
1013 CHKERR MatView(A, PETSC_VIEWER_DRAW_WORLD);
1014 std::string wait;
1015 std::cin >> wait;
1016 CHKERR MatDestroy(&A);
1017 }
1018
1019 // run pets to do partitioning
1020 MOFEM_LOG("WORLD", Sev::verbose) << "Start";
1021
1022 MatPartitioning part;
1023 IS is;
1024 CHKERR MatPartitioningCreate(m_field.get_comm(), &part);
1025
1026 CHKERR MatPartitioningSetAdjacency(part, Adj);
1027 CHKERR MatPartitioningSetFromOptions(part);
1028 CHKERR MatPartitioningSetNParts(part, n_parts);
1029 if (th_vertex_weights != NULL) {
1030 CHKERR MatPartitioningSetVertexWeights(part, vertex_weights);
1031 }
1032 PetscBool same;
1033 PetscObjectTypeCompare((PetscObject)part, MATPARTITIONINGPARMETIS, &same);
1034 if (same) {
1035#ifdef PARMETIS
1036 CHKERR MatPartitioningApply_Parmetis_MoFEM(part, &is);
1037#endif
1038 } else {
1039 CHKERR MatPartitioningApply(part, &is);
1040 }
1041
1042 MOFEM_LOG("WORLD", Sev::verbose) << "End";
1043
1044 // gather
1045 IS is_gather, is_num, is_gather_num;
1046 CHKERR ISAllGather(is, &is_gather);
1047 CHKERR ISPartitioningToNumbering(is, &is_num);
1048 CHKERR ISAllGather(is_num, &is_gather_num);
1049
1050 const int *part_number, *gids;
1051 CHKERR ISGetIndices(is_gather, &part_number);
1052 CHKERR ISGetIndices(is_gather_num, &gids);
1053
1054 // set partition tag and gid tag to entities
1055 ParallelComm *pcomm = ParallelComm::get_pcomm(
1056 &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);
1057 Tag part_tag = pcomm->part_tag();
1058 CHKERR m_field.get_moab().tag_set_data(part_tag, ents, part_number);
1059 Tag gid_tag = m_field.get_moab().globalId_tag();
1060
1061 std::map<int, Range> parts_ents;
1062 {
1063 // get entities on each part
1064 Range::iterator eit = ents.begin();
1065 for (int ii = 0; eit != ents.end(); eit++, ii++) {
1066 parts_ents[part_number[ii]].insert(*eit);
1067 }
1068 Range tagged_sets;
1069 CHKERR m_field.get_moab().get_entities_by_type_and_tag(
1070 0, MBENTITYSET, &part_tag, NULL, 1, tagged_sets,
1071 moab::Interface::UNION);
1072 if (!tagged_sets.empty())
1073 CHKERR m_field.get_moab().tag_delete_data(part_tag, tagged_sets);
1074
1075 if (n_parts > (int)tagged_sets.size()) {
1076 // too few partition sets - create missing ones
1077 int num_new = n_parts - tagged_sets.size();
1078 for (int i = 0; i < num_new; i++) {
1079 EntityHandle new_set;
1080 CHKERR m_field.get_moab().create_meshset(MESHSET_SET, new_set);
1081 tagged_sets.insert(new_set);
1082 }
1083 } else if (n_parts < (int)tagged_sets.size()) {
1084 // too many partition sets - delete extras
1085 int num_del = tagged_sets.size() - n_parts;
1086 for (int i = 0; i < num_del; i++) {
1087 EntityHandle old_set = tagged_sets.pop_back();
1088 CHKERR m_field.get_moab().delete_entities(&old_set, 1);
1089 }
1090 }
1091 // write a tag to those sets denoting they're partition sets, with a
1092 // value of the proc number
1093 std::vector<int> dum_ids(n_parts);
1094 for (int i = 0; i < n_parts; i++)
1095 dum_ids[i] = i;
1096 CHKERR m_field.get_moab().tag_set_data(part_tag, tagged_sets,
1097 &*dum_ids.begin());
1098 CHKERR m_field.get_moab().clear_meshset(tagged_sets);
1099
1100 // get lower dimension entities on each part
1101 for (int pp = 0; pp != n_parts; pp++) {
1102 Range dim_ents = parts_ents[pp].subset_by_dimension(dim);
1103 for (int dd = dim - 1; dd >= 0; dd--) {
1104 Range adj_ents;
1105 if (dd > 0) {
1106 CHKERR m_field.get_moab().get_adjacencies(
1107 dim_ents, dd, false, adj_ents, moab::Interface::UNION);
1108 } else {
1109 CHKERR m_field.get_moab().get_connectivity(dim_ents, adj_ents,
1110 true);
1111 }
1112 parts_ents[pp].merge(adj_ents);
1113 }
1114 }
1115 for (int pp = 1; pp != n_parts; pp++) {
1116 for (int ppp = 0; ppp != pp; ppp++) {
1117 parts_ents[pp] = subtract(parts_ents[pp], parts_ents[ppp]);
1118 }
1119 }
1120 if (debug) {
1121 if (m_field.get_comm_rank() == 0) {
1122 for (int rr = 0; rr != n_parts; rr++) {
1123 ostringstream ss;
1124 ss << "out_part_" << rr << ".vtk";
1125 MOFEM_LOG("SELF", Sev::inform)
1126 << "Save debug part mesh " << ss.str();
1127 EntityHandle meshset;
1128 CHKERR m_field.get_moab().create_meshset(MESHSET_SET, meshset);
1129 CHKERR m_field.get_moab().add_entities(meshset, parts_ents[rr]);
1130 CHKERR m_field.get_moab().write_file(ss.str().c_str(), "VTK", "",
1131 &meshset, 1);
1132 CHKERR m_field.get_moab().delete_entities(&meshset, 1);
1133 }
1134 }
1135 }
1136 for (int pp = 0; pp != n_parts; pp++) {
1137 CHKERR m_field.get_moab().add_entities(tagged_sets[pp], parts_ents[pp]);
1138 }
1139
1140 // set gid and part tag
1141 for (EntityType t = MBVERTEX; t != MBENTITYSET; ++t) {
1142
1143 void *ptr;
1144 int count;
1145
1146        int gid = 1; // moab indexing from 1
1147 for (int pp = 0; pp != n_parts; pp++) {
1148 Range type_ents = parts_ents[pp].subset_by_type(t);
1149 if (t != MBVERTEX) {
1150 CHKERR m_field.get_moab().tag_clear_data(part_tag, type_ents, &pp);
1151 }
1152
1153 auto eit = type_ents.begin();
1154 for (; eit != type_ents.end();) {
1155 CHKERR m_field.get_moab().tag_iterate(gid_tag, eit, type_ents.end(),
1156 count, ptr);
1157 auto gid_tag_ptr = static_cast<int *>(ptr);
1158 for (; count > 0; --count) {
1159 *gid_tag_ptr = gid;
1160 ++eit;
1161 ++gid;
1162 ++gid_tag_ptr;
1163 }
1164 }
1165 }
1166 }
1167 }
1168
1169 CHKERR ISRestoreIndices(is_gather, &part_number);
1170 CHKERR ISRestoreIndices(is_gather_num, &gids);
1171 CHKERR ISDestroy(&is_num);
1172 CHKERR ISDestroy(&is_gather_num);
1173 CHKERR ISDestroy(&is_gather);
1174 CHKERR ISDestroy(&is);
1175 CHKERR MatPartitioningDestroy(&part);
1176 CHKERR MatDestroy(&Adj);
1177 }
1178
1180}
1181
1182// MoFEMErrorCode partitionAndShare(const EntityHandle meshset,
1183// const int overlap) {
1184// MoFEM::Interface &m_field = cOre;
1185// MoFEMFunctionBegin;
1186
1187// MoFEMFunctionReturn(0);
1188// }
1189
1190} // namespace MoFEM
#define MOFEM_LOG_SYNCHRONISE(comm)
Synchronise "SYNC" channel.
Definition: LogManager.hpp:345
#define MOFEM_LOG_C(channel, severity, format,...)
Definition: LogManager.hpp:311
constexpr double a
@ NOISY
Definition: definitions.h:211
@ VERBOSE
Definition: definitions.h:209
#define MoFEMFunctionReturnHot(a)
Last executable line of each PETSc function used for error handling. Replaces return()
Definition: definitions.h:447
#define MoFEMFunctionBegin
First executable line of each MoFEM function, used for error handling. Final line of MoFEM functions ...
Definition: definitions.h:346
#define CHK_MOAB_THROW(err, msg)
Check error code of MoAB function and throw MoFEM exception.
Definition: definitions.h:576
@ MOFEM_IMPOSSIBLE_CASE
Definition: definitions.h:35
@ MOFEM_DATA_INCONSISTENCY
Definition: definitions.h:31
@ MOFEM_NOT_IMPLEMENTED
Definition: definitions.h:32
#define MoFEMFunctionReturn(a)
Last executable line of each PETSc function used for error handling. Replaces return()
Definition: definitions.h:416
#define CHKERR
Inline error check.
Definition: definitions.h:535
#define MoFEMFunctionBeginHot
First executable line of each MoFEM function, used for error handling. Final line of MoFEM functions ...
Definition: definitions.h:440
static const bool debug
const int dim
multi_index_container< boost::shared_ptr< FiniteElement >, indexed_by< hashed_unique< tag< FiniteElement_Meshset_mi_tag >, member< FiniteElement, EntityHandle, &FiniteElement::meshset > >, hashed_unique< tag< BitFEId_mi_tag >, const_mem_fun< FiniteElement, BitFEId, &FiniteElement::getId >, HashBit< BitFEId >, EqBit< BitFEId > >, ordered_unique< tag< FiniteElement_name_mi_tag >, const_mem_fun< FiniteElement, boost::string_ref, &FiniteElement::getNameRef > > > > FiniteElement_multiIndex
MultiIndex for entities for FiniteElement.
virtual const Problem * get_problem(const std::string problem_name) const =0
Get the problem object.
virtual const FieldEntity_multiIndex * get_field_ents() const =0
Get the field ents object.
virtual const FiniteElement_multiIndex * get_finite_elements() const =0
Get the finite elements object.
virtual const Field * get_field_structure(const std::string &name, enum MoFEMTypes bh=MF_EXIST) const =0
get field structure
MoFEMErrorCode synchroniseFieldEntities(const std::string name, int verb=DEFAULT_VERBOSITY)
#define MOFEM_LOG(channel, severity)
Log.
Definition: LogManager.hpp:308
MoFEMErrorCode partitionMesh(const Range &ents, const int dim, const int adj_dim, const int n_parts, Tag *th_vertex_weights=nullptr, Tag *th_edge_weights=nullptr, Tag *th_part_weights=nullptr, int verb=VERBOSE, const bool debug=false)
Set partition tag to each finite element in the problem.
MoFEMErrorCode resolveSharedFiniteElements(const Problem *problem_ptr, const std::string &fe_name, int verb=DEFAULT_VERBOSITY)
resolve shared entities for finite elements in the problem
auto bit
set bit
FTensor::Index< 'i', SPACE_DIM > i
FTensor::Index< 'j', 3 > j
PetscErrorCode MoFEMErrorCode
MoFEM/PETSc error code.
Definition: Exceptions.hpp:56
std::bitset< BITREFLEVEL_SIZE > BitRefLevel
Bit structure attached to each entity identifying to what mesh entity is attached.
Definition: Types.hpp:40
implementation of Data Operators for Forces and Sources
Definition: Common.hpp:10
auto type_from_handle(const EntityHandle h)
get type from entity handle
Definition: Templates.hpp:1634
constexpr AssemblyType A
constexpr double t
plate stiffness
Definition: plate.cpp:59
constexpr auto field_name
constexpr double g
Managing BitRefLevels.
MoFEMErrorCode query_interface(boost::typeindex::type_index type_index, UnknownInterface **iface) const
CommInterface(const MoFEM::Core &core)
MoFEMErrorCode makeEntitiesMultishared(const EntityHandle *entities, const int num_entities, const int owner_proc=0, int verb=DEFAULT_VERBOSITY)
make entities from proc 0 shared on all proc
MoFEMErrorCode synchroniseEntities(Range &ent, int verb=DEFAULT_VERBOSITY)
MoFEMErrorCode makeFieldEntitiesMultishared(const std::string field_name, const int owner_proc=0, int verb=DEFAULT_VERBOSITY)
make field entities multi shared
MoFEMErrorCode resolveParentEntities(const Range &ent, int verb=DEFAULT_VERBOSITY)
Synchronise parent entities.
MoFEMErrorCode exchangeFieldData(const std::string field_name, int verb=DEFAULT_VERBOSITY)
Exchange field data.
virtual int get_comm_size() const =0
virtual FieldBitNumber get_field_bit_number(const std::string name) const =0
get field bit number
virtual moab::Interface & get_moab()=0
virtual EntityHandle get_field_meshset(const std::string name) const =0
get field meshset
virtual MPI_Comm & get_comm() const =0
virtual int get_comm_rank() const =0
virtual boost::shared_ptr< BasicEntityData > & get_basic_entity_data_ptr()=0
Get pointer to basic entity data.
Core (interface) class.
Definition: Core.hpp:82
Tag get_th_RefBitLevel() const
Definition: Core.hpp:198
Tag get_th_RefParentHandle() const
Definition: Core.hpp:197
Deprecated interface functions.
UId getLocalUniqueIdCalculate() const
Generate UId for finite element entity.
static UId getHiBitNumberUId(const FieldBitNumber bit_number)
static UId getLoBitNumberUId(const FieldBitNumber bit_number)
keeps basic data about problem
MoFEMErrorCode getNumberOfElementsByNameAndPart(MPI_Comm comm, const std::string name, PetscLayout *layout) const
Get number of finite elements by name on processors.
boost::shared_ptr< NumeredEntFiniteElement_multiIndex > numeredFiniteElementsPtr
store finite elements
base class for all interface classes