v0.13.1
CommInterface.cpp
Go to the documentation of this file.
1/** \file CommInterface.cpp
2 * \brief Functions for interprocessor communications
3 * \mofem_comm
4 */
5
6/* MoFEM is free software: you can redistribute it and/or modify it under
7 * the terms of the GNU Lesser General Public License as published by the
8 * Free Software Foundation, either version 3 of the License, or (at your
9 * option) any later version.
10 *
11 * MoFEM is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
14 * License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with MoFEM. If not, see <http://www.gnu.org/licenses/>
18 */
19
20namespace MoFEM {
21
22#ifdef PARMETIS
23
// Forward declaration of MoFEM's own ParMETIS-based implementation of
// MatPartitioningApply (definition lives elsewhere; only available when
// the library is built with PARMETIS).
24MoFEMErrorCode MatPartitioningApply_Parmetis_MoFEM(MatPartitioning part,
 25 IS *partitioning);
26
27#endif // PARMETIS
28
// Return this CommInterface instance through the UnknownInterface query
// mechanism. The type_index argument is not inspected here: this class
// exposes a single interface, so every query yields the same object.
30CommInterface::query_interface(boost::typeindex::type_index type_index,
 31 UnknownInterface **iface) const {
 // const_cast: the query API hands out a mutable interface pointer even
 // from a const object.
 33 *iface = const_cast<CommInterface *>(this);
 35}
36
// Constructor: keep a mutable reference to the core (cast away const so
// the interface can modify it) and start with debugging disabled.
 38 : cOre(const_cast<MoFEM::Core &>(core)), dEbug(false) {}
40
 42 MoFEM::Interface &m_field = cOre;
 43 auto ref_ents_ptr = m_field.get_ref_ents();
 45
 // Build, for every destination processor, the list of remote handles of
 // the shared entities this rank holds locally.
 46 // make a buffer
 47 std::vector<std::vector<EntityHandle>> sbuffer(m_field.get_comm_size());
 48
 49 Range::iterator eit = ents.begin();
 50 for (; eit != ents.end(); eit++) {
 51
 52 auto meit = ref_ents_ptr->get<Ent_mi_tag>().find(*eit);
 53 if (meit == ref_ents_ptr->get<Ent_mi_tag>().end()) {
 // NOTE(review): the `continue` makes the SETERRQ2 below unreachable —
 // entities missing from the database are silently skipped. Confirm this
 // is a deliberate best-effort policy and not a leftover debug change.
 54 continue;
 55 SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
 56 "rank %d entity %lu not exist on database, local entity can not "
 57 "be found for this owner",
 58 m_field.get_comm_rank(), *eit);
 59 }
 60
 61 unsigned char pstatus = (*meit)->getPStatus();
 62
 // pstatus == 0 means the entity is not shared with any other processor.
 63 if (pstatus == 0)
 64 continue;
 65
 66 if (verb >= NOISY) {
 67 MOFEM_LOG("SYNC", Sev::noisy) << "pstatus " << std::bitset<8>(pstatus);
 68 }
 69
 // Walk the list of sharing processors; the list is -1 terminated and at
 // most MAX_SHARING_PROCS long.
 70 for (int proc = 0;
 71 proc < MAX_SHARING_PROCS && -1 != (*meit)->getSharingProcsPtr()[proc];
 72 proc++) {
 73 if ((*meit)->getSharingProcsPtr()[proc] == -1)
 74 SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSIBLE_CASE,
 75 "sharing processor not set");
 76
 // Skip ourselves.
 77 if ((*meit)->getSharingProcsPtr()[proc] == m_field.get_comm_rank())
 78 continue;
 79
 // Queue the handle by which the remote processor knows this entity.
 80 EntityHandle handle_on_sharing_proc =
 81 (*meit)->getSharingHandlersPtr()[proc];
 82 sbuffer[(*meit)->getSharingProcsPtr()[proc]].push_back(
 83 handle_on_sharing_proc);
 84 if (verb >= NOISY)
 85 MOFEM_LOG_C("SYNC", Sev::noisy, "send %lu (%lu) to %d at %d\n",
 86 (*meit)->getEnt(), handle_on_sharing_proc,
 87 (*meit)->getSharingProcsPtr()[proc],
 88 m_field.get_comm_rank());
 89
 // Entities shared with only one other processor keep that single
 // processor in slot 0, so stop after the first entry.
 90 if (!(pstatus & PSTATUS_MULTISHARED))
 91 break;
 92 }
 93 }
 94
 95 int nsends = 0; // number of messages to send
 96 std::vector<int> sbuffer_lengths(
 97 m_field.get_comm_size()); // length of the message to proc
 // Messages are sent as arrays of int; one EntityHandle occupies
 // block_size ints.
 98 const size_t block_size = sizeof(EntityHandle) / sizeof(int);
 99 for (int proc = 0; proc < m_field.get_comm_size(); proc++) {
 100
 101 if (!sbuffer[proc].empty()) {
 102
 103 sbuffer_lengths[proc] = sbuffer[proc].size() * block_size;
 104 nsends++;
 105
 106 } else {
 107
 108 sbuffer_lengths[proc] = 0;
 109 }
 110 }
 111
 112 // // Make sure it is a PETSc m_field.get_comm()
 113 MPI_Comm comm;
 114 CHKERR PetscCommDuplicate(m_field.get_comm(), &comm, NULL);
 115
 116 std::vector<MPI_Status> status(m_field.get_comm_size());
 117
 118 // Computes the number of messages a node expects to receive
 119 int nrecvs; // number of messages received
 120 CHKERR PetscGatherNumberOfMessages(comm, NULL, &sbuffer_lengths[0], &nrecvs);
 121
 122 // Computes info about messages that a MPI-node will receive, including
 123 // (from-id,length) pairs for each message.
 124 int *onodes; // list of node-ids from which messages are expected
 125 int *olengths; // corresponding message lengths
 126 CHKERR PetscGatherMessageLengths(comm, nsends, nrecvs, &sbuffer_lengths[0],
 127 &onodes, &olengths);
 128
 129 // Gets a unique new tag from a PETSc communicator. All processors that share
 130 // the communicator MUST call this routine EXACTLY the same number of times.
 131 // This tag should only be used with the current objects communicator; do NOT
 132 // use it with any other MPI communicator.
 133 int tag;
 134 CHKERR PetscCommGetNewTag(comm, &tag);
 135
 136 // Allocate a buffer sufficient to hold messages of size specified in
 137 // olengths. And post Irecvs on these buffers using node info from onodes
 138 int **rbuf; // must be freed by user
 139 MPI_Request *r_waits; // must be freed by user
 140 // rbuf has a pointers to messages. It has size of of nrecvs (number of
 141 // messages) +1. In the first index a block is allocated,
 142 // such that rbuf[i] = rbuf[i-1]+olengths[i-1].
 143 CHKERR PetscPostIrecvInt(comm, tag, nrecvs, onodes, olengths, &rbuf,
 144 &r_waits);
 145
 146 MPI_Request *s_waits; // requests for the send messages
 147 CHKERR PetscMalloc1(nsends, &s_waits);
 148
 149 // Send messages
 150 for (int proc = 0, kk = 0; proc < m_field.get_comm_size(); proc++) {
 151 if (!sbuffer_lengths[proc])
 152 continue; // no message to send to this proc
 153 CHKERR MPI_Isend(&(sbuffer[proc])[0], // buffer to send
 154 sbuffer_lengths[proc], // message length
 155 MPIU_INT, proc, // to proc
 156 tag, comm, s_waits + kk);
 157 kk++;
 158 }
 159
 160 // Wait for receives to complete
 161 if (nrecvs)
 162 CHKERR MPI_Waitall(nrecvs, r_waits, &status[0]);
 163
 164 // Wait for send messages
 165 if (nsends)
 166 CHKERR MPI_Waitall(nsends, s_waits, &status[0]);
 167
 168 if (verb >= VERY_VERBOSE) {
 169 MOFEM_LOG_C("SYNC", Sev::verbose, "Rank %d nb. before ents %u\n",
 170 m_field.get_comm_rank(), ents.size());
 171 }
 172
 173 // synchronise range
 // Translate every received remote handle back to the corresponding local
 // entity and insert it into the output range.
 174 for (int kk = 0; kk < nrecvs; kk++) {
 175
 176 int len = olengths[kk];
 177 int *data_from_proc = rbuf[kk];
 178
 179 for (int ee = 0; ee < len; ee += block_size) {
 180
 // Reassemble the EntityHandle from its int-sized pieces.
 181 EntityHandle ent;
 182 bcopy(&data_from_proc[ee], &ent, sizeof(EntityHandle));
 183 auto meit = ref_ents_ptr->get<Ent_mi_tag>().find(ent);
 184 if (meit == ref_ents_ptr->get<Ent_mi_tag>().end())
 185 SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
 186 "rank %d entity %lu not exist on database, local entity can "
 187 "not be found for this owner",
 188 m_field.get_comm_rank(), ent);
 189
 190 if (verb >= VERY_VERBOSE)
 191 MOFEM_LOG_C("SYNC", Sev::verbose, "received %ul (%ul) from %d at %d\n",
 192 (*meit)->getEnt(), ent, onodes[kk],
 193 m_field.get_comm_rank());
 194
 195 ents.insert((*meit)->getEnt());
 196 }
 197 }
 198
 199 if (verb >= VERBOSE)
 200 PetscSynchronizedPrintf(m_field.get_comm(), "Rank %d nb. after ents %u\n",
 201 m_field.get_comm_rank(), ents.size());
 202
 203 // Cleaning
 204 CHKERR PetscFree(s_waits);
 205 CHKERR PetscFree(rbuf[0]);
 206 CHKERR PetscFree(rbuf);
 207 CHKERR PetscFree(r_waits);
 208 CHKERR PetscFree(onodes);
 209 CHKERR PetscFree(olengths);
 210 CHKERR PetscCommDestroy(&comm);
 211
 212 if (verb >= VERBOSE)
 214
 216}
217
 219 int verb) {
 220 MoFEM::Interface &m_field = cOre;
 // Fetch all entities currently stored in the field's meshset, synchronise
 // the range across processors, and add any newly received entities back
 // into the meshset so the field is consistent on every rank.
 222 EntityHandle idm = m_field.get_field_meshset(name);
 223 Range ents;
 224 CHKERR m_field.get_moab().get_entities_by_handle(idm, ents, false);
 225 CHKERR synchroniseEntities(ents, verb);
 226 CHKERR m_field.get_moab().add_entities(idm, ents);
 228}
229
 231 const Problem *problem_ptr, const std::string &fe_name, int verb) {
 232 MoFEM::Interface &m_field = cOre;
 234 ParallelComm *pcomm = ParallelComm::get_pcomm(
 235 &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);
 236 std::vector<int> shprocs(MAX_SHARING_PROCS, 0);
 237 std::vector<EntityHandle> shhandles(MAX_SHARING_PROCS, 0);
 238 Range ents;
 239 Tag th_gid = m_field.get_moab().globalId_tag();
 // Determine the range of global ids this rank assigns to the finite
 // elements it owns in the given problem.
 240 PetscLayout layout;
 241 CHKERR problem_ptr->getNumberOfElementsByNameAndPart(m_field.get_comm(),
 242 fe_name, &layout);
 243 int gid, last_gid;
 244 CHKERR PetscLayoutGetRange(layout, &gid, &last_gid);
 245 CHKERR PetscLayoutDestroy(&layout);
 246 for (_IT_NUMEREDFE_BY_NAME_FOR_LOOP_(problem_ptr, fe_name, fe_it)) {
 247 EntityHandle ent = (*fe_it)->getEnt();
 248 ents.insert(ent);
 // Tag every element with its partition; elements owned by this rank also
 // receive a consecutive global id.
 249 unsigned int part = (*fe_it)->getPart();
 250 CHKERR m_field.get_moab().tag_set_data(pcomm->part_tag(), &ent, 1, &part);
 251 if (part == pcomm->rank()) {
 252 CHKERR m_field.get_moab().tag_set_data(th_gid, &ent, 1, &gid);
 253 gid++;
 254 }
 // NOTE(review): clear() shrinks both vectors to size 0, yet they are
 // written below through operator[] (shhandles[rrr], shprocs[rrr]) —
 // out-of-bounds access on an empty std::vector (undefined behavior).
 // It looks like assign(MAX_SHARING_PROCS, 0) / std::fill was intended;
 // confirm and fix upstream.
 255 shprocs.clear();
 256 shhandles.clear();
 257
 258 if (pcomm->size() > 1) {
 259
 // Build the MOAB parallel status byte: ranks that do not own the element
 // see it as a not-owned ghost; with more than two ranks every element is
 // multishared, otherwise merely shared.
 260 unsigned char pstatus = 0;
 261 if (pcomm->rank() != part) {
 262 pstatus = PSTATUS_NOT_OWNED;
 263 pstatus |= PSTATUS_GHOST;
 264 }
 265
 266 if (pcomm->size() > 2) {
 267 pstatus |= PSTATUS_SHARED;
 268 pstatus |= PSTATUS_MULTISHARED;
 269 } else {
 270 pstatus |= PSTATUS_SHARED;
 271 }
 272
 // Every other rank shares this element; the remote handle equals the
 // local handle because the element exists on all ranks.
 273 size_t rrr = 0;
 274 for (size_t rr = 0; rr < pcomm->size(); ++rr) {
 275 if (rr != pcomm->rank()) {
 276 shhandles[rrr] = ent;
 277 shprocs[rrr] = rr;
 278 ++rrr;
 279 }
 280 }
 // Terminate the sharing-processor list with -1 entries.
 281 for (; rrr != pcomm->size(); ++rrr)
 282 shprocs[rrr] = -1;
 283
 // Simply-shared entities use the single-value sharedp/sharedh tags …
 284 if (pstatus & PSTATUS_SHARED) {
 285 CHKERR m_field.get_moab().tag_set_data(pcomm->sharedp_tag(), &ent, 1,
 286 &shprocs[0]);
 287 CHKERR m_field.get_moab().tag_set_data(pcomm->sharedh_tag(), &ent, 1,
 288 &shhandles[0]);
 289 }
 290
 // … multishared entities use the list-valued sharedps/sharedhs tags.
 291 if (pstatus & PSTATUS_MULTISHARED) {
 292 CHKERR m_field.get_moab().tag_set_data(pcomm->sharedps_tag(), &ent, 1,
 293 &shprocs[0]);
 294 CHKERR m_field.get_moab().tag_set_data(pcomm->sharedhs_tag(), &ent, 1,
 295 &shhandles[0]);
 296 }
 297 CHKERR m_field.get_moab().tag_set_data(pcomm->pstatus_tag(), &ent, 1,
 298 &pstatus);
 299 }
 300 }
 // Exchange the global id tag so ghost copies learn the owner-assigned ids.
 301 CHKERR pcomm->exchange_tags(th_gid, ents);
 303}
304
 306 const std::string &name, const std::string &fe_name, int verb) {
 307 MoFEM::Interface &m_field = cOre;
 // Convenience overload: look the problem up by name and delegate to the
 // Problem-pointer version.
 309 const Problem *problem_ptr;
 310 CHKERR m_field.get_problem(name, &problem_ptr);
 311 CHKERR resolveSharedFiniteElements(problem_ptr, fe_name, verb);
 313}
314
 316CommInterface::makeEntitiesMultishared(const EntityHandle *entities,
 317 const int num_entities,
 318 const int owner_proc, int verb) {
 319 MoFEM::Interface &m_field = cOre;
 321
 // Nothing to do in a serial run.
 322 if (m_field.get_comm_size() > 1) {
 323
 324 ParallelComm *pcomm = ParallelComm::get_pcomm(
 325 &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);
 326
 327 Range all_ents_range;
 328 all_ents_range.insert_list(entities, entities + num_entities);
 329
 // MOAB's global id tag gives resolve_shared_ents stable ids to match
 // entities across processors.
 330 auto get_tag = [&]() { return m_field.get_moab().globalId_tag(); };
 331
 // The gid tag is only needed during resolution; remove it afterwards.
 332 auto delete_tag = [&](auto &&th_gid) {
 334 CHKERR m_field.get_moab().tag_delete(th_gid);
 336 };
 337
 // Resolve sharing for the entities and return the tag so the caller can
 // delete it.
 338 auto resolve_shared_ents = [&](auto &&th_gid, auto &all_ents_range) {
 // Assign consecutive ids 1..N to the entities.
 // NOTE(review): gids is sized by num_entities but filled up to
 // all_ents_range.size(); a Range de-duplicates handles, so duplicated
 // input handles would make the two counts differ — confirm callers pass
 // unique handles.
 339 auto set_gid = [&](auto &th_gid) {
 340 std::vector<int> gids(num_entities);
 341 for (size_t g = 0; g != all_ents_range.size(); ++g)
 342 gids[g] = g + 1;
 343 CHKERR m_field.get_moab().tag_set_data(th_gid, all_ents_range,
 344 &*gids.begin());
 345
 346 return &th_gid;
 347 };
 348
 // Split the entities by dimension; resolve_shared_ents expects one range
 // per dimension.
 349 auto get_skin_ents = [&](auto &all_ents_range) {
 350 std::array<Range, 4> proc_ents_skin;
 351 proc_ents_skin[3] = all_ents_range.subset_by_dimension(3);
 352 proc_ents_skin[2] = all_ents_range.subset_by_dimension(2);
 353 proc_ents_skin[1] = all_ents_range.subset_by_dimension(1);
 354 proc_ents_skin[0] = all_ents_range.subset_by_dimension(0);
 355 return proc_ents_skin;
 356 };
 357
 // Highest entity dimension present in the range, or -1 if empty.
 358 auto resolve_dim = [&](auto &all_ents_range) {
 359 for (int resolve_dim = 3; resolve_dim >= 0; --resolve_dim) {
 360 if (all_ents_range.num_of_dimension(resolve_dim))
 361 return resolve_dim;
 362 }
 363 return -1;
 364 };
 365
 // Only the designated owner contributes the entities as locally owned;
 // all other ranks pass an empty owned range.
 366 auto get_proc_ent = [&](auto &all_ents_range) {
 367 Range proc_ent;
 368 if (m_field.get_comm_rank() == owner_proc)
 369 proc_ent = all_ents_range;
 370 return proc_ent;
 371 };
 372
 373 auto resolve_shared_ents = [&](auto &&proc_ents, auto &&skin_ents) {
 374 return pcomm->resolve_shared_ents(
 375 0, proc_ents, resolve_dim(all_ents_range),
 376 resolve_dim(all_ents_range), skin_ents.data(), set_gid(th_gid));
 377 };
 378
 379 CHKERR resolve_shared_ents(get_proc_ent(all_ents_range),
 380 get_skin_ents(all_ents_range));
 381
 382 return th_gid;
 383 };
 384
 385 CHKERR delete_tag(resolve_shared_ents(get_tag(), all_ents_range));
 386
 387 if (verb >= NOISY) {
 388
 // Diagnostic dump of ownership and sharing info for every entity.
 389 auto print_owner = [&](const EntityHandle e) {
 391 int moab_owner_proc;
 392 EntityHandle moab_owner_handle;
 393 CHKERR pcomm->get_owner_handle(e, moab_owner_proc, moab_owner_handle);
 394
 395 unsigned char pstatus = 0;
 396
 397 CHKERR m_field.get_moab().tag_get_data(pcomm->pstatus_tag(), &e, 1,
 398 &pstatus);
 399
 400 std::vector<int> shprocs(MAX_SHARING_PROCS, 0);
 401 std::vector<EntityHandle> shhandles(MAX_SHARING_PROCS, 0);
 402
 403 CHKERR m_field.get_moab().tag_get_data(pcomm->sharedp_tag(), &e, 1,
 404 &shprocs[0]);
 405 CHKERR m_field.get_moab().tag_get_data(pcomm->sharedh_tag(), &e, 1,
 406 &shhandles[0]);
 407 if (pstatus & PSTATUS_MULTISHARED) {
 408 CHKERR m_field.get_moab().tag_get_data(pcomm->sharedps_tag(), &e, 1,
 409 &shprocs[0]);
 410 CHKERR m_field.get_moab().tag_get_data(pcomm->sharedhs_tag(), &e, 1,
 411 &shhandles[0]);
 412 }
 413
 414 std::ostringstream ss;
 415
 416 ss << "Rank " << m_field.get_comm_rank() << " ";
 417 if (!(pstatus & PSTATUS_NOT_OWNED))
 418 ss << "OWNER ";
 419 if (pstatus & PSTATUS_SHARED)
 420 ss << "PSTATUS_SHARED ";
 421 if (pstatus & PSTATUS_MULTISHARED)
 422 ss << "PSTATUS_MULTISHARED ";
 423
 424 ss << "owner " << moab_owner_proc << " (" << owner_proc << ") ";
 425
 // NOTE(review): the loops below read comm_size + 1 entries, which
 // exceeds the MAX_SHARING_PROCS-sized buffers when
 // comm_size >= MAX_SHARING_PROCS — confirm the intended bound.
 426 ss << "shprocs: ";
 427 for (size_t r = 0; r != m_field.get_comm_size() + 1; ++r)
 428 ss << shprocs[r] << " ";
 429
 430 ss << "shhandles: ";
 431 for (size_t r = 0; r != m_field.get_comm_size() + 1; ++r)
 432 ss << shhandles[r] << " ";
 433
 434 ss << std::endl;
 435 MOFEM_LOG("SYNC", Sev::noisy) << ss.str();
 437
 439 };
 440
 441 for (auto e : all_ents_range)
 442 CHKERR print_owner(e);
 443 }
 444 }
 445
 447}
448
 450 const int owner_proc,
 451 int verb) {
 452 MoFEM::Interface &m_field = cOre;
 454 if (m_field.get_comm_size() > 1) {
 // Copy the Range into a contiguous vector and delegate to the
 // pointer-based overload.
 455 const int num_ents = entities.size();
 456 std::vector<EntityHandle> vec_ents(num_ents);
 457 std::copy(entities.begin(), entities.end(), vec_ents.begin());
 458 CHKERR makeEntitiesMultishared(&*vec_ents.begin(), num_ents, owner_proc,
 459 verb);
 460 }
 462}
463
 466 const int owner_proc, int verb) {
 467 MoFEM::Interface &m_field = cOre;
 469 if (m_field.get_comm_size() > 1) {
 // Gather all entities stored in the field's meshset (recursively) and
 // make them multishared with the given owner.
 470 EntityHandle field_meshset = m_field.get_field_meshset(field_name);
 471 std::vector<EntityHandle> field_ents;
 472 CHKERR m_field.get_moab().get_entities_by_handle(field_meshset, field_ents,
 473 true);
 474 CHKERR makeEntitiesMultishared(&*field_ents.begin(), field_ents.size(),
 475 owner_proc, verb);
 476 }
 478}
479
 481 int verb) {
 482 MoFEM::Interface &m_field = cOre;
 484 if (m_field.get_comm_size() > 1) {
 485
 486 Range exchange_ents_data_verts, exchange_ents_data;
 487
 // Iterate all field entities (selected through the field's bit-number
 // UId range) and pick those that are shared with another processor and
 // actually carry DOFs.
 488 auto *field_ents = m_field.get_field_ents();
 489 auto field_bit_number = m_field.get_field_bit_number(field_name);
 490 auto lo = field_ents->get<Unique_mi_tag>().lower_bound(
 491 FieldEntity::getLoBitNumberUId(field_bit_number));
 492 auto hi = field_ents->get<Unique_mi_tag>().lower_bound(
 493 FieldEntity::getHiBitNumberUId(field_bit_number));
 494
 495 for (auto it = lo; it != hi; ++it)
 496 if (
 497
 498 ((*it)->getPStatus()) &&
 499
 500 (*it)->getNbDofsOnEnt()
 501
 502 ) {
 // Vertex data is stored on a different tag than data on higher
 // dimension entities, so keep the two groups separate.
 503 if ((*it)->getEntType() == MBVERTEX)
 504 exchange_ents_data_verts.insert((*it)->getEnt());
 505 else
 506 exchange_ents_data.insert((*it)->getEnt());
 507 }
 508
 509 auto field_ptr = m_field.get_field_structure(field_name);
 510 ParallelComm *pcomm = ParallelComm::get_pcomm(
 511 &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);
 512
 // Exchange the given tag over the given entities (no-op when empty).
 513 auto exchange = [&](const Range &ents, Tag th) {
 515 if (!ents.empty()) {
 516 std::vector<Tag> tags;
 517 tags.push_back(th);
 518 CHKERR pcomm->exchange_tags(tags, tags, ents);
 519 }
 521 };
 522
 523 CHKERR exchange(exchange_ents_data_verts, field_ptr->th_FieldDataVerts);
 524 CHKERR exchange(exchange_ents_data, field_ptr->th_FieldData);
 525 }
 527}
528
 530CommInterface::partitionMesh(const Range &ents, const int dim,
 531 const int adj_dim, const int n_parts,
 532 Tag *th_vertex_weights, Tag *th_edge_weights,
 533 Tag *th_part_weights, int verb, const bool debug) {
 534 MoFEM::Interface &m_field = cOre;
 536
 537 // get layout
 // Split the (globally identical) entity range evenly over the ranks;
 // [rstart, rend) is this rank's slice of graph vertices.
 538 int rstart, rend, nb_elems;
 539 {
 540 PetscLayout layout;
 541 CHKERR PetscLayoutCreate(m_field.get_comm(), &layout);
 542 CHKERR PetscLayoutSetBlockSize(layout, 1);
 543 CHKERR PetscLayoutSetSize(layout, ents.size());
 544 CHKERR PetscLayoutSetUp(layout);
 545 CHKERR PetscLayoutGetSize(layout, &nb_elems);
 546 CHKERR PetscLayoutGetRange(layout, &rstart, &rend);
 547 CHKERR PetscLayoutDestroy(&layout);
 548 if (verb >= VERBOSE) {
 549 MOFEM_LOG("SYNC", Sev::inform)
 550 << "Finite elements in problem: row lower " << rstart << " row upper "
 551 << rend << " nb. elems " << nb_elems << " ( " << ents.size() << " )";
 553 }
 554 }
 555
 556 std::vector<EntityHandle> weight_ents;
 557 weight_ents.reserve(rend - rstart + 1);
 558
 // Bridge from a lower-dimension entity handle to the indices (positions
 // within `ents`) of the elements adjacent to it; used to assemble the
 // rows of the dual graph.
 559 struct AdjBridge {
 560 EntityHandle ent;
 561 std::vector<int> adj;
 562 AdjBridge(const EntityHandle ent, std::vector<int> &adj)
 563 : ent(ent), adj(adj) {}
 564 };
 565
 566 typedef multi_index_container<
 567 AdjBridge,
 568 indexed_by<
 569
 570 hashed_unique<member<AdjBridge, EntityHandle, &AdjBridge::ent>>
 571
 572 >>
 573 AdjBridgeMap;
 574
 // Advance an iterator i positions from the beginning of ents (linear).
 575 auto get_it = [&](auto i) {
 576 auto it = ents.begin();
 577 for (; i > 0; --i) {
 578 if (it == ents.end())
 579 break;
 580 ++it;
 581 }
 582 return it;
 583 };
 584
 585 Range proc_ents;
 586 proc_ents.insert(get_it(rstart), get_it(rend));
 587 if (proc_ents.size() != rend - rstart)
 588 SETERRQ2(PETSC_COMM_WORLD, MOFEM_DATA_INCONSISTENCY,
 589 "Wrong number of elements in range %d != %d", proc_ents.size(),
 590 rend - rstart);
 591
 // All adj_dim-dimensional entities bounding this rank's elements.
 592 Range all_dim_ents;
 593 CHKERR m_field.get_moab().get_adjacencies(
 594 proc_ents, adj_dim, true, all_dim_ents, moab::Interface::UNION);
 595
 // For every bridge entity record the indices of the dim-dimensional
 // elements (restricted to `ents`) it connects.
 596 AdjBridgeMap adj_bridge_map;
 597 auto hint = adj_bridge_map.begin();
 598 std::vector<int> adj;
 599 for (auto ent : all_dim_ents) {
 600 Range adj_ents;
 601 CHKERR m_field.get_moab().get_adjacencies(&ent, 1, dim, false, adj_ents);
 602 adj_ents = intersect(adj_ents, ents);
 603 adj.clear();
 604 adj.reserve(adj_ents.size());
 605 for (auto a : adj_ents)
 606 adj.emplace_back(ents.index(a));
 607 hint = adj_bridge_map.emplace_hint(hint, ent, adj);
 608 }
 609
 // Assemble the local rows of the dual graph in CSR form (_i offsets,
 // _j column indices). Ownership of both arrays passes to MatCreateMPIAdj.
 610 int *_i;
 611 int *_j;
 612 {
 613 const int nb_loc_elements = rend - rstart;
 614 std::vector<int> i(nb_loc_elements + 1, 0), j;
 615 {
 616 std::vector<int> row_adj;
 617 Range::iterator fe_it;
 618 int ii, jj;
 619 size_t max_row_size;
 620 for (fe_it = proc_ents.begin(), ii = rstart, jj = 0, max_row_size = 0;
 621 fe_it != proc_ents.end(); ++fe_it, ++ii) {
 622
 623 if (type_from_handle(*fe_it) == MBENTITYSET) {
 624 SETERRQ(
 625 PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED,
 626 "not yet implemented, don't know what to do for meshset element");
 627 } else {
 628
 629 Range dim_ents;
 630 CHKERR m_field.get_moab().get_adjacencies(&*fe_it, 1, adj_dim, false,
 631 dim_ents);
 632 dim_ents = intersect(dim_ents, all_dim_ents);
 633
 // Collect indices of all elements touching this one through any of
 // its adj_dim-dimensional bridge entities.
 634 row_adj.clear();
 635 for (auto e : dim_ents) {
 636 auto adj_it = adj_bridge_map.find(e);
 637 if (adj_it != adj_bridge_map.end()) {
 638
 639 for (const auto idx : adj_it->adj)
 640 row_adj.push_back(idx);
 641
 642 } else
 643 SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
 644 "Entity not found");
 645 }
 646
 // Deduplicate neighbours; the diagonal entry is dropped below.
 647 std::sort(row_adj.begin(), row_adj.end());
 648 auto end = std::unique(row_adj.begin(), row_adj.end());
 649
 650 size_t row_size = std::distance(row_adj.begin(), end);
 651 max_row_size = std::max(max_row_size, row_size);
 652 if (j.capacity() < (nb_loc_elements - jj) * max_row_size)
 653 j.reserve(nb_loc_elements * max_row_size);
 654
 655 i[jj] = j.size();
 656 auto diag = ents.index(*fe_it);
 657 for (auto it = row_adj.begin(); it != end; ++it)
 658 if (*it != diag)
 659 j.push_back(*it);
 660 }
 661
 662 ++jj;
 663
 664 if (th_vertex_weights != NULL)
 665 weight_ents.push_back(*fe_it);
 666 }
 667
 // Closing offset of the last row.
 668 i[jj] = j.size();
 669 }
 670
 671 CHKERR PetscMalloc(i.size() * sizeof(int), &_i);
 672 CHKERR PetscMalloc(j.size() * sizeof(int), &_j);
 673 copy(i.begin(), i.end(), _i);
 674 copy(j.begin(), j.end(), _j);
 675 }
 676
 677 // get weights
 678 int *vertex_weights = NULL;
 679 if (th_vertex_weights != NULL) {
 680 CHKERR PetscMalloc(weight_ents.size() * sizeof(int), &vertex_weights);
 681 CHKERR m_field.get_moab().tag_get_data(*th_vertex_weights,
 682 &*weight_ents.begin(),
 683 weight_ents.size(), vertex_weights);
 684 }
 685
 686 {
 687 Mat Adj;
 688 // Adjacency matrix used to partition problems, f.e. METIS
 689 CHKERR MatCreateMPIAdj(m_field.get_comm(), rend - rstart, nb_elems, _i, _j,
 690 PETSC_NULL, &Adj);
 691 CHKERR MatSetOption(Adj, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);
 692
 693 if (debug) {
 694 Mat A;
 695 CHKERR MatConvert(Adj, MATMPIAIJ, MAT_INITIAL_MATRIX, &A);
 696 CHKERR MatView(A, PETSC_VIEWER_DRAW_WORLD);
 697 std::string wait;
 698 std::cin >> wait;
 699 CHKERR MatDestroy(&A);
 700 }
 701
 702 // run PETSc to do partitioning
 703 MOFEM_LOG("WORLD", Sev::verbose) << "Start";
 704
 705 MatPartitioning part;
 706 IS is;
 707 CHKERR MatPartitioningCreate(m_field.get_comm(), &part);
 708
 709 CHKERR MatPartitioningSetAdjacency(part, Adj);
 710 CHKERR MatPartitioningSetFromOptions(part);
 711 CHKERR MatPartitioningSetNParts(part, n_parts);
 712 if (th_vertex_weights != NULL) {
 713 CHKERR MatPartitioningSetVertexWeights(part, vertex_weights);
 714 }
 715 PetscBool same;
 716 PetscObjectTypeCompare((PetscObject)part, MATPARTITIONINGPARMETIS, &same);
 717 if (same) {
 718#ifdef PARMETIS
 // NOTE(review): if the parmetis partitioner type is selected at runtime
 // while the library was built without PARMETIS, `is` is left unset here —
 // confirm that configuration is impossible.
 719 CHKERR MatPartitioningApply_Parmetis_MoFEM(part, &is);
 720#endif
 721 } else {
 722 CHKERR MatPartitioningApply(part, &is);
 723 }
 724
 725 MOFEM_LOG("WORLD", Sev::verbose) << "End";
 726
 727 // gather
 // Gather the full partition vector and the partition-induced numbering on
 // every rank.
 728 IS is_gather, is_num, is_gather_num;
 729 CHKERR ISAllGather(is, &is_gather);
 730 CHKERR ISPartitioningToNumbering(is, &is_num);
 731 CHKERR ISAllGather(is_num, &is_gather_num);
 732
 733 const int *part_number, *gids;
 734 CHKERR ISGetIndices(is_gather, &part_number);
 735 CHKERR ISGetIndices(is_gather_num, &gids);
 736
 737 // set partition tag and gid tag to entities
 738 ParallelComm *pcomm = ParallelComm::get_pcomm(
 739 &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);
 740 Tag part_tag = pcomm->part_tag();
 741 CHKERR m_field.get_moab().tag_set_data(part_tag, ents, part_number);
 742 Tag gid_tag = m_field.get_moab().globalId_tag();
 743
 744 std::map<int, Range> parts_ents;
 745 {
 746 // get entities on each part
 747 Range::iterator eit = ents.begin();
 748 for (int ii = 0; eit != ents.end(); eit++, ii++) {
 749 parts_ents[part_number[ii]].insert(*eit);
 750 }
 // Reuse (or create/trim) the meshsets tagged with the partition tag so
 // there is exactly one set per part.
 751 Range tagged_sets;
 752 CHKERR m_field.get_moab().get_entities_by_type_and_tag(
 753 0, MBENTITYSET, &part_tag, NULL, 1, tagged_sets,
 754 moab::Interface::UNION);
 755 if (!tagged_sets.empty())
 756 CHKERR m_field.get_moab().tag_delete_data(part_tag, tagged_sets);
 757
 758 if (n_parts > (int)tagged_sets.size()) {
 759 // too few partition sets - create missing ones
 760 int num_new = n_parts - tagged_sets.size();
 761 for (int i = 0; i < num_new; i++) {
 762 EntityHandle new_set;
 763 CHKERR m_field.get_moab().create_meshset(MESHSET_SET, new_set);
 764 tagged_sets.insert(new_set);
 765 }
 766 } else if (n_parts < (int)tagged_sets.size()) {
 767 // too many partition sets - delete extras
 768 int num_del = tagged_sets.size() - n_parts;
 769 for (int i = 0; i < num_del; i++) {
 770 EntityHandle old_set = tagged_sets.pop_back();
 771 CHKERR m_field.get_moab().delete_entities(&old_set, 1);
 772 }
 773 }
 774 // write a tag to those sets denoting they're partition sets, with a value
 775 // of the proc number
 776 std::vector<int> dum_ids(n_parts);
 777 for (int i = 0; i < n_parts; i++)
 778 dum_ids[i] = i;
 779 CHKERR m_field.get_moab().tag_set_data(part_tag, tagged_sets,
 780 &*dum_ids.begin());
 781 CHKERR m_field.get_moab().clear_meshset(tagged_sets);
 782
 783 // get lower dimension entities on each part
 784 for (int pp = 0; pp != n_parts; pp++) {
 785 Range dim_ents = parts_ents[pp].subset_by_dimension(dim);
 786 for (int dd = dim - 1; dd >= 0; dd--) {
 787 Range adj_ents;
 788 if (dd > 0) {
 789 CHKERR m_field.get_moab().get_adjacencies(
 790 dim_ents, dd, false, adj_ents, moab::Interface::UNION);
 791 } else {
 792 CHKERR m_field.get_moab().get_connectivity(dim_ents, adj_ents,
 793 true);
 794 }
 795 parts_ents[pp].merge(adj_ents);
 796 }
 797 }
 // Make the parts disjoint: an interface entity shared by several parts is
 // kept only on the lowest-numbered one.
 798 for (int pp = 1; pp != n_parts; pp++) {
 799 for (int ppp = 0; ppp != pp; ppp++) {
 800 parts_ents[pp] = subtract(parts_ents[pp], parts_ents[ppp]);
 801 }
 802 }
 803 if (debug) {
 804 if (m_field.get_comm_rank() == 0) {
 805 for (int rr = 0; rr != n_parts; rr++) {
 806 ostringstream ss;
 807 ss << "out_part_" << rr << ".vtk";
 808 MOFEM_LOG("SELF", Sev::inform)
 809 << "Save debug part mesh " << ss.str();
 810 EntityHandle meshset;
 811 CHKERR m_field.get_moab().create_meshset(MESHSET_SET, meshset);
 812 CHKERR m_field.get_moab().add_entities(meshset, parts_ents[rr]);
 813 CHKERR m_field.get_moab().write_file(ss.str().c_str(), "VTK", "",
 814 &meshset, 1);
 815 CHKERR m_field.get_moab().delete_entities(&meshset, 1);
 816 }
 817 }
 818 }
 819 for (int pp = 0; pp != n_parts; pp++) {
 820 CHKERR m_field.get_moab().add_entities(tagged_sets[pp], parts_ents[pp]);
 821 }
 822
 823 // set gid and part tag
 824 for (EntityType t = MBVERTEX; t != MBENTITYSET; ++t) {
 825
 826 void *ptr;
 827 int count;
 828
 829 int gid = 1; // moab indexing from 1
 830 for (int pp = 0; pp != n_parts; pp++) {
 831 Range type_ents = parts_ents[pp].subset_by_type(t);
 832 if (t != MBVERTEX) {
 833 CHKERR m_field.get_moab().tag_clear_data(part_tag, type_ents, &pp);
 834 }
 835
 // tag_iterate writes gids directly into MOAB's internal tag storage.
 836 auto eit = type_ents.begin();
 837 for (; eit != type_ents.end();) {
 838 CHKERR m_field.get_moab().tag_iterate(gid_tag, eit, type_ents.end(),
 839 count, ptr);
 840 auto gid_tag_ptr = static_cast<int *>(ptr);
 841 for (; count > 0; --count) {
 842 *gid_tag_ptr = gid;
 843 ++eit;
 844 ++gid;
 845 ++gid_tag_ptr;
 846 }
 847 }
 848 }
 849 }
 850 }
 851
 852 CHKERR ISRestoreIndices(is_gather, &part_number);
 853 CHKERR ISRestoreIndices(is_gather_num, &gids);
 854 CHKERR ISDestroy(&is_num);
 855 CHKERR ISDestroy(&is_gather_num);
 856 CHKERR ISDestroy(&is_gather);
 857 CHKERR ISDestroy(&is);
 858 CHKERR MatPartitioningDestroy(&part);
 859 CHKERR MatDestroy(&Adj);
 860 }
 861
 863}
864
865// MoFEMErrorCode partitionAndShare(const EntityHandle meshset,
866// const int overlap) {
867// MoFEM::Interface &m_field = cOre;
868// MoFEMFunctionBegin;
869
870
871
872// MoFEMFunctionReturn(0);
873// }
874
875} // namespace MoFEM
#define MOFEM_LOG_SYNCHRONISE(comm)
Synchronise "SYNC" channel.
Definition: LogManager.hpp:348
#define MOFEM_LOG_C(channel, severity, format,...)
Definition: LogManager.hpp:314
constexpr double a
@ VERY_VERBOSE
Definition: definitions.h:223
@ NOISY
Definition: definitions.h:224
@ VERBOSE
Definition: definitions.h:222
#define MoFEMFunctionReturnHot(a)
Last executable line of each PETSc function used for error handling. Replaces return()
Definition: definitions.h:460
#define MoFEMFunctionBegin
First executable line of each MoFEM function, used for error handling. Final line of MoFEM functions ...
Definition: definitions.h:359
@ MOFEM_IMPOSIBLE_CASE
Definition: definitions.h:48
@ MOFEM_DATA_INCONSISTENCY
Definition: definitions.h:44
@ MOFEM_NOT_IMPLEMENTED
Definition: definitions.h:45
#define MoFEMFunctionReturn(a)
Last executable line of each PETSc function used for error handling. Replaces return()
Definition: definitions.h:429
#define CHKERR
Inline error check.
Definition: definitions.h:548
#define MoFEMFunctionBeginHot
First executable line of each MoFEM function, used for error handling. Final line of MoFEM functions ...
Definition: definitions.h:453
static const bool debug
const int dim
virtual const Problem * get_problem(const std::string problem_name) const =0
Get the problem object.
virtual const FieldEntity_multiIndex * get_field_ents() const =0
Get the field ents object.
virtual const RefEntity_multiIndex * get_ref_ents() const =0
Get the ref ents object.
MoFEMErrorCode synchroniseFieldEntities(const std::string name, int verb=DEFAULT_VERBOSITY)
virtual Field * get_field_structure(const std::string &name)=0
get field structure
#define MOFEM_LOG(channel, severity)
Log.
Definition: LogManager.hpp:311
MoFEMErrorCode partitionMesh(const Range &ents, const int dim, const int adj_dim, const int n_parts, Tag *th_vertex_weights=nullptr, Tag *th_edge_weights=nullptr, Tag *th_part_weights=nullptr, int verb=VERBOSE, const bool debug=false)
Set partition tag to each finite element in the problem.
MoFEMErrorCode resolveSharedFiniteElements(const Problem *problem_ptr, const std::string &fe_name, int verb=DEFAULT_VERBOSITY)
resolve shared entities for finite elements in the problem
#define _IT_NUMEREDFE_BY_NAME_FOR_LOOP_(PROBLEMPTR, NAME, IT)
FTensor::Index< 'i', SPACE_DIM > i
FTensor::Index< 'j', 3 > j
const Tensor2_symmetric_Expr< const ddTensor0< T, Dim, i, j >, typename promote< T, double >::V, Dim, i, j > dd(const Tensor0< T * > &a, const Index< i, Dim > index1, const Index< j, Dim > index2, const Tensor1< int, Dim > &d_ijk, const Tensor1< double, Dim > &d_xyz)
Definition: ddTensor0.hpp:33
PetscErrorCode MoFEMErrorCode
MoFEM/PETSc error code.
Definition: Exceptions.hpp:67
implementation of Data Operators for Forces and Sources
Definition: Common.hpp:21
auto type_from_handle(const EntityHandle h)
get type from entity handle
Definition: Templates.hpp:1487
const double r
rate factor
double A
constexpr double t
plate stiffness
Definition: plate.cpp:76
constexpr auto field_name
constexpr double g
Managing BitRefLevels.
MoFEMErrorCode query_interface(boost::typeindex::type_index type_index, UnknownInterface **iface) const
~CommInterface()
Destructor.
CommInterface(const MoFEM::Core &core)
MoFEMErrorCode makeEntitiesMultishared(const EntityHandle *entities, const int num_entities, const int owner_proc=0, int verb=DEFAULT_VERBOSITY)
make entities from proc 0 shared on all proc
MoFEMErrorCode synchroniseEntities(Range &ent, int verb=DEFAULT_VERBOSITY)
MoFEMErrorCode makeFieldEntitiesMultishared(const std::string field_name, const int owner_proc=0, int verb=DEFAULT_VERBOSITY)
make field entities multi shared
MoFEMErrorCode exchangeFieldData(const std::string field_name, int verb=DEFAULT_VERBOSITY)
Exchange field data.
virtual int get_comm_size() const =0
virtual FieldBitNumber get_field_bit_number(const std::string name) const =0
get field bit number
virtual moab::Interface & get_moab()=0
virtual EntityHandle get_field_meshset(const std::string name) const =0
get field meshset
virtual MPI_Comm & get_comm() const =0
virtual int get_comm_rank() const =0
virtual boost::shared_ptr< BasicEntityData > & get_basic_entity_data_ptr()=0
Get pointer to basic entity data.
Core (interface) class.
Definition: Core.hpp:92
Deprecated interface functions.
static UId getHiBitNumberUId(const FieldBitNumber bit_number)
static UId getLoBitNumberUId(const FieldBitNumber bit_number)
keeps basic data about problem
MoFEMErrorCode getNumberOfElementsByNameAndPart(MPI_Comm comm, const std::string name, PetscLayout *layout) const
Get number of finite elements by name on processors.
base class for all interface classes