// Declaration of the ParMETIS-specific apply function, defined elsewhere in
// this file and used by partitionMesh() below.
MoFEMErrorCode MatPartitioningApply_Parmetis_MoFEM(MatPartitioning part,
                                                   IS *partitioning);
CommInterface::CommInterface(const MoFEM::Core &core)
    : cOre(const_cast<MoFEM::Core &>(core)), dEbug(false) {}
MoFEMErrorCode CommInterface::synchroniseEntities(
    Range &ents, std::map<int, Range> *received_ents, int verb) {
  MoFEMFunctionBegin;
  MoFEM::Interface &m_field = cOre;
  ParallelComm *pcomm =
      ParallelComm::get_pcomm(&m_field.get_moab(), MYPCOMM_INDEX);

  // Read the MOAB parallel status byte of an entity
  auto get_pstatus = [&](const auto ent) {
    unsigned char pstatus;
    CHK_MOAB_THROW(m_field.get_moab().tag_get_data(pcomm->pstatus_tag(), &ent,
                                                   1, &pstatus),
                   "can not get pstatus");
    return pstatus;
  };

  // Get ranks sharing an entity; a single sharing rank is stored under
  // sharedp_tag, multiple ones under sharedps_tag
  auto get_sharing_procs = [&](const auto ent, const auto pstatus) {
    std::vector<int> sharing_procs(MAX_SHARING_PROCS, -1);
    if (pstatus & PSTATUS_MULTISHARED) {
      // entity is multi-shared
      CHK_MOAB_THROW(m_field.get_moab().tag_get_data(
                         pcomm->sharedps_tag(), &ent, 1, &sharing_procs[0]),
                     "can not get sharing_procs_ptr");
    } else if (pstatus & PSTATUS_SHARED) {
      // shared with exactly one other processor
      CHK_MOAB_THROW(m_field.get_moab().tag_get_data(
                         pcomm->sharedp_tag(), &ent, 1, &sharing_procs[0]),
                     "can not get sharing proc");
    }
    return sharing_procs;
  };
  // Get the handles under which the sharing ranks know the entity
  auto get_sharing_handles = [&](const auto ent, const auto pstatus) {
    std::vector<EntityHandle> sharing_handles(MAX_SHARING_PROCS, 0);
    if (pstatus & PSTATUS_MULTISHARED) {
      CHK_MOAB_THROW(m_field.get_moab().tag_get_data(
                         pcomm->sharedhs_tag(), &ent, 1, &sharing_handles[0]),
                     "get shared handles");
    } else if (pstatus & PSTATUS_SHARED) {
      CHK_MOAB_THROW(m_field.get_moab().tag_get_data(
                         pcomm->sharedh_tag(), &ent, 1, &sharing_handles[0]),
                     "get sharing handle");
    }
    return sharing_handles;
  };
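  // Aside: a minimal sketch of the MOAB convention the two lambdas above rely
  // on. An entity shared with exactly one other rank stores a single value
  // under sharedp_tag()/sharedh_tag(); once PSTATUS_MULTISHARED is set, the
  // full lists live under sharedps_tag()/sharedhs_tag(), terminated by -1
  // (ranks) and 0 (handles). A helper like the one below (hypothetical, not
  // part of this file) would count the sharing ranks from such a list:
  //
  //   inline int count_sharing_procs(const std::vector<int> &sharing_procs) {
  //     int n = 0;
  //     while (n < MAX_SHARING_PROCS && sharing_procs[n] != -1)
  //       ++n;
  //     return n;
  //   }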
  // One send buffer per rank; handles are pushed in the receiver's naming
  std::vector<std::vector<EntityHandle>> sbuffer(m_field.get_comm_size());

  for (auto ent : ents) {

    auto pstatus = get_pstatus(ent);
    auto sharing_procs = get_sharing_procs(ent, pstatus);
    auto sharing_handles = get_sharing_handles(ent, pstatus);

    if (verb >= NOISY)
      MOFEM_LOG("SYNC", Sev::noisy) << "pstatus " << std::bitset<8>(pstatus);

    for (int proc = 0; proc < MAX_SHARING_PROCS && -1 != sharing_procs[proc];
         proc++) {
      if (sharing_procs[proc] == -1)
        SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
                "sharing processor not set");

      if (sharing_procs[proc] != pcomm->rank()) {

        // Ship the handle as it is known on the sharing processor, so the
        // receiver can use it directly
        const auto handle_on_sharing_proc = sharing_handles[proc];
        sbuffer[sharing_procs[proc]].push_back(handle_on_sharing_proc);

        if (verb >= NOISY)
          MOFEM_LOG_C("SYNC", Sev::noisy, "send %lu (%lu) to %d at %d\n", ent,
                      handle_on_sharing_proc, sharing_procs[proc],
                      pcomm->rank());
      }

      // With PSTATUS_SHARED (but not MULTISHARED) only one slot is valid
      if (!(pstatus & PSTATUS_MULTISHARED))
        break;
    }
  }
  int nsends = 0; // number of messages to send
  std::vector<int> sbuffer_lengths(
      m_field.get_comm_size()); // message length to each processor
  const size_t block_size = sizeof(EntityHandle) / sizeof(int);
  for (int proc = 0; proc < m_field.get_comm_size(); proc++) {

    if (!sbuffer[proc].empty()) {

      sbuffer_lengths[proc] = sbuffer[proc].size() * block_size;
      nsends++;

    } else {

      sbuffer_lengths[proc] = 0;
    }
  }
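  // Aside: why block_size = sizeof(EntityHandle) / sizeof(int). The PETSc
  // helpers used below (PetscGatherMessageLengths, PetscPostIrecvInt,
  // MPI_Isend with MPIU_INT) move int-sized data, so each handle travels as
  // block_size ints and is reassembled byte-wise on the receiver. A
  // self-contained sketch (assuming 64-bit handles):
  //
  //   #include <cstdint>
  //   #include <cstring>
  //   #include <vector>
  //   std::uint64_t round_trip(std::uint64_t handle) {
  //     const std::size_t block_size = sizeof(handle) / sizeof(int); // == 2
  //     std::vector<int> wire(block_size);
  //     std::memcpy(wire.data(), &handle, sizeof(handle)); // pack
  //     std::uint64_t out;
  //     std::memcpy(&out, wire.data(), sizeof(out));       // unpack
  //     return out; // == handle
  //   }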
  // Make sure it is a PETSc communicator
  MPI_Comm comm;
  CHKERR PetscCommDuplicate(m_field.get_comm(), &comm, NULL);

  // Compute how many messages this rank will receive
  int nrecvs; // number of messages received
  CHKERR PetscGatherNumberOfMessages(comm, NULL, &sbuffer_lengths[0], &nrecvs);

  // Get sources and lengths of the incoming messages
  int *onodes;   // ranks sending messages to this processor
  int *olengths; // corresponding message lengths
  CHKERR PetscGatherMessageLengths(comm, nsends, nrecvs, &sbuffer_lengths[0],
                                   &onodes, &olengths);

  // A new tag for this round of communication
  int tag;
  CHKERR PetscCommGetNewTag(comm, &tag);

  // Post receives
  int **rbuf;           // received messages, one buffer per incoming rank
  MPI_Request *r_waits; // requests for incoming messages
  CHKERR PetscPostIrecvInt(comm, tag, nrecvs, onodes, olengths, &rbuf,
                           &r_waits);

  // Post sends
  MPI_Request *s_waits; // requests for outgoing messages
  CHKERR PetscMalloc1(nsends, &s_waits);

  for (int proc = 0, kk = 0; proc < m_field.get_comm_size(); proc++) {
    if (!sbuffer_lengths[proc])
      continue; // no message to this processor
    CHKERR MPI_Isend(&(sbuffer[proc])[0],   // buffer to send
                     sbuffer_lengths[proc], // message length
                     MPIU_INT, proc,        // to proc
                     tag, comm, s_waits + kk);
    kk++;
  }

  // Wait for posted receives and sends
  std::vector<MPI_Status> status(nsends + nrecvs);
  if (nrecvs)
    CHKERR MPI_Waitall(nrecvs, r_waits, &status[0]);
  if (nsends)
    CHKERR MPI_Waitall(nsends, s_waits, &status[0]);
  if (verb >= VERBOSE)
    MOFEM_LOG_C("SYNC", Sev::verbose, "Rank %d nb. before ents %u\n",
                m_field.get_comm_rank(), ents.size());

  // Unpack received handles and insert them into the range
  for (int kk = 0; kk < nrecvs; kk++) {

    int len = olengths[kk];
    int *data_from_proc = rbuf[kk];

    for (int ee = 0; ee < len;) {

      EntityHandle ent;
      bcopy(&data_from_proc[ee], &ent, sizeof(EntityHandle));
      ents.insert(ent);
      if (received_ents)
        (*received_ents)[onodes[kk]].insert(ent);
      ee += block_size;

      if (verb >= NOISY)
        MOFEM_LOG_C("SYNC", Sev::noisy, "received %lu from %d at %d\n", ent,
                    onodes[kk], m_field.get_comm_rank());
    }
  }

  if (verb >= VERBOSE)
    MOFEM_LOG_C("SYNC", Sev::verbose, "Rank %d nb. after ents %u",
                m_field.get_comm_rank(), ents.size());
  // Cleanup
  CHKERR PetscFree(s_waits);
  CHKERR PetscFree(rbuf[0]);
  CHKERR PetscFree(rbuf);
  CHKERR PetscFree(r_waits);
  CHKERR PetscFree(onodes);
  CHKERR PetscFree(olengths);
  CHKERR PetscCommDestroy(&comm);

  MoFEMFunctionReturn(0);
}
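// Hypothetical usage sketch (names follow MoFEM conventions; verify against
// the installed headers): make each rank insert the shared vertices its
// neighbours know about into its own range.
//
//   Range verts;
//   CHKERR m_field.get_moab().get_entities_by_type(0, MBVERTEX, verts);
//   std::map<int, Range> received; // optional: who sent what
//   CHKERR m_field.getInterface<CommInterface>()->synchroniseEntities(
//       verts, &received, VERBOSE);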
MoFEMErrorCode CommInterface::synchroniseFieldEntities(const std::string name,
                                                       int verb) {
  MoFEMFunctionBegin;
  MoFEM::Interface &m_field = cOre;
  // Synchronise all entities stored in the field meshset
  EntityHandle idm = m_field.get_field_meshset(name);
  Range ents;
  CHKERR m_field.get_moab().get_entities_by_handle(idm, ents, false);
  CHKERR synchroniseEntities(ents, nullptr, verb);
  MoFEMFunctionReturn(0);
}
MoFEMErrorCode CommInterface::resolveParentEntities(const Range &ents,
                                                    int verb) {
  MoFEMFunctionBegin;
  MoFEM::Interface &m_field = cOre;
  ParallelComm *pcomm =
      ParallelComm::get_pcomm(&m_field.get_moab(), MYPCOMM_INDEX);

  // Keep only owned and shared entities
  Range shared = ents;
  CHKERR pcomm->filter_pstatus(shared, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1,
                               nullptr);
  CHKERR pcomm->filter_pstatus(shared, PSTATUS_SHARED | PSTATUS_MULTISHARED,
                               PSTATUS_OR, -1, nullptr);

  Tag th_RefParentHandle = cOre.get_th_RefParentHandle();
  Tag th_RefBitLevel = cOre.get_th_RefBitLevel();

  auto get_pstatus = [&](const auto ent) {
    unsigned char pstatus;
    CHK_MOAB_THROW(m_field.get_moab().tag_get_data(pcomm->pstatus_tag(), &ent,
                                                   1, &pstatus),
                   "can not get pstatus");
    return pstatus;
  };

  auto get_sharing_procs = [&](const auto ent, const auto pstatus) {
    std::vector<int> sharing_procs(MAX_SHARING_PROCS, -1);
    if (pstatus & PSTATUS_MULTISHARED) {
      // entity is multi-shared
      CHK_MOAB_THROW(m_field.get_moab().tag_get_data(
                         pcomm->sharedps_tag(), &ent, 1, &sharing_procs[0]),
                     "can not get sharing_procs_ptr");
    } else if (pstatus & PSTATUS_SHARED) {
      // shared with exactly one other processor
      CHK_MOAB_THROW(m_field.get_moab().tag_get_data(
                         pcomm->sharedp_tag(), &ent, 1, &sharing_procs[0]),
                     "can not get sharing proc");
    }
    return sharing_procs;
  };

  auto get_sharing_handles = [&](const auto ent, const auto pstatus) {
    std::vector<EntityHandle> sharing_handles(MAX_SHARING_PROCS, 0);
    if (pstatus & PSTATUS_MULTISHARED) {
      CHK_MOAB_THROW(m_field.get_moab().tag_get_data(
                         pcomm->sharedhs_tag(), &ent, 1, &sharing_handles[0]),
                     "get shared handles");
    } else if (pstatus & PSTATUS_SHARED) {
      CHK_MOAB_THROW(m_field.get_moab().tag_get_data(
                         pcomm->sharedh_tag(), &ent, 1, &sharing_handles[0]),
                     "get sharing handle");
    }
    return sharing_handles;
  };
  // Fetch the parent handle and the bit ref level stored on an entity
  auto get_parent_and_bit = [&](const auto ent) {
    EntityHandle parent;
    CHK_MOAB_THROW(
        m_field.get_moab().tag_get_data(th_RefParentHandle, &ent, 1, &parent),
        "get parent");
    BitRefLevel bit;
    CHK_MOAB_THROW(
        m_field.get_moab().tag_get_data(th_RefBitLevel, &ent, 1, &bit),
        "get bit");
    return std::make_pair(parent, bit);
  };

  auto set_parent = [&](const auto ent, const auto parent) {
    return m_field.get_moab().tag_set_data(th_RefParentHandle, &ent, 1,
                                           &parent);
  };

  auto set_bit = [&](const auto ent, const auto bit) {
    return m_field.get_moab().tag_set_data(th_RefBitLevel, &ent, 1, &bit);
  };
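  // Aside: BitRefLevel is a std::bitset; below it crosses the wire as an
  // unsigned long long produced by to_ullong(). For bitsets wider than 64
  // bits, to_ullong() throws std::overflow_error when a high bit is set,
  // which is what the try/catch blocks in the loop below guard against. A
  // self-contained sketch of the round trip (assuming the level fits into
  // 64 bits):
  //
  //   #include <bitset>
  //   std::bitset<64> wire_round_trip(const std::bitset<64> &bit) {
  //     const unsigned long long wire = bit.to_ullong(); // pack
  //     return std::bitset<64>(wire);                    // unpack
  //   }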
  // One send buffer per rank; each record is (handle, parent handle, bit)
  std::vector<std::vector<unsigned long long>> sbuffer(
      m_field.get_comm_size());

  for (auto ent : shared) {

    auto pstatus = get_pstatus(ent);
    auto sharing_procs = get_sharing_procs(ent, pstatus);
    auto sharing_handles = get_sharing_handles(ent, pstatus);
    auto [parent, bit] = get_parent_and_bit(ent);

    if (verb >= NOISY)
      MOFEM_LOG("SYNC", Sev::noisy) << "pstatus " << std::bitset<8>(pstatus);

    if (parent) {

      // Parent exists; translate it to the naming of each sharing processor
      auto pstatus_parent = get_pstatus(parent);
      auto sharing_procs_parent = get_sharing_procs(parent, pstatus_parent);
      auto sharing_handles_parent =
          get_sharing_handles(parent, pstatus_parent);

      for (int proc = 0;
           proc < MAX_SHARING_PROCS && -1 != sharing_procs[proc]; proc++) {
        if (sharing_procs[proc] == -1)
          SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
                  "sharing processor not set");

        if (sharing_procs[proc] != pcomm->rank()) {

          // Find the parent's slot for this sharing processor
          auto it = std::find(sharing_procs_parent.begin(),
                              sharing_procs_parent.end(),
                              sharing_procs[proc]);
          if (it == sharing_procs_parent.end()) {
            SETERRQ1(
                PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
                "Sharing proc for parent entity can not be found proc = %u",
                sharing_procs[proc]);
          }

          auto handle_on_sharing_proc = sharing_handles[proc];
          auto parent_handle_on_sharing_proc =
              sharing_handles_parent[std::distance(
                  sharing_procs_parent.begin(), it)];
          sbuffer[sharing_procs[proc]].push_back(handle_on_sharing_proc);
          sbuffer[sharing_procs[proc]].push_back(
              parent_handle_on_sharing_proc);
          try {
            // to_ullong() throws if the bit ref level does not fit into
            // unsigned long long
            sbuffer[sharing_procs[proc]].push_back(bit.to_ullong());
          } catch (std::exception &ex) {
            MOFEM_LOG("SELF", Sev::warning) << ex.what();
            MOFEM_LOG("SELF", Sev::warning)
                << "On " << ent << " " << std::bitset<8>(pstatus);
            MOFEM_LOG("SELF", Sev::warning) << "For bit ref " << bit;
          }

          if (verb >= NOISY)
            MOFEM_LOG_C("SYNC", Sev::noisy, "send %lu (%lu) to %d at %d\n",
                        ent, handle_on_sharing_proc, sharing_procs[proc],
                        pcomm->rank());
        }

        if (!(pstatus & PSTATUS_MULTISHARED))
          break;
      }

    } else {

      // No parent; send the (zero) parent handle as is
      for (int proc = 0;
           proc < MAX_SHARING_PROCS && -1 != sharing_procs[proc]; proc++) {
        if (sharing_procs[proc] == -1)
          SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
                  "sharing processor not set");

        if (sharing_procs[proc] != pcomm->rank()) {

          auto handle_on_sharing_proc = sharing_handles[proc];
          sbuffer[sharing_procs[proc]].push_back(handle_on_sharing_proc);
          sbuffer[sharing_procs[proc]].push_back(parent);
          try {
            sbuffer[sharing_procs[proc]].push_back(bit.to_ullong());
          } catch (std::exception &ex) {
            MOFEM_LOG("SELF", Sev::warning) << ex.what();
            MOFEM_LOG("SELF", Sev::warning)
                << "On " << ent << " " << std::bitset<8>(pstatus);
            MOFEM_LOG("SELF", Sev::warning) << "For bit ref " << bit;
          }

          if (verb >= NOISY)
            MOFEM_LOG_C("SYNC", Sev::noisy, "send %lu (%lu) to %d at %d\n",
                        ent, handle_on_sharing_proc, sharing_procs[proc],
                        pcomm->rank());
        }

        if (!(pstatus & PSTATUS_MULTISHARED))
          break;
      }
    }
  }
  int nsends = 0; // number of messages to send
  std::vector<int> sbuffer_lengths(
      m_field.get_comm_size()); // message length to each processor
  const size_t block_size = sizeof(unsigned long long) / sizeof(int);
  for (int proc = 0; proc < m_field.get_comm_size(); proc++) {

    if (!sbuffer[proc].empty()) {

      sbuffer_lengths[proc] = sbuffer[proc].size() * block_size;
      nsends++;

    } else {

      sbuffer_lengths[proc] = 0;
    }
  }

  // Make sure it is a PETSc communicator
  MPI_Comm comm;
  CHKERR PetscCommDuplicate(m_field.get_comm(), &comm, NULL);

  int nrecvs; // number of messages received
  CHKERR PetscGatherNumberOfMessages(comm, NULL, &sbuffer_lengths[0], &nrecvs);

  int *onodes;   // ranks sending messages to this processor
  int *olengths; // corresponding message lengths
  CHKERR PetscGatherMessageLengths(comm, nsends, nrecvs, &sbuffer_lengths[0],
                                   &onodes, &olengths);

  int tag;
  CHKERR PetscCommGetNewTag(comm, &tag);

  // Post receives
  int **rbuf;           // received messages
  MPI_Request *r_waits; // requests for incoming messages
  CHKERR PetscPostIrecvInt(comm, tag, nrecvs, onodes, olengths, &rbuf,
                           &r_waits);

  // Post sends
  MPI_Request *s_waits;
  CHKERR PetscMalloc1(nsends, &s_waits);

  for (int proc = 0, kk = 0; proc < m_field.get_comm_size(); proc++) {
    if (!sbuffer_lengths[proc])
      continue; // no message to this processor
    CHKERR MPI_Isend(&(sbuffer[proc])[0],   // buffer to send
                     sbuffer_lengths[proc], // message length
                     MPIU_INT, proc,        // to proc
                     tag, comm, s_waits + kk);
    kk++;
  }

  // Wait for posted receives and sends
  std::vector<MPI_Status> status(nsends + nrecvs);
  if (nrecvs)
    CHKERR MPI_Waitall(nrecvs, r_waits, &status[0]);
  if (nsends)
    CHKERR MPI_Waitall(nsends, s_waits, &status[0]);
503 "Rank %d nb. shared to synchronise parents ents %u\n",
508 for (
int kk = 0; kk < nrecvs; kk++) {
510 int len = olengths[kk];
511 int *data_from_proc = rbuf[kk];
513 for (
int ee = 0; ee < len;) {
516 unsigned long long uulong_bit;
519 bcopy(&data_from_proc[ee], &parent,
sizeof(
EntityHandle));
521 bcopy(&data_from_proc[ee], &uulong_bit,
sizeof(
unsigned long long));
524 CHKERR set_parent(ent, parent);
528 MOFEM_LOG_C(
"SYNC", Sev::noisy,
"received %lu (%lu) from %d at %d\n",
  // Cleanup
  CHKERR PetscFree(s_waits);
  CHKERR PetscFree(rbuf[0]);
  CHKERR PetscFree(rbuf);
  CHKERR PetscFree(r_waits);
  CHKERR PetscFree(onodes);
  CHKERR PetscFree(olengths);
  CHKERR PetscCommDestroy(&comm);

  MoFEMFunctionReturn(0);
}
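// Aside: wire format of the exchange above. Each record sent to a sharing
// rank is three fields packed back-to-back,
//
//   [ EntityHandle ent | EntityHandle parent | unsigned long long bit ]
//
// written and read byte-wise with bcopy, with the cursor ee advancing by
// sizeof(field) / sizeof(int) per field to match the int-based transport.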
MoFEMErrorCode CommInterface::resolveSharedFiniteElements(
    const Problem *problem_ptr, const std::string &fe_name, int verb) {
  MoFEMFunctionBegin;
  MoFEM::Interface &m_field = cOre;
  ParallelComm *pcomm =
      ParallelComm::get_pcomm(&m_field.get_moab(), MYPCOMM_INDEX);
  std::vector<int> shprocs(MAX_SHARING_PROCS, 0);
  std::vector<EntityHandle> shhandles(MAX_SHARING_PROCS, 0);
  Range ents;

  Tag th_gid = m_field.get_moab().globalId_tag();

  // Each rank numbers the elements it owns; gid is this rank's first index
  PetscLayout layout;
  CHKERR problem_ptr->getNumberOfElementsByNameAndPart(m_field.get_comm(),
                                                       fe_name, &layout);
  int gid, last_gid;
  CHKERR PetscLayoutGetRange(layout, &gid, &last_gid);
  CHKERR PetscLayoutDestroy(&layout);

  // Iterate numbered finite elements of the given name
  auto fe_miit = m_field.get_finite_elements()
                     ->get<FiniteElement_name_mi_tag>()
                     .find(fe_name);
  auto fit =
      problem_ptr->numeredFiniteElementsPtr->get<Unique_mi_tag>().lower_bound(
          EntFiniteElement::getLocalUniqueIdCalculate(
              0, (*fe_miit)->getFEUId()));
  auto hi_fe_it =
      problem_ptr->numeredFiniteElementsPtr->get<Unique_mi_tag>().upper_bound(
          EntFiniteElement::getLocalUniqueIdCalculate(
              get_id_for_max_type<MBENTITYSET>(), (*fe_miit)->getFEUId()));
  for (; fit != hi_fe_it; ++fit) {

    auto ent = (*fit)->getEnt();
    auto part = (*fit)->getPart();

    ents.insert(ent);
    CHKERR m_field.get_moab().tag_set_data(pcomm->part_tag(), &ent, 1, &part);
    if (part == pcomm->rank()) {
      // Owner numbers the element
      CHKERR m_field.get_moab().tag_set_data(th_gid, &ent, 1, &gid);
      ++gid;
    }

    if (pcomm->size() > 1) {

      unsigned char pstatus = 0;
      if (pcomm->rank() != part) {
        pstatus = PSTATUS_NOT_OWNED;
        pstatus |= PSTATUS_GHOST;
      }

      if (pcomm->size() > 2) {
        pstatus |= PSTATUS_SHARED;
        pstatus |= PSTATUS_MULTISHARED;
      } else {
        pstatus |= PSTATUS_SHARED;
      }

      // Every other rank shares this element
      size_t rrr = 0;
      for (size_t rr = 0; rr < pcomm->size(); ++rr) {
        if (rr != pcomm->rank()) {
          shhandles[rrr] = ent;
          shprocs[rrr] = rr;
          ++rrr;
        }
      }
      for (; rrr != pcomm->size(); ++rrr)
        shprocs[rrr] = -1;

      if (pstatus & PSTATUS_SHARED) {
        CHKERR m_field.get_moab().tag_set_data(pcomm->sharedp_tag(), &ent, 1,
                                               &shprocs[0]);
        CHKERR m_field.get_moab().tag_set_data(pcomm->sharedh_tag(), &ent, 1,
                                               &shhandles[0]);
      }

      if (pstatus & PSTATUS_MULTISHARED) {
        CHKERR m_field.get_moab().tag_set_data(pcomm->sharedps_tag(), &ent, 1,
                                               &shprocs[0]);
        CHKERR m_field.get_moab().tag_set_data(pcomm->sharedhs_tag(), &ent, 1,
                                               &shhandles[0]);
      }
      CHKERR m_field.get_moab().tag_set_data(pcomm->pstatus_tag(), &ent, 1,
                                             &pstatus);
    }
  }

  CHKERR pcomm->exchange_tags(th_gid, ents);
  MoFEMFunctionReturn(0);
}
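// Aside: the pstatus byte composed in the loop above combines MOAB's
// PSTATUS_* flags (MBParallelConventions.h). For a non-owned element on a
// run with more than two ranks the value amounts to
//
//   unsigned char pstatus = PSTATUS_NOT_OWNED | PSTATUS_GHOST |
//                           PSTATUS_SHARED | PSTATUS_MULTISHARED;
//
// i.e. every rank keeps a copy of every element of this finite-element kind,
// while exactly one rank (its part) owns it.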
MoFEMErrorCode CommInterface::resolveSharedFiniteElements(
    const std::string name, const std::string &fe_name, int verb) {
  MoFEMFunctionBegin;
  MoFEM::Interface &m_field = cOre;
  const Problem *problem_ptr;
  CHKERR m_field.get_problem(name, &problem_ptr);
  CHKERR resolveSharedFiniteElements(problem_ptr, fe_name, verb);
  MoFEMFunctionReturn(0);
}
MoFEMErrorCode CommInterface::makeEntitiesMultishared(
    const EntityHandle *entities, const int num_entities,
    const int owner_proc, int verb) {
  MoFEMFunctionBegin;
  MoFEM::Interface &m_field = cOre;

  if (m_field.get_comm_size() > 1) {

    ParallelComm *pcomm =
        ParallelComm::get_pcomm(&m_field.get_moab(), MYPCOMM_INDEX);

    Range all_ents_range;
    all_ents_range.insert_list(entities, entities + num_entities);

    auto get_tag = [&]() { return m_field.get_moab().globalId_tag(); };

    auto delete_tag = [&](auto &&th_gid) {
      MoFEMFunctionBegin;
      CHKERR m_field.get_moab().tag_delete(th_gid);
      MoFEMFunctionReturn(0);
    };
    auto resolve_shared_ents = [&](auto &&th_gid, auto &all_ents_range) {
      // Assign temporary global ids used by MOAB to match entities
      auto set_gid = [&](auto &th_gid) {
        std::vector<int> gids(num_entities);
        for (size_t g = 0; g != all_ents_range.size(); ++g)
          gids[g] = g + 1;
        CHK_MOAB_THROW(m_field.get_moab().tag_set_data(th_gid, all_ents_range,
                                                       &*gids.begin()),
                       "set gid tag");
        return &th_gid;
      };

      auto get_skin_ents = [&](auto &all_ents_range) {
        std::array<Range, 4> proc_ents_skin;
        proc_ents_skin[3] = all_ents_range.subset_by_dimension(3);
        proc_ents_skin[2] = all_ents_range.subset_by_dimension(2);
        proc_ents_skin[1] = all_ents_range.subset_by_dimension(1);
        proc_ents_skin[0] = all_ents_range.subset_by_dimension(0);
        return proc_ents_skin;
      };

      auto resolve_dim = [&](auto &all_ents_range) {
        for (int resolve_dim = 3; resolve_dim >= 0; --resolve_dim) {
          if (all_ents_range.num_of_dimension(resolve_dim))
            return resolve_dim;
        }
        return -1;
      };
      // Only the owner claims the entities as its own
      auto get_proc_ent = [&](auto &all_ents_range) {
        Range proc_ent;
        if (m_field.get_comm_rank() == owner_proc)
          proc_ent = all_ents_range;
        return proc_ent;
      };

      auto resolve_shared_ents = [&](auto &&proc_ents, auto &&skin_ents) {
        return pcomm->resolve_shared_ents(
            0, proc_ents, resolve_dim(all_ents_range),
            resolve_dim(all_ents_range), skin_ents.data(), set_gid(th_gid));
      };

      CHKERR resolve_shared_ents(get_proc_ent(all_ents_range),
                                 get_skin_ents(all_ents_range));

      return th_gid;
    };

    CHKERR delete_tag(resolve_shared_ents(get_tag(), all_ents_range));
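    // Aside: the call above chains the helpers into one pipeline; read it as
    // the straight-line sketch below (illustrative only):
    //
    //   Tag th_gid = get_tag();          // global-id tag used for matching
    //   set_gid(th_gid);                 // number the entities 1..N
    //   pcomm->resolve_shared_ents(...); // let MOAB match them across ranks
    //   delete_tag(th_gid);              // drop the scratch ids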
    if (verb >= NOISY) {

      // Print ownership and sharing info for debugging
      auto print_owner = [&](const EntityHandle e) {
        MoFEMFunctionBegin;

        int moab_owner_proc;
        EntityHandle moab_owner_handle;
        CHKERR pcomm->get_owner_handle(e, moab_owner_proc, moab_owner_handle);

        unsigned char pstatus = 0;
        CHKERR m_field.get_moab().tag_get_data(pcomm->pstatus_tag(), &e, 1,
                                               &pstatus);

        std::vector<int> shprocs(MAX_SHARING_PROCS, 0);
        std::vector<EntityHandle> shhandles(MAX_SHARING_PROCS, 0);

        CHKERR m_field.get_moab().tag_get_data(pcomm->sharedp_tag(), &e, 1,
                                               &shprocs[0]);
        CHKERR m_field.get_moab().tag_get_data(pcomm->sharedh_tag(), &e, 1,
                                               &shhandles[0]);
        if (pstatus & PSTATUS_MULTISHARED) {
          CHKERR m_field.get_moab().tag_get_data(pcomm->sharedps_tag(), &e, 1,
                                                 &shprocs[0]);
          CHKERR m_field.get_moab().tag_get_data(pcomm->sharedhs_tag(), &e, 1,
                                                 &shhandles[0]);
        }

        std::ostringstream ss;

        if (!(pstatus & PSTATUS_NOT_OWNED))
          ss << "OWNER ";
        if (pstatus & PSTATUS_SHARED)
          ss << "PSTATUS_SHARED ";
        if (pstatus & PSTATUS_MULTISHARED)
          ss << "PSTATUS_MULTISHARED ";

        ss << "owner " << moab_owner_proc << " (" << owner_proc << ") ";

        ss << "procs: ";
        for (size_t r = 0; r != MAX_SHARING_PROCS; ++r)
          ss << shprocs[r] << " ";

        ss << "handles: ";
        for (size_t r = 0; r != MAX_SHARING_PROCS; ++r)
          ss << shhandles[r] << " ";

        MOFEM_LOG("SYNC", Sev::noisy) << ss.str();

        MoFEMFunctionReturn(0);
      };

      for (auto e : all_ents_range)
        CHKERR print_owner(e);
    }
  }

  MoFEMFunctionReturn(0);
}
MoFEMErrorCode CommInterface::makeEntitiesMultishared(Range &entities,
                                                      const int owner_proc,
                                                      int verb) {
  MoFEMFunctionBegin;
  const int num_ents = entities.size();
  std::vector<EntityHandle> vec_ents(num_ents);
  std::copy(entities.begin(), entities.end(), vec_ents.begin());
  CHKERR makeEntitiesMultishared(&*vec_ents.begin(), num_ents, owner_proc,
                                 verb);
  MoFEMFunctionReturn(0);
}
MoFEMErrorCode CommInterface::makeFieldEntitiesMultishared(
    const std::string field_name, const int owner_proc, int verb) {
  MoFEMFunctionBegin;
  MoFEM::Interface &m_field = cOre;
  EntityHandle field_meshset = m_field.get_field_meshset(field_name);
  std::vector<EntityHandle> field_ents;
  CHKERR m_field.get_moab().get_entities_by_handle(field_meshset, field_ents,
                                                   true);
  CHKERR makeEntitiesMultishared(&*field_ents.begin(), field_ents.size(),
                                 owner_proc, verb);
  MoFEMFunctionReturn(0);
}
MoFEMErrorCode CommInterface::exchangeFieldData(const std::string field_name,
                                                int verb) {
  MoFEMFunctionBegin;
  MoFEM::Interface &m_field = cOre;

  Range exchange_ents_data_verts, exchange_ents_data;

  // Collect shared field entities carrying dofs; lo and hi bound this
  // field's entries in the field-entity multi-index
  for (auto it = lo; it != hi; ++it)
    if (((*it)->getPStatus()) && (*it)->getNbDofsOnEnt()) {
      if ((*it)->getEntType() == MBVERTEX)
        exchange_ents_data_verts.insert((*it)->getEnt());
      else
        exchange_ents_data.insert((*it)->getEnt());
    }

  auto field_ptr = m_field.get_field_structure(field_name);
  ParallelComm *pcomm =
      ParallelComm::get_pcomm(&m_field.get_moab(), MYPCOMM_INDEX);

  // Exchange tag values on shared entities
  auto exchange = [&](const Range &ents, Tag th) {
    MoFEMFunctionBegin;
    if (!ents.empty()) {
      std::vector<Tag> tags;
      tags.push_back(th);
      CHKERR pcomm->exchange_tags(tags, tags, ents);
    }
    MoFEMFunctionReturn(0);
  };

  CHKERR exchange(exchange_ents_data_verts, field_ptr->th_FieldDataVerts);
  CHKERR exchange(exchange_ents_data, field_ptr->th_FieldData);

  MoFEMFunctionReturn(0);
}
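// Hypothetical usage sketch (field and interface names are examples; verify
// against the installed headers): after the owning ranks update the dofs of
// field "DISPLACEMENT", push the new values to the sharing ranks:
//
//   CHKERR m_field.getInterface<CommInterface>()->exchangeFieldData(
//       "DISPLACEMENT", QUIET);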
MoFEMErrorCode CommInterface::partitionMesh(
    const Range &ents, const int dim, const int adj_dim, const int n_parts,
    Tag *th_vertex_weights, Tag *th_edge_weights, Tag *th_part_weights,
    int verb, const bool debug) {
  MoFEMFunctionBegin;
  MoFEM::Interface &m_field = cOre;

  // Get layout of elements over processors
  int rstart, rend, nb_elems;
  {
    PetscLayout layout;
    CHKERR PetscLayoutCreate(m_field.get_comm(), &layout);
    CHKERR PetscLayoutSetBlockSize(layout, 1);
    CHKERR PetscLayoutSetSize(layout, ents.size());
    CHKERR PetscLayoutSetUp(layout);
    CHKERR PetscLayoutGetSize(layout, &nb_elems);
    CHKERR PetscLayoutGetRange(layout, &rstart, &rend);
    CHKERR PetscLayoutDestroy(&layout);
    if (verb >= VERBOSE) {
      MOFEM_LOG("SYNC", Sev::inform)
          << "Finite elements in problem: row lower " << rstart
          << " row upper " << rend << " nb. elems " << nb_elems << " ( "
          << ents.size() << " )";
    }
  }
  // Collect entities for vertex weights
  std::vector<EntityHandle> weight_ents;
  weight_ents.reserve(rend - rstart + 1);

  // Bridge from an entity to the indices of adjacent elements
  struct AdjBridge {
    EntityHandle ent;
    std::vector<int> adj;
    AdjBridge(const EntityHandle ent, std::vector<int> &adj)
        : ent(ent), adj(adj) {}
  };

  typedef multi_index_container<
      AdjBridge,
      indexed_by<

          hashed_unique<member<AdjBridge, EntityHandle, &AdjBridge::ent>>

          >>
      AdjBridgeMap;
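  // Aside: AdjBridgeMap is a Boost.MultiIndex container with a single
  // hashed_unique index over AdjBridge::ent, i.e. an unordered map from an
  // entity handle to the element indices it bridges to. A self-contained
  // sketch of the same pattern (simplified types):
  //
  //   #include <boost/multi_index_container.hpp>
  //   #include <boost/multi_index/hashed_index.hpp>
  //   #include <boost/multi_index/member.hpp>
  //   #include <vector>
  //   using namespace boost::multi_index;
  //   struct Bridge { unsigned long ent; std::vector<int> adj; };
  //   using BridgeMap = multi_index_container<
  //       Bridge, indexed_by<hashed_unique<
  //                   member<Bridge, unsigned long, &Bridge::ent>>>>;
  //   // BridgeMap map; map.insert({42ul, {0, 1}});
  //   // map.find(42ul)->adj; // -> {0, 1}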
  // Advance from the beginning of the range to local index i
  auto get_it = [&](auto i) {
    auto it = ents.begin();
    for (; i > 0; --i) {
      if (it == ents.end())
        break;
      ++it;
    }
    return it;
  };

  Range proc_ents;
  proc_ents.insert(get_it(rstart), get_it(rend));
  if (proc_ents.size() != rend - rstart)
    SETERRQ2(PETSC_COMM_WORLD, MOFEM_DATA_INCONSISTENCY,
             "Wrong number of elements in range %d != %d", proc_ents.size(),
             rend - rstart);
  // Get adj_dim entities adjacent to the local elements
  Range all_dim_ents;
  CHKERR m_field.get_moab().get_adjacencies(
      proc_ents, adj_dim, true, all_dim_ents, moab::Interface::UNION);

  // For each such entity cache the indices of adjacent elements
  AdjBridgeMap adj_bridge_map;
  auto hint = adj_bridge_map.begin();
  std::vector<int> adj;
  for (auto ent : all_dim_ents) {
    Range adj_ents;
    CHKERR m_field.get_moab().get_adjacencies(&ent, 1, dim, false, adj_ents);
    adj_ents = intersect(adj_ents, ents);
    adj.clear();
    adj.reserve(adj_ents.size());
    for (auto a : adj_ents)
      adj.emplace_back(ents.index(a));
    hint = adj_bridge_map.emplace_hint(hint, ent, adj);
  }
  int *_i; // CSR row offsets
  int *_j; // CSR column indices
  {
    const int nb_loc_elements = rend - rstart;
    std::vector<int> i(nb_loc_elements + 1, 0), j;
    {
      std::vector<int> row_adj;
      Range::iterator fe_it;
      int ii, jj;
      size_t max_row_size;
      for (fe_it = proc_ents.begin(), ii = rstart, jj = 0, max_row_size = 0;
           fe_it != proc_ents.end(); ++fe_it, ++ii) {

        if (type_from_handle(*fe_it) == MBENTITYSET) {
          SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED,
                  "not yet implemented, don't know what to do for meshset "
                  "element");
        } else {

          Range dim_ents;
          CHKERR m_field.get_moab().get_adjacencies(&*fe_it, 1, adj_dim,
                                                    false, dim_ents);
          dim_ents = intersect(dim_ents, all_dim_ents);

          // Collect indices of all elements sharing an adj_dim entity
          row_adj.clear();
          for (auto e : dim_ents) {
            auto adj_it = adj_bridge_map.find(e);
            if (adj_it != adj_bridge_map.end()) {

              for (const auto idx : adj_it->adj)
                row_adj.push_back(idx);

            } else
              SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
                      "Entity not found");
          }

          std::sort(row_adj.begin(), row_adj.end());
          auto end = std::unique(row_adj.begin(), row_adj.end());

          size_t row_size = std::distance(row_adj.begin(), end);
          max_row_size = std::max(max_row_size, row_size);
          if (j.capacity() < (nb_loc_elements - jj) * max_row_size)
            j.reserve(nb_loc_elements * max_row_size);

          i[jj] = j.size();
          auto diag = ents.index(*fe_it);
          for (auto it = row_adj.begin(); it != end; ++it)
            if (*it != diag) // no diagonal (self) entries in the graph
              j.push_back(*it);
        }

        ++jj;

        if (th_vertex_weights != NULL)
          weight_ents.push_back(*fe_it);
      }

      i[jj] = j.size();
    }
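    // Aside: the (i, j) pair assembled above is the CSR graph consumed by
    // MatCreateMPIAdj below: the neighbours of local row r are
    // j[i[r]] .. j[i[r+1]-1], with the diagonal (self) entry excluded.
    // E.g. a 3-element path 0-1-2 on one rank gives
    //
    //   i = {0, 1, 3, 4};   // row offsets
    //   j = {1, 0, 2, 1};   // neighbouring element indices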
    // Hand the CSR arrays to PETSc (freed together with the matrix)
    CHKERR PetscMalloc(i.size() * sizeof(int), &_i);
    CHKERR PetscMalloc(j.size() * sizeof(int), &_j);
    copy(i.begin(), i.end(), _i);
    copy(j.begin(), j.end(), _j);
  }

  // Get vertex weights
  int *vertex_weights = NULL;
  if (th_vertex_weights != NULL) {
    CHKERR PetscMalloc(weight_ents.size() * sizeof(int), &vertex_weights);
    CHKERR m_field.get_moab().tag_get_data(*th_vertex_weights,
                                           &*weight_ents.begin(),
                                           weight_ents.size(),
                                           vertex_weights);
  }
  {
    Mat Adj;
    // Adjacency matrix used to partition elements
    CHKERR MatCreateMPIAdj(m_field.get_comm(), rend - rstart, nb_elems, _i,
                           _j, PETSC_NULL, &Adj);
    CHKERR MatSetOption(Adj, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);

    if (debug) {
      Mat A;
      CHKERR MatConvert(Adj, MATMPIAIJ, MAT_INITIAL_MATRIX, &A);
      CHKERR MatView(A, PETSC_VIEWER_DRAW_WORLD);
      CHKERR MatDestroy(&A);
    }
    // Run the partitioner
    MOFEM_LOG("WORLD", Sev::verbose) << "Start";

    MatPartitioning part;
    IS is;
    CHKERR MatPartitioningCreate(m_field.get_comm(), &part);

    CHKERR MatPartitioningSetAdjacency(part, Adj);
    CHKERR MatPartitioningSetFromOptions(part);
    CHKERR MatPartitioningSetNParts(part, n_parts);
    if (th_vertex_weights != NULL) {
      CHKERR MatPartitioningSetVertexWeights(part, vertex_weights);
    }
    PetscBool same;
    PetscObjectTypeCompare((PetscObject)part, MATPARTITIONINGPARMETIS, &same);
    if (same) {
      // Use the MoFEM-tweaked ParMETIS apply declared at the top of the file
      CHKERR MatPartitioningApply_Parmetis_MoFEM(part, &is);
    } else {
      CHKERR MatPartitioningApply(part, &is);
    }

    MOFEM_LOG("WORLD", Sev::verbose) << "End";
    // Gather partitioning indices on all ranks
    IS is_gather, is_num, is_gather_num;
    CHKERR ISAllGather(is, &is_gather);
    CHKERR ISPartitioningToNumbering(is, &is_num);
    CHKERR ISAllGather(is_num, &is_gather_num);

    const int *part_number, *gids;
    CHKERR ISGetIndices(is_gather, &part_number);
    CHKERR ISGetIndices(is_gather_num, &gids);
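    // Aside: is holds, for each local row, its target part;
    // ISPartitioningToNumbering converts that into a new global numbering in
    // which rows of part 0 come first, then part 1, and so on. E.g. parts
    // {1, 0, 1, 0} yield the numbering {2, 0, 3, 1}. ISAllGather then makes
    // both index sets available on every rank.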
    // Set partition tags and global ids on entities
    ParallelComm *pcomm =
        ParallelComm::get_pcomm(&m_field.get_moab(), MYPCOMM_INDEX);
    Tag part_tag = pcomm->part_tag();
    CHKERR m_field.get_moab().tag_set_data(part_tag, ents, part_number);
    Tag gid_tag = m_field.get_moab().globalId_tag();

    std::map<int, Range> parts_ents;
    {
      // Get entities on each part
      Range::iterator eit = ents.begin();
      for (int ii = 0; eit != ents.end(); eit++, ii++) {
        parts_ents[part_number[ii]].insert(*eit);
      }
      Range tagged_sets;
      CHKERR m_field.get_moab().get_entities_by_type_and_tag(
          0, MBENTITYSET, &part_tag, NULL, 1, tagged_sets,
          moab::Interface::UNION);
      if (!tagged_sets.empty())
        CHKERR m_field.get_moab().tag_delete_data(part_tag, tagged_sets);

      if (n_parts > (int)tagged_sets.size()) {
        // Too few partition sets: create the missing ones
        int num_new = n_parts - tagged_sets.size();
        for (int i = 0; i < num_new; i++) {
          EntityHandle new_set;
          CHKERR m_field.get_moab().create_meshset(MESHSET_SET, new_set);
          tagged_sets.insert(new_set);
        }
      } else if (n_parts < (int)tagged_sets.size()) {
        // Too many partition sets: delete the surplus
        int num_del = tagged_sets.size() - n_parts;
        for (int i = 0; i < num_del; i++) {
          EntityHandle set = tagged_sets.pop_back();
          CHKERR m_field.get_moab().delete_entities(&set, 1);
        }
      }

      // Tag the partition meshsets with their part number
      std::vector<int> dum_ids(n_parts);
      for (int i = 0; i < n_parts; i++)
        dum_ids[i] = i;
      CHKERR m_field.get_moab().tag_set_data(part_tag, tagged_sets,
                                             &*dum_ids.begin());
      // Add lower-dimension adjacencies to each part
      for (int pp = 0; pp != n_parts; pp++) {
        Range dim_ents = parts_ents[pp].subset_by_dimension(dim);
        for (int dd = dim - 1; dd >= 0; dd--) {
          Range adj_ents;
          CHKERR m_field.get_moab().get_adjacencies(
              dim_ents, dd, false, adj_ents, moab::Interface::UNION);
          parts_ents[pp].merge(adj_ents);
        }
      }

      // Make parts disjoint: an entity stays in the lowest-numbered part
      for (int pp = 1; pp != n_parts; pp++) {
        for (int ppp = 0; ppp != pp; ppp++) {
          parts_ents[pp] = subtract(parts_ents[pp], parts_ents[ppp]);
        }
      }

      for (int pp = 0; pp != n_parts; pp++) {
        CHKERR m_field.get_moab().add_entities(tagged_sets[pp],
                                               parts_ents[pp]);
      }
      // Tag each entity with its part
      auto set_part = [&]() {
        MoFEMFunctionBegin;
        for (EntityType t = MBEDGE; t != MBENTITYSET; ++t) {
          for (int pp = 0; pp != n_parts; pp++) {
            Range type_ents = parts_ents[pp].subset_by_type(t);
            CHKERR m_field.get_moab().tag_clear_data(part_tag, type_ents,
                                                     &pp);
          }
        }
        MoFEMFunctionReturn(0);
      };

      // Number entities consecutively, part by part, type by type
      auto set_gid = [&]() {
        MoFEMFunctionBegin;
        int gid = 1; // moab indexing from 1
        for (EntityType t = MBVERTEX; t != MBENTITYSET; ++t) {

          void *ptr;
          int count;

          for (int pp = 0; pp != n_parts; pp++) {
            Range type_ents = parts_ents[pp].subset_by_type(t);

            auto eit = type_ents.begin();
            for (; eit != type_ents.end();) {
              CHKERR m_field.get_moab().tag_iterate(
                  gid_tag, eit, type_ents.end(), count, ptr);
              auto gid_tag_ptr = static_cast<int *>(ptr);
              for (; count > 0; --count) {
                *gid_tag_ptr = gid;
                ++eit;
                ++gid;
                ++gid_tag_ptr;
              }
            }
          }
        }
        MoFEMFunctionReturn(0);
      };

      CHKERR set_part();
      CHKERR set_gid();
      if (debug) {
        // Save each part as a separate VTK file
        if (m_field.get_comm_rank() == 0) {
          for (int rr = 0; rr != n_parts; rr++) {
            std::ostringstream ss;
            ss << "out_part_" << rr << ".vtk";
            MOFEM_LOG("SELF", Sev::inform)
                << "Save debug part mesh " << ss.str();
            EntityHandle part_set = tagged_sets[rr];
            CHKERR m_field.get_moab().write_file(ss.str().c_str(), "VTK", "",
                                                 &part_set, 1);
          }
        }
      }
    }

    // Release indices and destroy PETSc objects
    CHKERR ISRestoreIndices(is_gather, &part_number);
    CHKERR ISRestoreIndices(is_gather_num, &gids);
    CHKERR ISDestroy(&is_num);
    CHKERR ISDestroy(&is_gather_num);
    CHKERR ISDestroy(&is_gather);
    CHKERR ISDestroy(&is);
    CHKERR MatPartitioningDestroy(&part);
    CHKERR MatDestroy(&Adj);
  }

  MoFEMFunctionReturn(0);
}