/** \file CommInterface.cpp
 * \brief Functions for interprocessor communications
 * \mofem_comm
 */

/* MoFEM is free software: you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the
 * Free Software Foundation, either version 3 of the License, or (at your
 * option) any later version.
 *
 * MoFEM is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
 * License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with MoFEM. If not, see <http://www.gnu.org/licenses/>
 */

namespace MoFEM {

MoFEMErrorCode CommInterface::query_interface(const MOFEMuuid &uuid,
                                              UnknownInterface **iface) const {
  MoFEMFunctionBeginHot;
  *iface = NULL;
  if (uuid == IDD_MOFEMComm) {
    *iface = const_cast<CommInterface *>(this);
    MoFEMFunctionReturnHot(0);
  }
  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "unknown interface");
  MoFEMFunctionReturnHot(0);
}
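
/* A minimal usage sketch (an assumption, not from this file): the interface
 * is normally obtained through the generic UnknownInterface::getInterface
 * mechanism, which dispatches to query_interface above:
 *
 *   CommInterface *comm_interface_ptr;
 *   CHKERR m_field.getInterface(comm_interface_ptr);
 */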

CommInterface::CommInterface(const MoFEM::Core &core)
    : cOre(const_cast<MoFEM::Core &>(core)), dEbug(false) {}

CommInterface::~CommInterface() {}

MoFEMErrorCode CommInterface::synchroniseEntities(Range &ents, int verb) {
  MoFEM::Interface &m_field = cOre;
  auto ref_ents_ptr = m_field.get_ref_ents();
  MoFEMFunctionBegin;

  // make a buffer
  std::vector<std::vector<EntityHandle>> sbuffer(m_field.get_comm_size());

  Range::iterator eit = ents.begin();
  for (; eit != ents.end(); eit++) {

    auto meit = ref_ents_ptr->get<Ent_mi_tag>().find(*eit);
    if (meit == ref_ents_ptr->get<Ent_mi_tag>().end()) {
      // Entity is unknown on this processor; skip it. Note that the
      // `continue` makes the SETERRQ2 below unreachable, so such entities
      // are silently ignored.
      continue;
      SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
               "rank %d entity %lu does not exist in database; local entity "
               "cannot be found for this owner",
               m_field.get_comm_rank(), *eit);
    }

    unsigned char pstatus = (*meit)->getPStatus();

    // pstatus == 0 means the entity is not shared with any other processor,
    // so there is nothing to send
    if (pstatus == 0)
      continue;

    if (verb >= NOISY) {
      std::ostringstream zz;
      zz << "pstatus " << std::bitset<8>(pstatus) << " ";
      PetscSynchronizedPrintf(m_field.get_comm(), "%s", zz.str().c_str());
    }

    for (int proc = 0;
         proc < MAX_SHARING_PROCS && -1 != (*meit)->getSharingProcsPtr()[proc];
         proc++) {
      if ((*meit)->getSharingProcsPtr()[proc] == -1)
        SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSIBLE_CASE,
                "sharing processor not set");

      if ((*meit)->getSharingProcsPtr()[proc] == m_field.get_comm_rank())
        continue;

      EntityHandle handle_on_sharing_proc =
          (*meit)->getSharingHandlersPtr()[proc];
      sbuffer[(*meit)->getSharingProcsPtr()[proc]].push_back(
          handle_on_sharing_proc);
      if (verb >= NOISY)
        PetscSynchronizedPrintf(
            m_field.get_comm(), "send %lu (%lu) to %d at %d\n",
            (*meit)->getRefEnt(), handle_on_sharing_proc,
            (*meit)->getSharingProcsPtr()[proc], m_field.get_comm_rank());

      // a shared (but not multi-shared) entity has only one valid sharing
      // processor entry
      if (!(pstatus & PSTATUS_MULTISHARED))
        break;
    }
  }

  int nsends = 0; // number of messages to send
  std::vector<int> sbuffer_lengths(
      m_field.get_comm_size()); // length of the message to proc
  const size_t block_size = sizeof(EntityHandle) / sizeof(int);
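  // Handles travel through PETSc's int-based message helpers, so message
  // lengths are counted in ints: each EntityHandle occupies block_size ints
  // (e.g. block_size == 2 for 8-byte handles and 4-byte ints, so a message
  // of 10 handles has length 20).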
  for (int proc = 0; proc < m_field.get_comm_size(); proc++) {

    if (!sbuffer[proc].empty()) {

      sbuffer_lengths[proc] = sbuffer[proc].size() * block_size;
      nsends++;

    } else {

      sbuffer_lengths[proc] = 0;
    }
  }
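
  // What follows is the usual PETSc point-to-point handshake: first learn how
  // many messages this rank will receive and how long each is, then post
  // non-blocking receives, send, and finally wait on both sides.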

  // Make sure it is a PETSc communicator
  CHKERR PetscCommDuplicate(m_field.get_comm(), &m_field.get_comm(), NULL);

  std::vector<MPI_Status> status(m_field.get_comm_size());

  // Computes the number of messages a node expects to receive
  int nrecvs; // number of messages received
  CHKERR PetscGatherNumberOfMessages(m_field.get_comm(), NULL,
                                     &sbuffer_lengths[0], &nrecvs);

  // Computes info about messages that an MPI node will receive, including
  // (from-id, length) pairs for each message.
  int *onodes;   // list of node-ids from which messages are expected
  int *olengths; // corresponding message lengths
  CHKERR PetscGatherMessageLengths(m_field.get_comm(), nsends, nrecvs,
                                   &sbuffer_lengths[0], &onodes, &olengths);

  // Gets a unique new tag from a PETSc communicator. All processors that
  // share the communicator MUST call this routine EXACTLY the same number of
  // times. This tag should only be used with the current objects
  // communicator; do NOT use it with any other MPI communicator.
  int tag;
  CHKERR PetscCommGetNewTag(m_field.get_comm(), &tag);

  // Allocate a buffer sufficient to hold messages of size specified in
  // olengths, and post Irecvs on these buffers using node info from onodes
  int **rbuf;           // must be freed by user
  MPI_Request *r_waits; // must be freed by user
  // rbuf holds pointers to messages. It has size nrecvs (number of messages)
  // + 1. In the first index a block is allocated, such that
  // rbuf[i] = rbuf[i-1] + olengths[i-1].
  CHKERR PetscPostIrecvInt(m_field.get_comm(), tag, nrecvs, onodes, olengths,
                           &rbuf, &r_waits);

  MPI_Request *s_waits; // requests for sent messages
  CHKERR PetscMalloc1(nsends, &s_waits);

  // Send messages
  for (int proc = 0, kk = 0; proc < m_field.get_comm_size(); proc++) {
    if (!sbuffer_lengths[proc])
      continue; // no message to send to this proc
    CHKERR MPI_Isend(&(sbuffer[proc])[0],   // buffer to send
                     sbuffer_lengths[proc], // message length
                     MPIU_INT, proc,        // to proc
                     tag, m_field.get_comm(), s_waits + kk);
    kk++;
  }

  // Wait for receives
  if (nrecvs)
    CHKERR MPI_Waitall(nrecvs, r_waits, &status[0]);

  // Wait for sends
  if (nsends)
    CHKERR MPI_Waitall(nsends, s_waits, &status[0]);

  if (verb >= VERY_VERBOSE) {
    PetscSynchronizedPrintf(m_field.get_comm(), "Rank %d nb. before ents %u\n",
                            m_field.get_comm_rank(), (unsigned)ents.size());
  }

  // synchronise range
  for (int kk = 0; kk < nrecvs; kk++) {

    int len = olengths[kk];
    int *data_from_proc = rbuf[kk];

    for (int ee = 0; ee < len; ee += block_size) {

      EntityHandle ent;
      bcopy(&data_from_proc[ee], &ent, sizeof(EntityHandle));
      auto meit = ref_ents_ptr->get<Ent_mi_tag>().find(ent);
      if (meit == ref_ents_ptr->get<Ent_mi_tag>().end())
        SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
                 "rank %d entity %lu does not exist in database; local entity "
                 "cannot be found for this owner",
                 m_field.get_comm_rank(), ent);

      if (verb >= VERY_VERBOSE)
        PetscSynchronizedPrintf(
            m_field.get_comm(), "received %lu (%lu) from %d at %d\n",
            (*meit)->getRefEnt(), ent, onodes[kk], m_field.get_comm_rank());

      ents.insert((*meit)->getRefEnt());
    }
  }

  if (verb >= VERBOSE)
    PetscSynchronizedPrintf(m_field.get_comm(), "Rank %d nb. after ents %u\n",
                            m_field.get_comm_rank(), (unsigned)ents.size());

  // Cleaning
  CHKERR PetscFree(s_waits);
  CHKERR PetscFree(rbuf[0]);
  CHKERR PetscFree(rbuf);
  CHKERR PetscFree(r_waits);
  CHKERR PetscFree(onodes);
  CHKERR PetscFree(olengths);

  if (verb >= VERBOSE)
    PetscSynchronizedFlush(m_field.get_comm(), PETSC_STDOUT);

  MoFEMFunctionReturn(0);
}
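
/* A usage sketch (an assumed calling context, not from this file): make a
 * locally collected range consistent with the copies held by the sharing
 * processors:
 *
 *   Range edges;
 *   CHKERR m_field.get_moab().get_entities_by_type(0, MBEDGE, edges);
 *   CommInterface *comm_interface_ptr;
 *   CHKERR m_field.getInterface(comm_interface_ptr);
 *   // on return, edges also holds local handles of entities which other
 *   // processors share with this one
 *   CHKERR comm_interface_ptr->synchroniseEntities(edges, VERBOSE);
 */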

MoFEMErrorCode CommInterface::synchroniseFieldEntities(const std::string name,
                                                       int verb) {
  MoFEM::Interface &m_field = cOre;
  MoFEMFunctionBegin;
  EntityHandle idm = m_field.get_field_meshset(name);
  Range ents;
  CHKERR m_field.get_moab().get_entities_by_handle(idm, ents, false);
  CHKERR synchroniseEntities(ents, verb);
  CHKERR m_field.get_moab().add_entities(idm, ents);
  MoFEMFunctionReturn(0);
}
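
/* A usage sketch (the field name is a hypothetical example): synchronise all
 * entities stored in the meshset of a field:
 *
 *   CHKERR comm_interface_ptr->synchroniseFieldEntities("DISPLACEMENT");
 */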

MoFEMErrorCode CommInterface::resolveSharedFiniteElements(
    const Problem *problem_ptr, const std::string &fe_name, int verb) {
  MoFEM::Interface &m_field = cOre;
  MoFEMFunctionBegin;
  ParallelComm *pcomm = ParallelComm::get_pcomm(
      &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);
  std::vector<int> shprocs(MAX_SHARING_PROCS, 0);
  std::vector<EntityHandle> shhandles(MAX_SHARING_PROCS, 0);
  Range ents;
  Tag th_gid;
  const int zero = 0;
  CHKERR m_field.get_moab().tag_get_handle(GLOBAL_ID_TAG_NAME, 1,
                                           MB_TYPE_INTEGER, th_gid,
                                           MB_TAG_DENSE | MB_TAG_CREAT, &zero);
  PetscLayout layout;
  CHKERR problem_ptr->getNumberOfElementsByNameAndPart(m_field.get_comm(),
                                                       fe_name, &layout);
  int gid, last_gid;
  CHKERR PetscLayoutGetRange(layout, &gid, &last_gid);
  CHKERR PetscLayoutDestroy(&layout);
  for (_IT_NUMEREDFE_BY_NAME_FOR_LOOP_(problem_ptr, fe_name, fe_it)) {
    EntityHandle ent = (*fe_it)->getEnt();
    ents.insert(ent);
    unsigned int part = (*fe_it)->getPart();
    CHKERR m_field.get_moab().tag_set_data(pcomm->part_tag(), &ent, 1, &part);
    if (part == pcomm->rank()) {
      CHKERR m_field.get_moab().tag_set_data(th_gid, &ent, 1, &gid);
      gid++;
    }
    // reset sharing data for this element; clear() would shrink the vectors
    // and make the indexing below undefined, so refill them instead
    shprocs.assign(MAX_SHARING_PROCS, 0);
    shhandles.assign(MAX_SHARING_PROCS, 0);

    if (pcomm->size() > 1) {

      unsigned char pstatus = 0;
      if (pcomm->rank() != part) {
        pstatus = PSTATUS_NOT_OWNED;
        pstatus |= PSTATUS_GHOST;
      }

      if (pcomm->size() > 2) {
        pstatus |= PSTATUS_SHARED;
        pstatus |= PSTATUS_MULTISHARED;
      } else {
        pstatus |= PSTATUS_SHARED;
      }

      size_t rrr = 0;
      for (size_t rr = 0; rr < pcomm->size(); ++rr) {
        if (rr != pcomm->rank()) {
          shhandles[rrr] = ent;
          shprocs[rrr] = rr;
          ++rrr;
        }
      }
      for (; rrr != pcomm->size(); ++rrr)
        shprocs[rrr] = -1;

      if (pstatus & PSTATUS_SHARED) {
        CHKERR m_field.get_moab().tag_set_data(pcomm->sharedp_tag(), &ent, 1,
                                               &shprocs[0]);
        CHKERR m_field.get_moab().tag_set_data(pcomm->sharedh_tag(), &ent, 1,
                                               &shhandles[0]);
      }

      if (pstatus & PSTATUS_MULTISHARED) {
        CHKERR m_field.get_moab().tag_set_data(pcomm->sharedps_tag(), &ent, 1,
                                               &shprocs[0]);
        CHKERR m_field.get_moab().tag_set_data(pcomm->sharedhs_tag(), &ent, 1,
                                               &shhandles[0]);
      }
      CHKERR m_field.get_moab().tag_set_data(pcomm->pstatus_tag(), &ent, 1,
                                             &pstatus);
    }
  }
  CHKERR pcomm->exchange_tags(th_gid, ents);
  MoFEMFunctionReturn(0);
}

MoFEMErrorCode CommInterface::resolveSharedFiniteElements(
    const std::string &name, const std::string &fe_name, int verb) {
  MoFEM::Interface &m_field = cOre;
  MoFEMFunctionBegin;
  const Problem *problem_ptr;
  CHKERR m_field.get_problem(name, &problem_ptr);
  CHKERR resolveSharedFiniteElements(problem_ptr, fe_name, verb);
  MoFEMFunctionReturn(0);
}
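
/* A usage sketch (problem and element names are hypothetical examples):
 * after partitioning, set up MOAB sharing data for the finite elements of a
 * problem so that tags on them can be exchanged later:
 *
 *   CHKERR comm_interface_ptr->resolveSharedFiniteElements("ELASTIC_PROB",
 *                                                          "ELASTIC_FE");
 */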

MoFEMErrorCode
CommInterface::makeEntitiesMultishared(const EntityHandle *entities,
                                       const int num_entities,
                                       const int owner_proc, int verb) {
  MoFEM::Interface &m_field = cOre;
  MoFEMFunctionBegin;

  if (m_field.get_comm_size() > 1) {

    ParallelComm *pcomm = ParallelComm::get_pcomm(
        &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);

    Range all_ents_range;
    all_ents_range.insert_list(entities, entities + num_entities);

    // create a temporary global-id tag used by resolve_shared_ents
    auto get_tag = [&]() {
      Tag th_gid;
      const int zero = 0;
      CHKERR m_field.get_moab().tag_get_handle(
          "TMP_GLOBAL_ID_TAG_NAME", 1, MB_TYPE_INTEGER, th_gid,
          MB_TAG_SPARSE | MB_TAG_CREAT, &zero);
      return th_gid;
    };

    auto delete_tag = [&](auto &&th_gid) {
      MoFEMFunctionBegin;
      CHKERR m_field.get_moab().tag_delete(th_gid);
      MoFEMFunctionReturn(0);
    };

    auto resolve_shared_ents = [&](auto &&th_gid, auto &all_ents_range) {
      auto set_gid = [&](auto &th_gid) {
        std::vector<int> gids(num_entities);
        for (size_t g = 0; g != all_ents_range.size(); ++g)
          gids[g] = g + 1;
        CHKERR m_field.get_moab().tag_set_data(th_gid, all_ents_range,
                                               &*gids.begin());

        return &th_gid;
      };

      auto get_skin_ents = [&](auto &all_ents_range) {
        std::array<Range, 4> proc_ents_skin;
        proc_ents_skin[3] = all_ents_range.subset_by_dimension(3);
        proc_ents_skin[2] = all_ents_range.subset_by_dimension(2);
        proc_ents_skin[1] = all_ents_range.subset_by_dimension(1);
        proc_ents_skin[0] = all_ents_range.subset_by_dimension(0);
        return proc_ents_skin;
      };

      auto resolve_dim = [&](auto &all_ents_range) {
        for (int resolve_dim = 3; resolve_dim >= 0; --resolve_dim) {
          if (all_ents_range.num_of_dimension(resolve_dim))
            return resolve_dim;
        }
        return -1;
      };

      auto get_proc_ent = [&](auto &all_ents_range) {
        Range proc_ent;
        if (m_field.get_comm_rank() == owner_proc)
          proc_ent = all_ents_range;
        return proc_ent;
      };

      auto resolve_shared_ents = [&](auto &&proc_ents, auto &&skin_ents) {
        return pcomm->resolve_shared_ents(
            0, proc_ents, resolve_dim(all_ents_range),
            resolve_dim(all_ents_range), skin_ents.data(), set_gid(th_gid));
      };

      CHKERR resolve_shared_ents(get_proc_ent(all_ents_range),
                                 get_skin_ents(all_ents_range));

      return th_gid;
    };

    CHKERR delete_tag(resolve_shared_ents(get_tag(), all_ents_range));

    if (verb >= NOISY) {

      auto print_owner = [&](const EntityHandle e) {
        MoFEMFunctionBegin;
        int moab_owner_proc;
        EntityHandle moab_owner_handle;
        CHKERR pcomm->get_owner_handle(e, moab_owner_proc, moab_owner_handle);

        unsigned char pstatus = 0;

        CHKERR m_field.get_moab().tag_get_data(pcomm->pstatus_tag(), &e, 1,
                                               &pstatus);

        std::vector<int> shprocs(MAX_SHARING_PROCS, 0);
        std::vector<EntityHandle> shhandles(MAX_SHARING_PROCS, 0);

        CHKERR m_field.get_moab().tag_get_data(pcomm->sharedp_tag(), &e, 1,
                                               &shprocs[0]);
        CHKERR m_field.get_moab().tag_get_data(pcomm->sharedh_tag(), &e, 1,
                                               &shhandles[0]);
        if (pstatus & PSTATUS_MULTISHARED) {
          CHKERR m_field.get_moab().tag_get_data(pcomm->sharedps_tag(), &e, 1,
                                                 &shprocs[0]);
          CHKERR m_field.get_moab().tag_get_data(pcomm->sharedhs_tag(), &e, 1,
                                                 &shhandles[0]);
        }

        std::ostringstream ss;

        ss << "Rank " << m_field.get_comm_rank() << " ";
        if (!(pstatus & PSTATUS_NOT_OWNED))
          ss << "OWNER ";
        if (pstatus & PSTATUS_SHARED)
          ss << "PSTATUS_SHARED ";
        if (pstatus & PSTATUS_MULTISHARED)
          ss << "PSTATUS_MULTISHARED ";

        ss << "owner " << moab_owner_proc << " (" << owner_proc << ") ";

        ss << "shprocs: ";
        for (size_t r = 0; r != m_field.get_comm_size() + 1; ++r)
          ss << shprocs[r] << " ";

        ss << "shhandles: ";
        for (size_t r = 0; r != m_field.get_comm_size() + 1; ++r)
          ss << shhandles[r] << " ";

        ss << std::endl;
        PetscSynchronizedPrintf(m_field.get_comm(), "%s", ss.str().c_str());
        PetscSynchronizedFlush(m_field.get_comm(), PETSC_STDOUT);

        MoFEMFunctionReturn(0);
      };

      for (auto e : all_ents_range)
        CHKERR print_owner(e);
    }
  }

  MoFEMFunctionReturn(0);
}

MoFEMErrorCode CommInterface::makeEntitiesMultishared(Range &entities,
                                                      const int owner_proc,
                                                      int verb) {
  MoFEM::Interface &m_field = cOre;
  MoFEMFunctionBegin;
  if (m_field.get_comm_size() > 1) {
    const int num_ents = entities.size();
    std::vector<EntityHandle> vec_ents(num_ents);
    std::copy(entities.begin(), entities.end(), vec_ents.begin());
    CHKERR makeEntitiesMultishared(&*vec_ents.begin(), num_ents, owner_proc,
                                   verb);
  }
  MoFEMFunctionReturn(0);
}
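
/* A usage sketch (the range is a hypothetical example): entities gathered on
 * owner_proc (here rank 0) become visible, with sharing data set, on every
 * processor:
 *
 *   Range special_nodes; // filled on rank 0, empty elsewhere
 *   CHKERR comm_interface_ptr->makeEntitiesMultishared(special_nodes, 0);
 */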

MoFEMErrorCode
CommInterface::makeFieldEntitiesMultishared(const std::string field_name,
                                            const int owner_proc, int verb) {
  MoFEM::Interface &m_field = cOre;
  MoFEMFunctionBegin;
  if (m_field.get_comm_size() > 1) {
    EntityHandle field_meshset = m_field.get_field_meshset(field_name);
    std::vector<EntityHandle> field_ents;
    CHKERR m_field.get_moab().get_entities_by_handle(field_meshset, field_ents,
                                                     true);
    CHKERR makeEntitiesMultishared(&*field_ents.begin(), field_ents.size(),
                                   owner_proc, verb);
  }
  MoFEMFunctionReturn(0);
}

MoFEMErrorCode CommInterface::exchangeFieldData(const std::string field_name,
                                                int verb) {
  MoFEM::Interface &m_field = cOre;
  MoFEMFunctionBegin;
  if (m_field.get_comm_size() > 1) {

    auto *field_ents = m_field.get_field_ents();

    Range exchange_ents_data_verts, exchange_ents_data;

    // collect shared entities of the field which carry any DOFs; vertices
    // are exchanged through a separate tag
    for (auto it = field_ents->get<FieldName_mi_tag>().lower_bound(field_name);
         it != field_ents->get<FieldName_mi_tag>().upper_bound(field_name);
         ++it)
      if (((*it)->getPStatus()) && (*it)->getNbDofsOnEnt()) {
        if ((*it)->getEntType() == MBVERTEX)
          exchange_ents_data_verts.insert((*it)->getRefEnt());
        else
          exchange_ents_data.insert((*it)->getRefEnt());
      }

    auto field_ptr = m_field.get_field_structure(field_name);
    ParallelComm *pcomm = ParallelComm::get_pcomm(
        &m_field.get_moab(), m_field.get_basic_entity_data_ptr()->pcommID);

    auto exchange = [&](const Range &ents, Tag th) {
      MoFEMFunctionBegin;
      if (!ents.empty()) {
        std::vector<Tag> tags;
        tags.push_back(th);
        CHKERR pcomm->exchange_tags(tags, tags, ents);
      }
      MoFEMFunctionReturn(0);
    };

    CHKERR exchange(exchange_ents_data_verts, field_ptr->th_FieldDataVerts);
    CHKERR exchange(exchange_ents_data, field_ptr->th_FieldData);
  }
  MoFEMFunctionReturn(0);
}
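
/* A usage sketch (the field name is a hypothetical example): after the
 * owning processors update DOF values, push the field tag data to all
 * sharing processors:
 *
 *   CHKERR comm_interface_ptr->exchangeFieldData("DISPLACEMENT");
 */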

} // namespace MoFEM