v0.14.0
Schur.cpp
Go to the documentation of this file.
1 /**
2  * @file Schur.cpp
3  * @brief Implementation of Schur Complement
4  * @date 2023-02-03
5  *
6  * @copyright Copyright (c) 2023
7  *
8  */
9 
10 namespace MoFEM {
11 
12 constexpr bool debug_schur = false;
13 
15  OpSchurAssembleBaseImpl() = delete;
16 
17 protected:
19 
20  /**
21  * @brief Assemble Schur complement
22  *
23  */
25 
26  Mat S,
27 
28  const UId &uid_row, VectorInt &row_ind,
29 
30  const UId &uid_col, VectorInt &col_ind,
31 
32  MatrixDouble &m, InsertMode iora
33 
34  );
35 };
36 
37 // Assemble specialisations
39 
40  Mat S,
41 
42  const UId &uid_row, VectorInt &row_ind,
43 
44  const UId &uid_col, VectorInt &col_ind,
45 
46  MatrixDouble &m, InsertMode iora
47 
48 ) {
50 
51  S, row_ind.size(), &*row_ind.begin(), col_ind.size(), &*col_ind.begin(),
52  &*m.data().begin(), iora
53 
54  );
55 }
56 
57 /**
58  * @brief Clear Schur complement internal data
59  *
60  */
62 
64 
65  MoFEMErrorCode doWork(int side, EntityType type,
67 };
68 
69 /**
70  * @brief Assemble Schur complement (Implementation)
71  *
72  */
73 template <typename OP_SCHUR_ASSEMBLE_BASE>
75 
77 
78  /**
79  * @brief Construct a new Op Schur Assemble End object
80  *
81  * @param fields_name list of fields
82  * @param field_ents list of entities on which schur complement is applied
83  * (can be empty)
84  * @param schur_ao map indices to schur matrix indices
85  * @param schur_mat schur matrix
86  * @param sym_schur true if schur is symmetric
87  * @param symm_op true if block diagonal is symmetric
88  */
90 
91  std::vector<std::string> fields_name,
92  std::vector<boost::shared_ptr<Range>> field_ents,
93 
94  SmartPetscObj<AO> schur_ao, SmartPetscObj<Mat> schur_mat,
95 
96  bool sym_schur, bool symm_op);
97 
98 protected:
99  template <typename I>
101 
102  int side, EntityType type, EntitiesFieldData::EntData &data
103 
104  );
105 
106  std::vector<std::string> fieldsName;
107  std::vector<boost::shared_ptr<Range>> fieldEnts;
110  bool symSchur;
111 
115 };
116 
117 struct SchurDSYSV; ///< SY symmetric
118 struct SchurDGESV; ///< GE general (i.e., nonsymmetric, in some cases
119  ///< rectangular)
120 
121 /**
122  * @brief Assemble Schur complement
123  *
124  */
125 template <typename I> struct OpSchurAssembleEnd;
126 
127 template <>
129  : public OpSchurAssembleEndImpl<OpSchurAssembleBaseImpl> {
131  MoFEMErrorCode doWork(int side, EntityType type,
133 };
134 
135 template <>
137  : public OpSchurAssembleEndImpl<OpSchurAssembleBaseImpl> {
139  MoFEMErrorCode doWork(int side, EntityType type,
141 };
142 
143 /**
144  * @brief Schur complement data storage
145  *
146  */
147 struct SchurElemMats : public boost::enable_shared_from_this<SchurElemMats> {
148 
149  SchurElemMats(const size_t idx, const UId uid_row, const UId uid_col);
150  virtual ~SchurElemMats() = default;
151 
152  const size_t iDX;
155 
156  inline auto &getMat() const { return locMats[iDX]; }
157  inline auto &getRowInd() const { return rowIndices[iDX]; }
158  inline auto &getColInd() const { return colIndices[iDX]; }
159 
160  static MoFEMErrorCode MatSetValues(Mat M,
161  const EntitiesFieldData::EntData &row_data,
162  const EntitiesFieldData::EntData &col_data,
163  const MatrixDouble &mat, InsertMode iora);
164 
165 protected:
166  static MoFEMErrorCode
168  const EntitiesFieldData::EntData &col_data,
169  const MatrixDouble &mat, InsertMode iora);
170 
171  friend struct OpSchurAssembleBegin;
172  template <typename OP_SCHUR_ASSEMBLE_BASE>
173  friend struct OpSchurAssembleEndImpl;
174 
175  struct uid_mi_tag {};
176  struct col_mi_tag {};
177 
178  using SchurElemStorage = multi_index_container<
179  const SchurElemMats *,
180  indexed_by<
181 
182  ordered_unique<
183  tag<uid_mi_tag>,
184  composite_key<
186 
187  member<SchurElemMats, const UId, &SchurElemMats::uidRow>,
188  member<SchurElemMats, const UId, &SchurElemMats::uidCol>
189 
190  >>,
191 
192  ordered_non_unique<tag<col_mi_tag>, member<SchurElemMats, const UId,
194 
195  >>;
196 
197  static boost::ptr_vector<MatrixDouble> locMats;
198  static boost::ptr_vector<VectorInt> rowIndices;
199  static boost::ptr_vector<VectorInt> colIndices;
200  static boost::ptr_vector<SchurElemMats> schurElemMats;
201  static size_t maxIndexCounter;
202 
204 };
205 
207 
208  virtual ~DiagBlockIndex() = default;
209 
210  /**
211  * @brief block data indexes
212  *
213  */
214  struct Indexes {
215 
217  int nb_cols, int loc_row, int loc_col, int mat_shift)
221 
222  inline UId getRowUId() const { return uid_row; }
223  inline UId getColUId() const { return uid_col; }
224  inline UId getFEUId() const { return uid_fe; }
225  inline int getRow() const { return row; }
226  inline int getCol() const { return col; }
227  inline int getNbRows() const { return nb_rows; }
228  inline int getNbCols() const { return nb_cols; }
229  inline int getLocRow() const { return loc_row; }
230  inline int getLocCol() const { return loc_col; }
231  inline int &getMatShift() const { return mat_shift; }
232 
233  inline int rowShift() const {
234  return getRow() + getNbRows();
235  } // shift such that lower bound is included
236 
237  inline int colShift() const {
238  return getCol() + getNbCols();
239  } // shift such that lower bound is included
240 
241  private:
245  int row;
246  int col;
247  int nb_rows;
248  int nb_cols;
249  int loc_row;
250  int loc_col;
251  mutable int mat_shift;
252  };
253 
254  using BlockIndex = multi_index_container<
255 
256  Indexes,
257 
258  indexed_by<
259 
260  ordered_unique<
261 
262  composite_key<Indexes,
263 
264  const_mem_fun<Indexes, UId, &Indexes::getRowUId>,
265  const_mem_fun<Indexes, UId, &Indexes::getColUId>>
266 
267  >,
268 
269  ordered_non_unique<
270 
271  const_mem_fun<Indexes, UId, &Indexes::getFEUId>
272 
273  >
274 
275  >>;
276 
277  BlockIndex blockIndex; ///< blocks indexes storage
278 };
279 
281 
284 
285  boost::shared_ptr<std::vector<double>> dataBlocksPtr;
286  boost::shared_ptr<std::vector<double>> preconditionerBlocksPtr;
287  boost::shared_ptr<std::vector<double>> parentBlockStructurePtr;
288 
290 };
291 
298 
300  PetscLogEventRegister("schurMatSetVal", 0, &MOFEM_EVENT_schurMatSetValues);
301  PetscLogEventRegister("opSchurAsmEnd", 0, &MOFEM_EVENT_opSchurAssembleEnd);
302  PetscLogEventRegister("blockSetVal", 0, &MOFEM_EVENT_BlockStructureSetValues);
303  PetscLogEventRegister("blockMult", 0, &MOFEM_EVENT_BlockStructureMult);
304  PetscLogEventRegister("blockSolve", 0, &MOFEM_EVENT_BlockStructureSolve);
305  PetscLogEventRegister("schurZeroRandC", 0, &MOFEM_EVENT_zeroRowsAndCols);
306 }
307 
// Definitions of SchurElemMats static storage. These are process-wide scratch
// buffers, indexed by SchurElemMats::iDX, reused across elements so that local
// matrices and index vectors are not reallocated on every element evaluation.
309 boost::ptr_vector<MatrixDouble> SchurElemMats::locMats;
310 boost::ptr_vector<VectorInt> SchurElemMats::rowIndices;
311 boost::ptr_vector<VectorInt> SchurElemMats::colIndices;
312 boost::ptr_vector<SchurElemMats> SchurElemMats::schurElemMats;
314 
/**
 * @brief Record position in the shared static storage and the row/column UIds.
 *
 * @param idx index into the static storage arrays (locMats/rowIndices/colIndices)
 * @param uid_row unique id of the row field entity (or combined id for blocks)
 * @param uid_col unique id of the column field entity
 */
315 SchurElemMats::SchurElemMats(const size_t idx, const UId uid_row,
316  const UId uid_col)
317  : iDX(idx), uidRow(uid_row), uidCol(uid_col) {}
318 
321  const EntitiesFieldData::EntData &col_data,
322  const MatrixDouble &mat, InsertMode iora) {
324 
325 #ifndef NDEBUG
326  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_schurMatSetValues, 0, 0, 0, 0);
327 #endif // NDEBUG
328 
329  // get row indices, in case of store, get indices from storage
330  // storage keeps marked indices to manage boundary conditions
331  auto get_row_indices = [&]() -> const VectorInt & {
332  if (auto e_ptr = row_data.getFieldEntities()[0]) {
333  if (auto stored_data_ptr =
334  e_ptr->getSharedStoragePtr<EssentialBcStorage>()) {
335  return stored_data_ptr->entityIndices;
336  }
337  }
338  return row_data.getIndices();
339  };
340 
341  const auto &row_ind = get_row_indices();
342  const auto &col_ind = col_data.getIndices();
343 
344  const auto nb_rows = row_ind.size();
345  const auto nb_cols = col_ind.size();
346 
347 #ifndef NDEBUG
348  if (mat.size1() != nb_rows) {
349  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
350  "Wrong mat size %d != %d", mat.size1(), nb_rows);
351  }
352  if (mat.size2() != nb_cols) {
353  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
354  "Wrong mat size %d != %d", mat.size2(), nb_cols);
355  }
356 #endif // NDEBUG
357 
358  // get entity uid
359  auto get_uid = [](auto &data) {
360  if (data.getFieldEntities().size() == 1) {
361 
362  return data.getFieldEntities()[0]->getLocalUniqueId();
363 
364  } else {
365 
366  // Is assumed that sum of entities ids gives unique id, that is not true,
367  // but corner case is improbable.
368 
369  // @todo: debug version should have test
370 
371  auto &uid0 = data.getFieldEntities()[0]->getLocalUniqueId();
372  auto field_id0 = FieldEntity::getFieldBitNumberFromUniqueId(uid0);
373  auto ent0 = FieldEntity::getHandleFromUniqueId(uid0);
374  auto type0 = type_from_handle(ent0);
375  auto id = id_from_handle(ent0);
376 
377  for (auto i = 1; i < data.getFieldEntities().size(); ++i) {
378 
379  // get entity id from ent
380  id += id_from_handle(
381 
382  // get entity handle from unique uid
384  data.getFieldEntities()[i]->getLocalUniqueId())
385 
386  );
387  }
388 
390  field_id0,
391 
392  ent_form_type_and_id(type0, id)
393 
394  );
395  }
396  };
397 
398  auto uid_row = get_uid(row_data);
399  auto uid_col = get_uid(col_data);
400 
401  auto it =
402  SchurElemMats::schurL2Storage.template get<SchurElemMats::uid_mi_tag>()
403  .find(boost::make_tuple(uid_row, uid_col));
404 
405  if (it ==
406  SchurElemMats::schurL2Storage.template get<SchurElemMats::uid_mi_tag>()
407  .end()) {
408 
409  // get size of arrays of matrices
410  const auto size = SchurElemMats::locMats.size();
411 
412  // expand memory allocation
413  if (SchurElemMats::maxIndexCounter == size) {
414  SchurElemMats::locMats.push_back(new MatrixDouble());
415  SchurElemMats::rowIndices.push_back(new VectorInt());
416  SchurElemMats::colIndices.push_back(new VectorInt());
418  new SchurElemMats(SchurElemMats::maxIndexCounter, uid_row, uid_col));
419  } else {
421  uid_row;
423  uid_col;
424  }
425 
426  // add matrix to storage
427  auto p = SchurElemMats::schurL2Storage.emplace(
429 #ifndef NDEBUG
430  if (!p.second) {
431  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "Failed to insert");
432  }
433 #endif // NDEBUG
434 
435  auto asmb = [&](auto &sm) {
436  sm.resize(nb_rows, nb_cols, false);
437  noalias(sm) = mat;
438  };
439 
440  asmb((*p.first)->getMat());
441 
442  auto add_indices = [](auto &storage, auto &ind) {
443  storage.resize(ind.size(), false);
444  noalias(storage) = ind;
445  };
446 
447  add_indices((*p.first)->getRowInd(), row_ind);
448  add_indices((*p.first)->getColInd(), col_ind);
449 
450  } else {
451  // entry (submatrix) already exists
452 
453  auto asmb = [&](auto &sm) {
455 
456 #ifndef NDEBUG
457  if (sm.size1() != nb_rows) {
458  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
459  "Wrong mat or storage size %d != %d", sm.size1(), nb_rows);
460  }
461  if (sm.size2() != nb_cols) {
462  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
463  "Wrong mat or storage size %d != %d", sm.size2(), nb_cols);
464  }
465 #endif // NDEBUG
466 
467  switch (iora) {
468  case ADD_VALUES:
469  sm += mat;
470  break;
471  case INSERT_VALUES:
472  noalias(sm) = mat;
473  break;
474  default:
475  SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED,
476  "Assembly type not implemented");
477  }
479  };
480 
481  CHKERR asmb((*it)->getMat());
482 
483  // no need to set indices
484  }
485 
486 #ifndef NDEBUG
487  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_schurMatSetValues, 0, 0, 0, 0);
488 #endif // NDEBUG
489 
491 }
492 
494  : OpSchurAssembleBaseImpl(NOSPACE, OPSPACE) {}
495 
499 #ifndef NDEBUG
500  if constexpr (debug_schur)
501  MOFEM_LOG("SELF", Sev::noisy) << "Schur assemble begin";
502 #endif
505 
507 }
508 
509 template <typename OP_SCHUR_ASSEMBLE_BASE>
511  std::vector<std::string> fields_name,
512  std::vector<boost::shared_ptr<Range>> field_ents,
513  SmartPetscObj<AO> schur_ao, SmartPetscObj<Mat> schur_mat, bool sym_schur,
514  bool symm_op)
515  : OP(NOSPACE, OP::OPSPACE, symm_op), fieldsName(fields_name),
516  fieldEnts(field_ents), schurAO(schur_ao), schurMat(schur_mat),
517  symSchur(sym_schur) {}
518 
519 template <typename OP_SCHUR_ASSEMBLE_BASE>
520 template <typename I>
522  int side, EntityType type, EntitiesFieldData::EntData &data) {
523 
525 
526 #ifndef NDEBUG
527  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_opSchurAssembleEnd, 0, 0, 0, 0);
528 #endif
529 
530 #ifndef NDEBUG
531  if constexpr (debug_schur)
532  MOFEM_LOG("SELF", Sev::noisy) << "Schur assemble begin -> end";
533 #endif
534 
535  auto get_field_name = [&](auto uid) {
536  return OP::getPtrFE()->mField.get_field_name(field_bit_from_bit_number(
537  FieldEntity::getFieldBitNumberFromUniqueId(uid)));
538  };
539 
540  auto get_a00_uids = [&]() {
541  auto get_field_bit = [&](auto &name) {
542  return OP::getPtrFE()->mField.get_field_bit_number(name);
543  };
544 
545  std::vector<std::pair<UId, UId>> a00_uids;
546  a00_uids.reserve(fieldsName.size());
547  for (auto ss = 0; ss != fieldsName.size(); ++ss) {
548  auto field_bit = get_field_bit(fieldsName[ss]);
549  auto row_ents = fieldEnts[ss];
550  if (row_ents) {
551  for (auto p = row_ents->pair_begin(); p != row_ents->pair_end(); ++p) {
552  auto lo_uid =
553  FieldEntity::getLoLocalEntityBitNumber(field_bit, p->first);
554  auto hi_uid =
555  FieldEntity::getHiLocalEntityBitNumber(field_bit, p->second);
556  a00_uids.push_back(std::make_pair(lo_uid, hi_uid));
557  }
558  } else {
559  auto lo_uid = FieldEntity::getLoLocalEntityBitNumber(
560  field_bit, get_id_for_min_type<MBVERTEX>());
561  auto hi_uid = FieldEntity::getHiLocalEntityBitNumber(
562  field_bit, get_id_for_max_type<MBENTITYSET>());
563  a00_uids.push_back(std::make_pair(lo_uid, hi_uid));
564  }
565  }
566  return a00_uids;
567  };
568 
569 #ifndef NDEBUG
570  auto list_storage = [&](auto &storage) {
572  int i = 0;
573  for (auto &p : storage) {
574  MOFEM_LOG("SELF", Sev::noisy)
575  << "List schur storage: " << i << " " << p->iDX << ": "
576  << get_field_name(p->uidRow) << " " << get_field_name(p->uidCol)
577  << " : " << p->getMat().size1() << " " << p->getMat().size2();
578  ++i;
579  }
581  };
582 #endif // NDEBUG
583 
584  auto assemble_dense_blocks = [&]() {
585  using matrix_range = ublas::matrix_range<MatrixDouble>;
586  using range = ublas::range;
588  auto &storage = SchurElemMats::schurL2Storage;
589 
590  auto assemble_schur = [this](auto &m, auto &uid_row, auto &uid_col,
591  auto *row_ind_ptr, auto *col_ind_ptr) {
593 
594  if (schurMat) {
595 
596  if (auto ierr = this->assembleSchurMat(
597 
598  schurMat, uid_row, *row_ind_ptr, uid_col, *col_ind_ptr, m,
599  ADD_VALUES
600 
601  )) {
602 #ifndef NDEBUG
603  auto field_ents = OP::getPtrFE()->mField.get_field_ents();
604  auto row_ent_it = field_ents->find(uid_row);
605  auto col_ent_it = field_ents->find(uid_col);
606  MOFEM_LOG_CHANNEL("SELF");
607  if (row_ent_it != field_ents->end())
608  MOFEM_LOG("SELF", Sev::error)
609  << "Assemble row entity: " << (*row_ent_it)->getName() << " "
610  << (*col_ent_it)->getEntTypeName() << " side "
611  << (*row_ent_it)->getSideNumber();
612  if (col_ent_it != field_ents->end())
613  MOFEM_LOG("SELF", Sev::error)
614  << "Assemble col entity: " << (*col_ent_it)->getName() << " "
615  << (*col_ent_it)->getEntTypeName() << " side "
616  << (*col_ent_it)->getSideNumber();
617 #endif // NDEBUG
618  CHK_THROW_MESSAGE(ierr, "MatSetValues");
619  }
620  }
621 
623  };
624 
625  auto a00_uids = get_a00_uids();
626 
627  auto get_block_indexing = [&](auto &a00_uids) {
628  // iterate over a00 uids and find blocks
629  std::vector<const SchurElemMats *> block_list;
630  block_list.reserve(storage.size());
631  for (auto &rp_uid : a00_uids) {
632  auto [rlo_uid, rhi_uid] = rp_uid;
633  for (auto &cp_uid : a00_uids) {
634  auto [clo_uid, chi_uid] = cp_uid;
635 
636  auto it =
637  storage.template get<SchurElemMats::uid_mi_tag>().lower_bound(
638  boost::make_tuple(rlo_uid, clo_uid));
639  auto hi_it =
640  storage.template get<SchurElemMats::uid_mi_tag>().upper_bound(
641  boost::make_tuple(rhi_uid, chi_uid));
642 
643  for (; it != hi_it; ++it) {
644  if ((*it)->uidRow >= rlo_uid && (*it)->uidRow < rhi_uid &&
645  (*it)->uidCol >= clo_uid && (*it)->uidCol < chi_uid) {
646  block_list.push_back(*it);
647  }
648  }
649  }
650  }
651 
652  // create block indexes map for blockMat
653  std::map<UId, std::pair<size_t, const VectorInt *>>
654  block_indexing; // uid block map
655  for (auto d : block_list) {
656  if (block_indexing.find(d->uidRow) == block_indexing.end()) {
657  block_indexing[d->uidRow] =
658  std::make_pair(d->getRowInd().size(), &(d->getRowInd()));
659  }
660  }
661 
662  // set indexes to block
663  int mat_block_size = 0; // size of block matrix
664  for (auto &p_uid : a00_uids) {
665  auto [lo_uid, hi_uid] = p_uid;
666  auto lo = block_indexing.lower_bound(lo_uid);
667  auto up = block_indexing.upper_bound(hi_uid);
668  for (; lo != up; ++lo) {
669  lo->second.first = mat_block_size;
670  mat_block_size += lo->second.second->size();
671  }
672  }
673 
674  return std::make_tuple(block_list, block_indexing, mat_block_size);
675  };
676 
677  auto get_schur_block_list = [&](auto &block_indexing) {
678  std::vector<const SchurElemMats *> block_list;
679  block_list.reserve(storage.size());
680  for (auto &s : storage) {
681  if (block_indexing.find(s->uidRow) == block_indexing.end() &&
682  block_indexing.find(s->uidCol) == block_indexing.end()) {
683  block_list.push_back(s);
684  }
685  }
686  return block_list;
687  };
688 
689  auto [block_list, block_indexing, block_mat_size] =
690  get_block_indexing(a00_uids);
691 
692  if (block_mat_size == 0) {
693 
694  if (schurAO) {
695  for (auto &s : storage) {
696  CHKERR AOApplicationToPetsc(schurAO, s->getRowInd().size(),
697  &*s->getRowInd().begin());
698  CHKERR AOApplicationToPetsc(schurAO, s->getColInd().size(),
699  &*s->getColInd().begin());
700  }
701  }
702 
703  for (auto &s : storage) {
704  auto &m = s->getMat();
705  CHKERR assemble_schur(m, s->uidRow, s->uidCol, &(s->getRowInd()),
706  &(s->getColInd()));
707  }
709  }
710 
711  blockMat.resize(block_mat_size, block_mat_size, false);
712  blockMat.clear();
713 
714  auto get_range = [](auto &bi) {
715  return range(bi.first, bi.first + bi.second->size());
716  };
717 
718  for (auto &s : block_list) {
719  auto &m = s->getMat();
720 #ifndef NDEBUG
721  if (block_indexing.find(s->uidRow) == block_indexing.end())
722  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "Wrong rlo_uid");
723  if (block_indexing.find(s->uidCol) == block_indexing.end())
724  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "Wrong clo_uid");
725 #endif // NDEBUG
726 
727  auto &rbi = block_indexing.at(s->uidRow);
728  auto &cbi = block_indexing.at(s->uidCol);
729 
730  auto sub_mat = matrix_range(blockMat, get_range(rbi), get_range(cbi));
731  sub_mat = m;
732  }
733 
734  auto get_zeroed_indices = [&](auto extractor_uid, auto extractor_ind) {
735  std::vector<int> zeroed_indices;
736  zeroed_indices.reserve(block_mat_size);
737  for (auto &s : block_list) {
738  auto &bi = block_indexing.at(extractor_uid(s));
739  auto &ind = extractor_ind(s);
740  for (auto it = ind.begin(); it != ind.end(); ++it) {
741  if (*it < 0) {
742  auto idx = bi.first + std::distance(ind.begin(), it);
743  zeroed_indices.push_back(idx);
744  }
745  }
746  }
747  std::sort(zeroed_indices.begin(), zeroed_indices.end());
748  auto it = std::unique(zeroed_indices.begin(), zeroed_indices.end());
749  zeroed_indices.resize(std::distance(zeroed_indices.begin(), it));
750  return zeroed_indices;
751  };
752  auto zero_rows = get_zeroed_indices(
753  [](auto &s) { return s->uidRow; },
754  [](auto &s) -> VectorInt & { return s->getRowInd(); });
755  auto zero_cols = get_zeroed_indices(
756  [](auto &s) { return s->uidCol; },
757  [](auto &s) -> VectorInt & { return s->getColInd(); });
758 
759  for (auto i : zero_rows) {
760  for (auto j = 0; j != blockMat.size2(); ++j) {
761  blockMat(i, j) = 0;
762  }
763  }
764  for (auto j : zero_cols) {
765  for (auto i = 0; i != blockMat.size1(); ++i) {
766  blockMat(i, j) = 0;
767  }
768  }
769  for (auto i : zero_rows) {
770  blockMat(i, i) = 1;
771  }
772 
773  CHKERR I::invertMat(blockMat, invMat);
774 
775  // clear storage and block list from a00 blocks, no more needed
776  for (auto &s : block_list) {
777  auto it = storage.template get<SchurElemMats::uid_mi_tag>().find(
778  boost::make_tuple(s->uidRow, s->uidCol));
779  storage.template get<SchurElemMats::uid_mi_tag>().erase(it);
780  }
781  block_list.clear();
782 
783  if (schurAO) {
784  for (auto &s : storage) {
785  if (block_indexing.find(s->uidRow) == block_indexing.end()) {
786  CHKERR AOApplicationToPetsc(schurAO, s->getRowInd().size(),
787  &*s->getRowInd().begin());
788  }
789  if (block_indexing.find(s->uidCol) == block_indexing.end()) {
790  CHKERR AOApplicationToPetsc(schurAO, s->getColInd().size(),
791  &*s->getColInd().begin());
792  }
793  }
794  }
795 
796  auto schur_block_list = get_schur_block_list(block_indexing);
797  for(auto &s : schur_block_list) {
798  auto &m = s->getMat();
799  CHKERR assemble_schur(m, s->uidRow, s->uidCol, &(s->getRowInd()),
800  &(s->getColInd()));
801  }
802  for (auto &s : schur_block_list) {
803  auto it = storage.template get<SchurElemMats::uid_mi_tag>().find(
804  boost::make_tuple(s->uidRow, s->uidCol));
805  storage.template get<SchurElemMats::uid_mi_tag>().erase(it);
806  }
807  schur_block_list.clear();
808 
809  for (
810 
811  auto rp_uid_it = a00_uids.begin(); rp_uid_it != a00_uids.end();
812  ++rp_uid_it
813 
814  ) {
815  auto [rlo_uid, rhi_uid] = *rp_uid_it;
816  for (auto rm = block_indexing.lower_bound(rlo_uid);
817  rm != block_indexing.upper_bound(rhi_uid); ++rm) {
818  auto &rbi = rm->second;
819 
820  auto a_lo_tmp =
821  storage.template get<SchurElemMats::col_mi_tag>().lower_bound(
822  rm->first);
823  auto a_hi =
824  storage.template get<SchurElemMats::col_mi_tag>().upper_bound(
825  rm->first);
826 
827  for (
828 
829  auto cp_uid_it = (symSchur) ? rp_uid_it : a00_uids.begin();
830  cp_uid_it != a00_uids.end(); ++cp_uid_it
831 
832  ) {
833  auto [clo_uid, chi_uid] = *cp_uid_it;
834  for (auto cm = block_indexing.lower_bound(clo_uid);
835  cm != block_indexing.upper_bound(chi_uid); ++cm) {
836  auto &cbi = cm->second;
837 
838  auto c_lo_tmp =
839  storage.template get<SchurElemMats::uid_mi_tag>().lower_bound(
840  boost::make_tuple(cm->first, 0));
841  auto c_hi =
842  storage.template get<SchurElemMats::uid_mi_tag>().upper_bound(
843  boost::make_tuple(cm->first, FieldEntity::getHiBitNumberUId(
844  BITFIELDID_SIZE - 1)));
845 
846  auto sub_inv_mat =
847  matrix_range(invMat, get_range(rbi), get_range(cbi));
848  bM.resize(sub_inv_mat.size1(), sub_inv_mat.size2());
849  noalias(bM) = sub_inv_mat;
850 
851  for (auto a_lo = a_lo_tmp; a_lo != a_hi; ++a_lo) {
852 #ifndef NDEBUG
853  if (block_indexing.find((*a_lo)->uidRow) != block_indexing.end())
854  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
855  "Wrong a_lo->uidRow");
856 #endif
857 
858  auto &a = (*a_lo)->getMat();
859  abM.resize(a.size1(), bM.size2(), false);
860  abM.clear();
861  cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
862 
863  a.size1(), bM.size2(), a.size2(), 1.,
864 
865  &*a.data().begin(), a.size2(),
866 
867  &*bM.data().begin(), bM.size2(), 0.,
868 
869  &*abM.data().begin(), abM.size2());
870 
871  for (auto c_lo = c_lo_tmp; c_lo != c_hi; ++c_lo) {
872 #ifndef NDEBUG
873  if (block_indexing.find((*c_lo)->uidCol) !=
874  block_indexing.end())
875  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
876  "Wrong a_lo->uidRow");
877 #endif
878 
879  auto &c = (*c_lo)->getMat();
880 
881  abcM.resize(abM.size1(), c.size2(), false);
882  abcM.clear();
883 
884  cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
885  abM.size1(), c.size2(), abM.size2(), -1.,
886 
887  &*abM.data().begin(), abM.size2(),
888 
889  &*c.data().begin(), c.size2(), 0.,
890 
891  &*abcM.data().begin(), abcM.size2());
892 
893  CHKERR assemble_schur(abcM, (*a_lo)->uidRow, (*c_lo)->uidCol,
894  &((*a_lo)->getRowInd()),
895  &((*c_lo)->getColInd()));
896 
897  if (symSchur && rp_uid_it != cp_uid_it) {
898  abcM = trans(abcM);
899  CHKERR assemble_schur(abcM, (*c_lo)->uidCol, (*a_lo)->uidRow,
900  &((*c_lo)->getColInd()),
901  &((*a_lo)->getRowInd()));
902  }
903  }
904  }
905  }
906  }
907  }
908  }
909 
911  };
912 
913  // Assemble Schur complements
914  CHKERR assemble_dense_blocks();
915 
916 #ifndef NDEBUG
917  if constexpr (debug_schur)
918  MOFEM_LOG("SELF", Sev::noisy) << "Schur assemble done";
919 #endif
920 
921 #ifndef NDEBUG
922  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_opSchurAssembleEnd, 0, 0, 0, 0);
923 #endif
924 
926 }
927 
/**
 * @brief Dense inverse used by the Schur assembly for the (intended) symmetric
 * case.
 *
 * NOTE(review): despite the SY (symmetric) name, the inversion below uses the
 * general LU factorization (lapack_dgetrf/lapack_dgetri) — see the FIXME; a
 * symmetric dsytrf/dsytri path is not implemented here.
 *
 * On return, `inv` holds the inverse of the input; `m` is clobbered (its data
 * is swapped into `inv` and it is reused as LAPACK workspace).
 */
928 struct SchurDSYSV {
929 
930  static auto invertMat(MatrixDouble &m, MatrixDouble &inv) {
932 
933  const auto nb = m.size1();
934 #ifndef NDEBUG
935  if (nb != m.size2()) {
936  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
937  "It should be square matrix %d != %d", nb, m.size2());
938  }
939 #endif
940 
  // Move the input data into `inv` (invert in place there) and enlarge `m`
  // so its buffer can serve as the dgetri work array (lwork = (2*nb)^2).
941  inv.resize(nb, nb, false);
942  inv.swap(m);
943  m.resize(2 * nb, 2 * nb, false);
944 
945  VectorInt ipiv(nb);
946  int info;
947 
948  // FIXME: add lapack_dsytrf and lapack_dsytri
949 
  // LU-factorize, then form the inverse from the factors.
950  info = lapack_dgetrf(nb, nb, &*inv.data().begin(), nb, &*ipiv.begin());
951  if (info)
952  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
953  "lapack error info = %d", info);
954  info = lapack_dgetri(nb, &*inv.data().begin(), nb, &*ipiv.begin(),
955  &*m.data().begin(), m.data().size());
956  if (info)
957  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
958  "lapack error info = %d", info);
959 
961  }
962 };
963 
/**
 * @brief Dense inverse used by the Schur assembly for the general
 * (nonsymmetric) case, via LU factorization (dgetrf/dgetri).
 *
 * On return, `inv` holds the inverse of the input; `m` is clobbered — its
 * data is swapped into `inv` and its (nb x nb) buffer is reused as the
 * dgetri work array.
 */
964 struct SchurDGESV {
965 
966  static auto invertMat(MatrixDouble &m, MatrixDouble &inv) {
968 
969  const auto nb = m.size1();
970 #ifndef NDEBUG
971  if (nb != m.size2()) {
972  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
973  "It should be square matrix %d != %d", nb, m.size2());
974  }
975 #endif
976 
  // Move the input data into `inv`; the inverse is computed in place there.
977  inv.resize(nb, nb, false);
978  inv.swap(m);
979 
980  VectorInt ipiv(nb);
981  int info;
982 
  // LU-factorize, then form the inverse from the factors; `m`'s old buffer
  // (still nb x nb after the swap) supplies the dgetri workspace.
983  info = lapack_dgetrf(nb, nb, &*inv.data().begin(), nb, &*ipiv.begin());
984  if (info)
985  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
986  "lapack error info = %d", info);
987  info = lapack_dgetri(nb, &*inv.data().begin(), nb, &*ipiv.begin(),
988  &*m.data().begin(), m.data().size());
989  if (info)
990  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
991  "lapack error info = %d", info);
992 
994  }
995 };
996 
1000  return doWorkImpl<SchurDSYSV>(side, type, data);
1001 }
1002 
1006  return doWorkImpl<SchurDGESV>(side, type, data);
1007 }
1008 
1009 boost::shared_ptr<BlockStructure> createBlockMatStructure(
1010 
1011  DM dm, //< dm
1012  SchurFEOpsFEandFields schur_fe_op_vec //< block elements
1013 
1014 ) {
1015 
1016  auto cmp_uid_lo = [](const boost::weak_ptr<FieldEntity> &a, const UId &b) {
1017  if (auto a_ptr = a.lock()) {
1018  if (a_ptr->getLocalUniqueId() < b)
1019  return true;
1020  else
1021  return false;
1022  } else {
1023  return false;
1024  }
1025  };
1026 
1027  auto cmp_uid_hi = [](const UId &b, const boost::weak_ptr<FieldEntity> &a) {
1028  if (auto a_ptr = a.lock()) {
1029  if (b < a_ptr->getLocalUniqueId())
1030  return true;
1031  else
1032  return false;
1033  } else {
1034  return true;
1035  }
1036  };
1037 
1038  // get uids for fields
1039  auto get_uid_pair = [](const auto &field_id) {
1040  auto lo_uid = FieldEntity::getLocalUniqueIdCalculate(
1041  field_id, get_id_for_min_type<MBVERTEX>());
1042  auto hi_uid = FieldEntity::getLocalUniqueIdCalculate(
1043  field_id, get_id_for_max_type<MBENTITYSET>());
1044  return std::make_pair(lo_uid, hi_uid);
1045  };
1046 
1047  // get uids pair
1048  auto get_it_pair = [cmp_uid_lo, cmp_uid_hi](auto &&field_ents, auto &&p_uid) {
1049  auto lo = std::lower_bound(field_ents.begin(), field_ents.end(),
1050  p_uid.first, cmp_uid_lo);
1051  auto hi = std::upper_bound(field_ents.begin(), field_ents.end(),
1052  p_uid.second, cmp_uid_hi);
1053  return std::make_pair(lo, hi);
1054  };
1055 
1056  // extract DOFs for rows/columns. DOFs are associated with fields entities
1057  // for given problem.
1058  auto row_extractor = [](auto &e) { return e->entityCacheRowDofs; };
1059  auto col_extractor = [](auto &e) { return e->entityCacheColDofs; };
1060 
1061  auto extract_data = [](auto &&its, auto extractor) {
1062  std::vector<std::tuple<UId, int, int, int>> data;
1063  data.reserve(std::distance(its.first, its.second));
1064  // iterate field dofs
1065  for (; its.first != its.second; ++its.first) {
1066  if (auto e = its.first->lock()) {
1067  if (auto cache = extractor(e).lock()) {
1068  int nb_dofs = 0;
1069  for (auto dof = cache->loHi[0]; dof != cache->loHi[1]; ++dof) {
1070  if ((*dof)->getPetscGlobalDofIdx() != -1)
1071  ++nb_dofs;
1072  }
1073  auto uid = e->getLocalUniqueId();
1074  if (nb_dofs) {
1075 
1076  auto glob = (*cache->loHi[0])->getPetscGlobalDofIdx();
1077  auto loc = (*cache->loHi[0])->getPetscLocalDofIdx();
1078  while (glob == -1 && cache->loHi[0] != cache->loHi[1]) {
1079  ++cache->loHi[0];
1080  glob = (*cache->loHi[0])->getPetscGlobalDofIdx();
1081  loc = (*cache->loHi[0])->getPetscLocalDofIdx();
1082  }
1083  data.emplace_back(uid, glob, nb_dofs, loc);
1084 
1085 #ifndef NDEBUG
1086 
1087  for (auto lo = cache->loHi[0]; lo != cache->loHi[1]; ++lo) {
1088  auto glob = (*lo)->getPetscGlobalDofIdx();
1089  if (glob == -1) {
1091  "Wrong global index");
1092  }
1093  }
1094 
1095 #endif
1096  } else {
1097  data.emplace_back(uid, -1, 0, -1);
1098  }
1099  }
1100  }
1101  }
1102  return data;
1103  };
1104 
1105  auto data_ptr = boost::make_shared<BlockStructure>();
1106  auto m_field_ptr = getInterfacePtr(dm);
1107 
1108  // create element to extract data
1109  auto fe_method = boost::shared_ptr<MoFEM::FEMethod>(new MoFEM::FEMethod());
1110 
1111  for (auto &d : schur_fe_op_vec) {
1112 
1113  // extract bit numbers for row and column
1114  auto get_bit_numbers = [&d](auto op) {
1115  std::vector<FieldBitNumber> bit_numbers(d.second.size());
1116  std::transform(d.second.begin(), d.second.end(), bit_numbers.begin(), op);
1117  return bit_numbers;
1118  };
1119 
1120  // extract bit numbers for row
1121  auto row_bit_numbers = get_bit_numbers(
1122  [&](auto &p) { return m_field_ptr->get_field_bit_number(p.first); });
1123  // extract bit numbers for row
1124  auto col_bit_numbers = get_bit_numbers(
1125  [&](auto &p) { return m_field_ptr->get_field_bit_number(p.second); });
1126 
1127  fe_method->preProcessHook = []() { return 0; };
1128  fe_method->postProcessHook = []() { return 0; };
1129  fe_method->operatorHook = [&]() {
1131 
1132  auto fe_uid = fe_method->numeredEntFiniteElementPtr->getLocalUniqueId();
1133 
1134  for (auto f = 0; f != row_bit_numbers.size(); ++f) {
1135 
1136  auto row_data =
1137  extract_data(get_it_pair(fe_method->getRowFieldEnts(),
1138  get_uid_pair(row_bit_numbers[f])),
1139  row_extractor);
1140  auto col_data =
1141  extract_data(get_it_pair(fe_method->getColFieldEnts(),
1142  get_uid_pair(col_bit_numbers[f])),
1143  col_extractor);
1144 
1145  for (auto &r : row_data) {
1146  auto [r_uid, r_glob, r_nb_dofs, r_loc] = r;
1147  for (auto &c : col_data) {
1148  auto [c_uid, c_glob, c_nb_dofs, c_loc] = c;
1149  data_ptr->blockIndex.insert(BlockStructure::Indexes{
1150  r_uid, c_uid, fe_uid, r_glob, c_glob, r_nb_dofs, c_nb_dofs,
1151  r_loc, c_loc, -1});
1152  }
1153  }
1154  }
1155 
1157  };
1158 
1159  CHKERR DMoFEMLoopFiniteElementsUpAndLowRank(dm, d.first, fe_method, 0,
1160  m_field_ptr->get_comm_size());
1161  };
1162 
1163  // order by column (that is for matrix multiplication)
1164  auto mem_size = 0;
1165  for (auto &v : data_ptr->blockIndex.get<0>()) {
1166  v.getMatShift() = mem_size;
1167  mem_size += v.getNbCols() * v.getNbRows();
1168  }
1169 
1170  std::vector<double> tmp;
1171  if (mem_size > tmp.max_size())
1173 
1174  data_ptr->dataBlocksPtr =
1175  boost::make_shared<std::vector<double>>(mem_size, 0);
1176 
1177  auto ghost_x = createDMVector(dm);
1178  auto ghost_y = createDMVector(dm);
1179  CHKERR VecSetDM(ghost_x, PETSC_NULL);
1180  CHKERR VecSetDM(ghost_y, PETSC_NULL);
1181 
1182  data_ptr->ghostX = ghost_x;
1183  data_ptr->ghostY = ghost_y;
1184 
1185  return data_ptr;
1186 }
1187 
1189  Mat mat, Vec x, Vec y, InsertMode iora,
1190  boost::function<
1191  int(DiagBlockIndex::BlockIndex::nth_index<0>::type::iterator)>
1192  shift_extractor,
1193  boost::shared_ptr<std::vector<double>> data_blocks_ptr,
1194  bool multiply_by_preconditioner);
1195 
1196 static MoFEMErrorCode solve_schur_block_shell(Mat mat, Vec y, Vec x,
1197  InsertMode iora);
1198 
// MATOP_MULT handler of the shell block matrix: y = A * x.
// Pulls the BlockStructure out of the shell context and forwards to
// mult_schur_block_shell with INSERT_VALUES; blocks are read from
// ctx->dataBlocksPtr at their getMatShift() offsets. Last argument `true`
// also applies the preconditioner blocks when the context enables it.
1199 static PetscErrorCode mult(Mat mat, Vec x, Vec y) {
1200  BlockStructure *ctx;
1201  CHKERR MatShellGetContext(mat, (void **)&ctx);
1202  return mult_schur_block_shell(
1203  mat, x, y, INSERT_VALUES,
1204 
1205  [](DiagBlockIndex::BlockIndex::nth_index<0>::type::iterator it) {
1206  return it->getMatShift();
1207  },
1208 
1209  ctx->dataBlocksPtr, true);
1210 }
// MATOP_MULT_ADD handler: y += A * x. Identical to mult() except the
// result is accumulated into y (ADD_VALUES) instead of overwriting it.
1211 static PetscErrorCode mult_add(Mat mat, Vec x, Vec y) {
1212  BlockStructure *ctx;
1213  CHKERR MatShellGetContext(mat, (void **)&ctx);
1214  return mult_schur_block_shell(
1215  mat, x, y, ADD_VALUES,
1216 
1217  [](DiagBlockIndex::BlockIndex::nth_index<0>::type::iterator it) {
1218  return it->getMatShift();
1219  },
1220 
1221  ctx->dataBlocksPtr, true);
1222 }
// MATOP_SOLVE handler: overwrite y with the element-block-wise solve of
// A y = x (see solve_schur_block_shell).
1223 static PetscErrorCode solve(Mat mat, Vec x, Vec y) {
1224  return solve_schur_block_shell(mat, x, y, INSERT_VALUES);
1225 }
// MATOP_SOLVE_ADD handler: same as solve(), but the solution is added
// into y rather than replacing it.
1226 static PetscErrorCode solve_add(Mat mat, Vec x, Vec y) {
1227  return solve_schur_block_shell(mat, x, y, ADD_VALUES);
1228 }
1229 
// MATOP_ZERO_ROWS_COLUMNS handler: zero the requested global rows and
// columns directly inside the raw block storage, writing `diag` on the
// diagonal entry of diagonal blocks owned locally. Vectors x and b are
// accepted for the PETSc interface but not modified here.
1230 static PetscErrorCode zero_rows_columns(Mat A, PetscInt N,
1231  const PetscInt rows[], PetscScalar diag,
1232  Vec x, Vec b) {
1233 
1235  BlockStructure *ctx;
1236  CHKERR MatShellGetContext(A, (void **)&ctx);
1237 
1238  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_zeroRowsAndCols, 0, 0, 0, 0);
1239 
  // Non-owning view over all blocks, ordered once by row shift and once by
  // column shift, so blocks hit by a given row/column can be found with
  // lower_bound instead of a full scan.
1240  using BlockIndexView = multi_index_container<
1241 
1242  const DiagBlockIndex::Indexes *,
1243 
1244  indexed_by<
1245 
1246  ordered_non_unique<
1247 
1248  const_mem_fun<DiagBlockIndex::Indexes, int,
1249  &DiagBlockIndex::Indexes::rowShift>
1250 
1251  >,
1252 
1253  ordered_non_unique<
1254 
1255  const_mem_fun<DiagBlockIndex::Indexes, int,
1256  &DiagBlockIndex::Indexes::colShift>
1257 
1258  >
1259 
1260  >>;
1261 
1262  BlockIndexView view;
1263  auto hint = view.get<0>().end();
1264  for (auto &v : ctx->blockIndex) {
1265  hint = view.insert(hint, &v);
1266  }
1267 
1268  const int *ptr = &rows[0];
1269  int is_nb_rows = N;
1270  SmartPetscObj<IS> is_local;
1271 
  // In parallel, gather indices from all ranks: a zeroed column may cut
  // through blocks owned by any process.
1272  MPI_Comm comm;
1273  CHKERR PetscObjectGetComm((PetscObject)A, &comm);
1274  int size;
1275  MPI_Comm_size(comm, &size);
1276  if (size > 1) {
1277  auto is = createISGeneral(comm, N, rows, PETSC_USE_POINTER);
1278  is_local = isAllGather(is);
1279  }
1280  if (is_local) {
1281  CHKERR ISGetSize(is_local, &is_nb_rows);
1282 #ifndef NDEBUG
1283  if constexpr (debug_schur) {
1284  CHKERR ISView(is_local, PETSC_VIEWER_STDOUT_WORLD);
1285  }
1286 #endif
1287  CHKERR ISGetIndices(is_local, &ptr);
1288  }
1289 
1290  int loc_m, loc_n;
1291  CHKERR MatGetLocalSize(A, &loc_m, &loc_n);
1292 
  // Pass 1: zero row r_shift of every block that covers a requested row.
  // NOTE(review): candidates are taken from lower_bound on rowShift, while
  // the cover test uses getRow()/getNbRows() — confirm rowShift vs getRow()
  // semantics guarantee no covering block sorts before lower_bound(row).
1293  for (auto n = 0; n != is_nb_rows; ++n) {
1294  auto row = ptr[n];
1295  auto rlo = view.get<0>().lower_bound(row);
1296  auto rhi = view.get<0>().end();
1297  for (; rlo != rhi; ++rlo) {
1298  auto r_shift = row - (*rlo)->getRow();
1299  if (r_shift >= 0 && r_shift < (*rlo)->getNbRows()) {
1300  auto *ptr = &(*ctx->dataBlocksPtr)[(*rlo)->getMatShift()];
1301  for (auto i = 0; i != (*rlo)->getNbCols(); ++i) {
1302  ptr[i + r_shift * (*rlo)->getNbCols()] = 0;
1303  }
1304  } else if ((*rlo)->getRow() + (*rlo)->getNbRows() > row) {
1305  break;
1306  }
1307  }
1308  }
1309 
  // Pass 2: zero column c_shift of every block that covers a requested
  // column; on diagonal blocks owned locally write `diag` at the diagonal
  // position (row index equals column index since getRow() == getCol()).
1310  for (auto n = 0; n != is_nb_rows; ++n) {
1311  auto col = ptr[n];
1312  auto clo = view.get<1>().lower_bound(col);
1313  auto chi = view.get<1>().end();
1314  for (; clo != chi; ++clo) {
1315  auto c_shift = col - (*clo)->getCol();
1316  if (c_shift >= 0 && c_shift < (*clo)->getNbCols()) {
1317 
1318  auto *ptr = &(*ctx->dataBlocksPtr)[(*clo)->getMatShift()];
1319  for (auto i = 0; i != (*clo)->getNbRows(); ++i) {
1320  ptr[c_shift + i * (*clo)->getNbCols()] = 0;
1321  }
1322 
1323  // diagonal
1324  if (
1325 
1326  (*clo)->getRow() == (*clo)->getCol() &&
1327  (*clo)->getLocRow() < loc_m && (*clo)->getLocCol() < loc_n
1328 
1329  ) {
1330  auto r_shift = col - (*clo)->getCol();
1331  if (r_shift >= 0 && r_shift < (*clo)->getNbRows()) {
1332  ptr[c_shift + r_shift * (*clo)->getNbCols()] = diag;
1333  }
1334  }
1335  } else if ((*clo)->getCol() + (*clo)->getNbCols() > col) {
1336  break;
1337  }
1338  }
1339  }
1340 
1341  if (is_local) {
1342  CHKERR ISRestoreIndices(is_local, &ptr);
1343  }
1344 
1345  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_zeroRowsAndCols, 0, 0, 0, 0);
1346 
1348 }
1349 
// MATOP_ZERO_ENTRIES handler: zero both the data blocks and, when
// allocated, the preconditioner blocks of the shell matrix.
1350 static PetscErrorCode mat_zero(Mat m) {
1352  BlockStructure *ctx;
1353  CHKERR MatShellGetContext(m, (void **)&ctx);
1354  if (ctx->dataBlocksPtr)
1355  std::fill(ctx->dataBlocksPtr->begin(), ctx->dataBlocksPtr->end(), 0.);
1356  if (ctx->preconditionerBlocksPtr)
1357  std::fill(ctx->preconditionerBlocksPtr->begin(),
1358  ctx->preconditionerBlocksPtr->end(), 0.);
1360 }
1361 
  // Register the shell-matrix callbacks (mult/solve/zero variants) on a
  // freshly created MATSHELL. (NOTE(review): the function signature line
  // was lost in extraction; called below as setSchurBlockMatOps(mat_raw).)
1364  CHKERR MatShellSetManageScalingShifts(mat_raw);
1365  CHKERR MatShellSetOperation(mat_raw, MATOP_MULT, (void (*)(void))mult);
1366  CHKERR MatShellSetOperation(mat_raw, MATOP_MULT_ADD,
1367  (void (*)(void))mult_add);
1368  CHKERR MatShellSetOperation(mat_raw, MATOP_SOLVE, (void (*)(void))solve);
1369  CHKERR MatShellSetOperation(mat_raw, MATOP_SOLVE_ADD,
1370  (void (*)(void))solve_add);
1371  CHKERR MatShellSetOperation(mat_raw, MATOP_ZERO_ENTRIES,
1372  (void (*)(void))mat_zero);
1373  CHKERR MatShellSetOperation(mat_raw, MATOP_ZERO_ROWS_COLUMNS,
1374  (void (*)(void))zero_rows_columns);
1375 
1377 };
1378 
  // Create the MATSHELL wrapping `data` for the problem attached to `dm`.
  // Requires a square problem (same row/col sizes, locally and globally).
1380  boost::shared_ptr<BlockStructure> data) {
1381 
1382  auto problem_ptr = getProblemPtr(dm);
1383  auto nb_local = problem_ptr->nbLocDofsRow;
1384  auto nb_global = problem_ptr->nbDofsRow;
1385 
1386  // check that nb. of rows is equal to nb. of columns
1387  if (nb_local != problem_ptr->nbLocDofsCol) {
1388  MOFEM_LOG("SELF", Sev::error)
1389  << "Wrong size " << nb_local << " != " << problem_ptr->nbLocDofsCol;
1391  "nb. cols is inconsistent with nb. rows");
1392  }
1393  if (nb_global != problem_ptr->nbDofsCol) {
1394  MOFEM_LOG("SELF", Sev::error)
1395  << "Wrong size " << nb_global << " != " << problem_ptr->nbDofsCol;
1397  "nb. cols is inconsistent with nb. rows");
1398  }
1399 
1400  // get comm from DM
1401  MPI_Comm comm;
1402  CHKERR PetscObjectGetComm((PetscObject)dm, &comm);
1403 
  // The shell keeps a raw pointer into `data`; ownership stays with the
  // shared_ptr returned alongside the matrix.
1404  Mat mat_raw;
1405  CHKERR MatCreateShell(comm, nb_local, nb_local, nb_global, nb_global,
1406  (void *)data.get(), &mat_raw);
1407  CHKERR setSchurBlockMatOps(mat_raw);
1408  // CHKERR PetscObjectSetName((PetscObject)mat_raw, MoFEM_BLOCK_MAT);
1409 
1410  return std::make_pair(SmartPetscObj<Mat>(mat_raw), data);
1411 }
1412 
// Blocks with min(rows, cols) above this threshold are multiplied with
// cblas_dgemv; smaller blocks use the hand-rolled loops to avoid BLAS
// call overhead.
1413 constexpr int max_gemv_size = 2;
1414 
// Core matrix-vector product of the shell block matrix: computes A*x into
// the ghosted work vectors and then copies (INSERT_VALUES) or adds
// (ADD_VALUES) into y. `shift_extractor` maps a block descriptor to its
// offset in `data_blocks_ptr`; when `multiply_by_preconditioner` is set
// and the context enables it, the preconditioner blocks are applied too.
// Communication is overlapped with computation: the owned part is
// multiplied while the ghost update of x is in flight.
1416  Mat mat, Vec x, Vec y, InsertMode iora,
1417 
1418  boost::function<
1419  int(DiagBlockIndex::BlockIndex::nth_index<0>::type::iterator)>
1420  shift_extractor,
1421 
1422  boost::shared_ptr<std::vector<double>> data_blocks_ptr,
1423  bool multiply_by_preconditioner) {
1425  BlockStructure *ctx;
1426  CHKERR MatShellGetContext(mat, (void **)&ctx);
1427 
1428  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_BlockStructureMult, 0, 0, 0, 0);
1429 
1430  int x_loc_size;
1431  CHKERR VecGetLocalSize(x, &x_loc_size);
1432  int y_loc_size;
1433  CHKERR VecGetLocalSize(y, &y_loc_size);
1434 
1435  Vec ghost_x = ctx->ghostX;
1436  Vec ghost_y = ctx->ghostY;
1437 
1438  CHKERR VecCopy(x, ghost_x);
1439 
  // zero the whole local form of ghost_y (owned + ghost entries)
1440  double *y_array;
1441  Vec loc_ghost_y;
1442  CHKERR VecGhostGetLocalForm(ghost_y, &loc_ghost_y);
1443  int nb_y;
1444  CHKERR VecGetLocalSize(loc_ghost_y, &nb_y);
1445  CHKERR VecGetArray(loc_ghost_y, &y_array);
1446  for (auto i = 0; i != nb_y; ++i)
1447  y_array[i] = 0.;
1448  CHKERR VecRestoreArray(loc_ghost_y, &y_array);
1449  CHKERR VecGhostRestoreLocalForm(ghost_y, &loc_ghost_y);
1450 
  // Multiply all blocks whose local column index lies in [low_x, hi_x)
  // and local row index in [low_y, hi_y); ranges let the caller split the
  // product into an "owned x" part and a "ghosted x" part.
1451  auto mult = [&](int low_x, int hi_x, int low_y, int hi_y) {
1453 
1454  double *x_array;
1455  Vec loc_ghost_x;
1456  CHKERR VecGhostGetLocalForm(ghost_x, &loc_ghost_x);
1457  CHKERR VecGetArray(loc_ghost_x, &x_array);
1458 
1459  double *y_array;
1460  Vec loc_ghost_y;
1461  CHKERR VecGhostGetLocalForm(ghost_y, &loc_ghost_y);
1462  int nb_y;
1463  CHKERR VecGetLocalSize(loc_ghost_y, &nb_y);
1464  CHKERR VecGetArray(loc_ghost_y, &y_array);
1465 
1466  double *block_ptr = &*data_blocks_ptr->begin();
1467  auto it = ctx->blockIndex.get<0>().begin();
1468  auto hi = ctx->blockIndex.get<0>().end();
1469 
1470  while (it != hi) {
1471  if (it->getLocRow() < low_y || it->getLocRow() >= hi_y ||
1472  it->getLocCol() < low_x || it->getLocCol() >= hi_x) {
1473  ++it;
1474  continue;
1475  }
1476 
1477  auto nb_rows = it->getNbRows();
1478  auto nb_cols = it->getNbCols();
1479  auto x_ptr = &x_array[it->getLocCol()];
1480  auto y_ptr = &y_array[it->getLocRow()];
1481  auto ptr = &block_ptr[shift_extractor(it)];
1482 
  // dense row-major block times vector; BLAS only pays off above
  // max_gemv_size
1483  if (std::min(nb_rows, nb_cols) > max_gemv_size) {
1484  cblas_dgemv(CblasRowMajor, CblasNoTrans, nb_rows, nb_cols, 1.0, ptr,
1485  nb_cols, x_ptr, 1, 1.0, y_ptr, 1);
1486  } else {
1487  for (auto r = 0; r != nb_rows; ++r) {
1488  for (auto c = 0; c != nb_cols; ++c) {
1489  y_ptr[r] += ptr[r * nb_cols + c] * x_ptr[c];
1490  }
1491  }
1492  }
1493  ++it;
1494  }
1495 
  // second sweep with the preconditioner blocks (same index structure)
1496  if (multiply_by_preconditioner && ctx->multiplyByPreconditioner) {
1497 
1498  if (!ctx->preconditionerBlocksPtr)
1499  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
1500  "No parentBlockStructurePtr");
1501 
1502  auto preconditioner_ptr = &*ctx->preconditionerBlocksPtr->begin();
1503 
1504  auto it = ctx->blockIndex.get<0>().begin();
1505  auto hi = ctx->blockIndex.get<0>().end();
1506 
1507  while (it != hi) {
1508  if (it->getLocRow() < low_y || it->getLocRow() >= hi_y ||
1509  it->getLocCol() < low_x || it->getLocCol() >= hi_x) {
1510  ++it;
1511  continue;
1512  }
1513 
  // blocks with shift -1 have no allocated preconditioner data
1514  if (it->getMatShift() != -1) {
1515  auto nb_rows = it->getNbRows();
1516  auto nb_cols = it->getNbCols();
1517  auto x_ptr = &x_array[it->getLocCol()];
1518  auto y_ptr = &y_array[it->getLocRow()];
1519  auto ptr = &preconditioner_ptr[it->getMatShift()];
1520  if (std::min(nb_rows, nb_cols) > max_gemv_size) {
1521  cblas_dgemv(CblasRowMajor, CblasNoTrans, nb_rows, nb_cols, 1.0, ptr,
1522  nb_cols, x_ptr, 1, 1.0, y_ptr, 1);
1523  } else {
1524  for (auto r = 0; r != nb_rows; ++r) {
1525  for (auto c = 0; c != nb_cols; ++c) {
1526  y_ptr[r] += ptr[r * nb_cols + c] * x_ptr[c];
1527  }
1528  }
1529  }
1530  }
1531 
1532  ++it;
1533  }
1534  }
1535 
1536  CHKERR VecRestoreArray(loc_ghost_x, &x_array);
1537  CHKERR VecRestoreArray(loc_ghost_y, &y_array);
1538  CHKERR VecGhostRestoreLocalForm(ghost_x, &loc_ghost_x);
1539  CHKERR VecGhostRestoreLocalForm(ghost_y, &loc_ghost_y);
1541  };
1542 
  // overlap: multiply against owned x while ghost values are in transit,
  // then multiply against the ghosted tail once the update completes
1543  constexpr auto max_int = std::numeric_limits<int>::max();
1544  CHKERR VecGhostUpdateBegin(ghost_x, INSERT_VALUES, SCATTER_FORWARD);
1545  CHKERR mult(0, x_loc_size, 0, max_int);
1546  CHKERR VecGhostUpdateEnd(ghost_x, INSERT_VALUES, SCATTER_FORWARD);
1547  CHKERR mult(x_loc_size, max_int, 0, max_int);
1548 
  // accumulate ghost contributions back onto their owners
1549  CHKERR VecGhostUpdateBegin(ghost_y, ADD_VALUES, SCATTER_REVERSE);
1550  CHKERR VecGhostUpdateEnd(ghost_y, ADD_VALUES, SCATTER_REVERSE);
1551 
1552  switch (iora) {
1553  case INSERT_VALUES:
1554  CHKERR VecCopy(ghost_y, y);
1555  break;
1556  case ADD_VALUES:
1557  CHKERR VecAXPY(y, 1., ghost_y);
1558  break;
1559  default:
1560  CHK_MOAB_THROW(MOFEM_NOT_IMPLEMENTED, "Wrong InsertMode");
1561  }
1562 
1563 #ifndef NDEBUG
1564 
1565  auto print_norm = [&](auto msg, auto y) {
1567  PetscReal norm;
1568  CHKERR VecNorm(y, NORM_2, &norm);
1569  int nb_loc_y;
1570  CHKERR VecGetLocalSize(y, &nb_loc_y);
1571  int nb_y;
1572  CHKERR VecGetSize(y, &nb_y);
1573  MOFEM_LOG("WORLD", Sev::noisy)
1574  << msg << " " << nb_y << " " << nb_loc_y << " norm " << norm;
1576  };
1577 
1578  switch (iora) {
1579  case INSERT_VALUES:
1580  print_norm("mult_schur_block_shell insert x", x);
1581  print_norm("mult_schur_block_shell insert y", y);
1582  break;
1583  case ADD_VALUES:
1584  print_norm("mult_schur_block_shell add x", x);
1585  print_norm("mult_schur_block_shell add y", y);
1586  break;
1587  default:
1588  CHK_MOAB_THROW(MOFEM_NOT_IMPLEMENTED, "Wrong InsertMode");
1589  }
1590 
1591 #endif // NDEBUG
1592 
1593  // PetscLogFlops(xxx)
1594  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_BlockStructureMult, 0, 0, 0, 0);
1595 
1597 }
1598 
// Element-block-wise solve for the shell matrix (used for the A00 block):
// blocks are grouped per finite element (shared FEUId), assembled into a
// dense local matrix, optionally augmented with preconditioner blocks,
// factored/solved with LAPACK dgesv, and the local solutions are
// accumulated into x.
1600  InsertMode iora) {
1601  using matrix_range = ublas::matrix_range<MatrixDouble>;
1602  using range = ublas::range;
1604  BlockStructure *ctx;
1605  CHKERR MatShellGetContext(mat, (void **)&ctx);
1606 
1607  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_BlockStructureSolve, 0, 0, 0, 0);
1608 
1609  if (ctx->multiplyByPreconditioner) {
1610  if (!ctx->preconditionerBlocksPtr)
1611  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
1612  "No preconditionerBlocksPtr");
1613  }
1614 
1615  if (iora == INSERT_VALUES) {
1616  CHKERR VecZeroEntries(x);
1617  }
1618 
1619  double *x_array;
1620  CHKERR VecGetArray(x, &x_array);
1621  double *y_array;
1622  CHKERR VecGetArray(y, &y_array);
1623 
1624  std::vector<const DiagBlockIndex::Indexes *> blocks;
  // NOTE(review): a `MatrixDouble block_mat;` declaration appears to be
  // missing here (line lost in extraction) — block_mat is used below;
  // confirm against the upstream source.
1626  VectorDouble block_y;
1627  VectorInt ipiv;
1628 
  // index <1> groups blocks by finite-element UId
1629  auto &block_index = ctx->blockIndex.get<1>();
1630  for (auto it = block_index.begin(); it != block_index.end();) {
1631 
1632  // get blocks on finite element
1633  blocks.clear();
1634  auto last_uid = it->getFEUId();
1635  while (it != block_index.end() && it->getFEUId() == last_uid) {
1636  blocks.push_back(&*it);
1637  ++it;
1638  }
1639 
1640  // set local indices
  // tuple = (offset in local dense matrix, size, local dof index)
1641  std::map<UId, std::tuple<int, int, int>> block_indexing; // uid block map
1642  for (auto b : blocks) {
1643  if (block_indexing.find(b->getRowUId()) == block_indexing.end()) {
1644  block_indexing[b->getRowUId()] =
1645  std::make_tuple(b->getNbRows(), b->getNbRows(), b->getLocRow());
1646  }
1647  }
1648  std::sort(blocks.begin(), blocks.end(), [](auto a, auto b) {
1649  if (a->getRowUId() == b->getRowUId())
1650  return a->getColUId() < b->getColUId();
1651  else
1652  return a->getRowUId() < b->getRowUId();
1653  });
1654 
1655  // set indexes to block
1656  int mat_block_size = 0; // size of block matrix
1657  for (auto &b : block_indexing) {
1658  auto &[index, size, loc] = b.second;
1659  index = mat_block_size;
1660  mat_block_size += size;
1661  }
1662 
1663  auto get_range = [](auto &b) {
1664  auto [index, size, loc] = b;
1665  return range(index, index + size);
1666  };
1667 
  // per block: (storage shift, nb rows, nb cols, row offset, col offset)
1668  std::vector<std::tuple<int, int, int, int, int>> block_data;
1669  block_data.reserve(blocks.size());
1670  for (auto s : blocks) {
1671  auto ruid = s->getRowUId();
1672  auto cuid = s->getColUId();
1673  auto &rbi = block_indexing.at(ruid);
1674  auto &cbi = block_indexing.at(cuid);
1675  if (auto shift = s->getMatShift(); shift != -1) {
1676  block_data.push_back(std::make_tuple(
1677 
1678  shift,
1679 
1680  s->getNbRows(), s->getNbCols(),
1681 
1682  get<0>(rbi), get<0>(cbi))
1683 
1684  );
1685  }
1686  }
  // scatter raw blocks into the dense local matrix
1687  block_mat.resize(mat_block_size, mat_block_size, false);
1688  block_mat.clear();
1689  for (auto &bd : block_data) {
1690  auto &[shift, nb_rows, nb_cols, ridx, cidx] = bd;
1691  auto ptr = &(*(ctx->dataBlocksPtr))[shift];
1692  for (auto i = 0; i != nb_rows; ++i, ptr += nb_cols) {
1693  auto sub_ptr = &block_mat(ridx + i, cidx);
1694  cblas_dcopy(nb_cols, ptr, 1, sub_ptr, 1);
1695  }
1696  }
1697  if (ctx->multiplyByPreconditioner) {
1698  for (auto &bd : block_data) {
1699  auto &[shift, nb_rows, nb_cols, ridx, cidx] = bd;
1700  auto ptr = &(*(ctx->preconditionerBlocksPtr))[shift];
1701  for (auto i = 0; i != nb_rows; ++i, ptr += nb_cols) {
1702  auto sub_ptr = &block_mat(ridx + i, cidx);
1703  cblas_daxpy(nb_cols, 1., ptr, 1, sub_ptr, 1);
1704  }
1705  }
1706  }
  // transpose — presumably to match LAPACK's column-major storage; confirm
1707  block_mat = trans(block_mat);
1708 
1709  block_y.resize(mat_block_size, false);
1710  block_y.clear();
1711 
  // gather the local right-hand side from y
1712  for (auto &b : block_indexing) {
1713  auto [index, size, loc] = b.second;
1714  auto ptr = &y_array[loc];
1715  cblas_dcopy(size, ptr, 1, &block_y[index], 1);
1716  }
1717 
1718  // note: this does not exploit symmetry; doing so requires more implementation
1719  constexpr int nrhs = 1;
1720  ipiv.resize(mat_block_size, false);
1721  auto info = lapack_dgesv(mat_block_size, nrhs, &*block_mat.data().begin(),
1722  mat_block_size, &*ipiv.data().begin(),
1723  &*block_y.data().begin(), mat_block_size);
1724  if (info != 0) {
1725  SETERRQ1(PETSC_COMM_SELF, MOFEM_OPERATION_UNSUCCESSFUL,
1726  "error lapack solve dgesv info = %d", info);
1727  }
1728 
  // accumulate the local solution into x
1729  for (auto &b : block_indexing) {
1730  auto [index, size, loc] = b.second;
1731  auto ptr = &x_array[loc];
1732  cblas_daxpy(size, 1.0, &block_y[index], 1, ptr, 1);
1733  }
1734  }
1735 
1736  CHKERR VecRestoreArray(x, &x_array);
1737  CHKERR VecRestoreArray(y, &y_array);
1738 
1739  // PetscLogFlops(xxx)
1740  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_BlockStructureSolve, 0, 0, 0, 0);
1741 
1743 }
1744 
1746  BlockStructure *ctx, const EntitiesFieldData::EntData &row_data,
1747  const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat,
1748  InsertMode iora,
1749  boost::function<int(const DiagBlockIndex::Indexes *)> shift_extractor,
1750  boost::shared_ptr<std::vector<double>> data_blocks_ptr) {
1751 
1753 
1754  if (row_data.getIndices().empty())
1756  if (col_data.getIndices().empty())
1758 
1759 #ifndef NDEBUG
1760 
1761  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_BlockStructureSetValues, 0, 0, 0,
1762  0);
1763 
1764 #endif // NDEBUG
1765 
1766  switch (iora) {
1767  case ADD_VALUES:
1768  case INSERT_VALUES:
1769  break;
1770  default:
1771  SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED, "Wrong InsertMode");
1772  }
1773 
1774  auto get_row_data = [&]() -> std::pair<bool, VectorInt> {
1775  if (auto e_ptr = row_data.getFieldEntities()[0]) {
1776  if (auto stored_data_ptr =
1777  e_ptr->getSharedStoragePtr<EssentialBcStorage>()) {
1778  MOFEM_LOG("SELF", Sev::warning) << "Can lead to unhandled behaviour";
1779  return std::make_pair(true, stored_data_ptr->entityIndices);
1780  }
1781  }
1782  return std::make_pair(false, row_data.getIndices());
1783  };
1784 
1785  auto row_indices = get_row_data();
1786 
1787  std::vector<int> ent_row_indices;
1788  std::vector<int> ent_col_indices;
1789 
1790  for (auto &rent : row_data.getFieldEntities()) {
1791  if (auto r_cache = rent->entityCacheRowDofs.lock()) {
1792 
1793  auto &row_uid = rent->getLocalUniqueId();
1794  auto &row_ind = row_indices.second;
1795 
1796  for (auto &cent : col_data.getFieldEntities()) {
1797  if (auto c_cache = cent->entityCacheColDofs.lock()) {
1798 
1799  auto &col_uid = cent->getLocalUniqueId();
1800  auto &col_ind = col_data.getIndices();
1801 
1802  auto it = ctx->blockIndex.get<0>().find(
1803 
1804  boost::make_tuple(row_uid, col_uid)
1805 
1806  );
1807 
1808 #ifndef NDEBUG
1809 
1810  if (it == ctx->blockIndex.get<0>().end()) {
1811  MOFEM_LOG_CHANNEL("SELF");
1812  MOFEM_TAG_AND_LOG("SELF", Sev::error, "BlockMat")
1813  << "missing block: row "
1814  << row_data.getFieldDofs()[0]->getName() << " col "
1815  << col_data.getFieldDofs()[0]->getName();
1816  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
1817  "Block not allocated");
1818  }
1819 
1820 #endif
1821 
1822  auto ri = row_ind.begin();
1823  auto rie = row_ind.end();
1824 
1825  auto shift = shift_extractor(&*it);
1826  auto s_mat = &(*data_blocks_ptr)[shift];
1827 
1828  auto get_ent_indices = [](auto &cache, auto &ind) {
1829  ind.clear();
1830  ind.reserve(std::distance(cache->loHi[0], cache->loHi[1]));
1831  for (auto e = cache->loHi[0]; e != cache->loHi[1]; ++e) {
1832  auto glob = (*e)->getPetscGlobalDofIdx();
1833  if (glob != -1)
1834  ind.push_back(glob);
1835  }
1836  };
1837 
1838  get_ent_indices(r_cache, ent_row_indices);
1839  if (ent_row_indices.empty())
1840  continue;
1841  get_ent_indices(c_cache, ent_col_indices);
1842  if (ent_col_indices.empty())
1843  continue;
1844 
1845  if (mat.size1() == ent_row_indices.size() &&
1846  mat.size2() == ent_col_indices.size()) {
1847 
1848  if (iora == ADD_VALUES) {
1849  cblas_daxpy(mat.data().size(), 1.0, mat.data().data(), 1, s_mat,
1850  1);
1851  } else {
1852  cblas_dcopy(mat.data().size(), mat.data().data(), 1, s_mat, 1);
1853  }
1854 
1855  } else {
1856 
1857  int row = 0;
1858  for (auto re : ent_row_indices) {
1859  ri = std::find(ri, rie, re);
1860  if (!(ri == rie && *ri != -1)) {
1861 
1862  auto ci = col_ind.begin();
1863  auto cie = col_ind.end();
1864  auto ce = c_cache->loHi[0];
1865 
1866  int col = 0;
1867  for (auto ce : ent_col_indices) {
1868  ci = std::find(ci, cie, ce);
1869  if (!(ci == cie && *ci != -1)) {
1870  auto &m = s_mat[row * ent_col_indices.size() + col];
1871  if (iora == ADD_VALUES) {
1872  m += mat(std::distance(row_ind.begin(), ri),
1873  std::distance(col_ind.begin(), ci));
1874  } else {
1875  m = mat(std::distance(row_ind.begin(), ri),
1876  std::distance(col_ind.begin(), ci));
1877  }
1878  }
1879  ++col;
1880  } // cols
1881  }
1882  ++row;
1883  } // rows
1884  }
1885 
1886  } // iterate entities
1887  }
1888  }
1889  }
1890 
1891 #ifndef NDEBUG
1892  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_BlockStructureSetValues, 0, 0, 0,
1893  0);
1894 #endif // NDEBUG
1895 
1897 }
1898 
  // MatSetValues specialisation for the block shell matrix: when M is a
  // MATSHELL, assemble straight into the raw data blocks; otherwise fall
  // back to the standard PETSc assembly. (NOTE(review): the function name
  // line and the line invoking the block-assembly helper were lost in
  // extraction.)
1901  const EntitiesFieldData::EntData &col_data,
1902  const MatrixDouble &mat, InsertMode iora) {
1904  PetscBool is_mat_shell = PETSC_FALSE;
1905  PetscObjectTypeCompare((PetscObject)M, MATSHELL, &is_mat_shell);
1906  if (is_mat_shell) {
1907  BlockStructure *ctx;
1908  CHKERR MatShellGetContext(M, (void **)&ctx);
1910  ctx, row_data, col_data, mat, iora,
1911  [](const DiagBlockIndex::Indexes *idx) { return idx->getMatShift(); },
1912  ctx->dataBlocksPtr);
1913  } else {
1914  CHKERR MatSetValues<AssemblyTypeSelector<PETSC>>(M, row_data, col_data, mat,
1915  iora);
1916  }
1918 }
1919 
  // Same as the block-shell MatSetValues above, but writes into the
  // preconditioner block storage; errors out if that storage was never
  // allocated. Non-shell matrices fall back to standard PETSc assembly.
1921  Mat M, const EntitiesFieldData::EntData &row_data,
1922  const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat,
1923  InsertMode iora) {
1925  PetscBool is_mat_shell = PETSC_FALSE;
1926  PetscObjectTypeCompare((PetscObject)M, MATSHELL, &is_mat_shell);
1927  if (is_mat_shell) {
1928  BlockStructure *ctx;
1929  CHKERR MatShellGetContext(M, (void **)&ctx);
1930  if (!ctx->preconditionerBlocksPtr)
1931  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
1932  "No preconditionerBlocksPtr");
1934  ctx, row_data, col_data, mat, iora,
1935  [](const DiagBlockIndex::Indexes *idx) { return idx->getMatShift(); },
1936  ctx->preconditionerBlocksPtr);
1937  } else {
1938  CHKERR MatSetValues<AssemblyTypeSelector<PETSC>>(M, row_data, col_data, mat,
1939  iora);
1940  }
1942 }
1943 
/**
 * @brief Build the 2x2 nested (Schur/block) structure from a flat block
 * matrix: partitions the blocks of `block_mat_data_ptr` into the four
 * sub-matrices (schur,schur), (schur,block), (block,schur), (block,block)
 * of the two sub-problems held by `dms`. All four share the same raw data
 * storage; only the index sets differ.
 */
1944 boost::shared_ptr<NestSchurData> createSchurNestedMatrixStruture(
1945 
1946  std::pair<SmartPetscObj<DM>, SmartPetscObj<DM>> dms,
1947  boost::shared_ptr<BlockStructure> block_mat_data_ptr,
1948 
1949  std::vector<std::string> fields_names, //< a00 fields
1950  std::vector<boost::shared_ptr<Range>>
1951  field_ents, //< a00 ranges (can be null),
1952  bool add_preconditioner_block) {
1953 
1954  if (!block_mat_data_ptr) {
1955  CHK_THROW_MESSAGE(MOFEM_DATA_INCONSISTENCY, "Block data not set");
1956  }
1957 
1958  if (fields_names.size() != field_ents.size())
1960  "fields_names.size() != field_ents.size()");
1961 
1962  auto [schur_dm, block_dm] = dms;
1963  auto schur_prb = getProblemPtr(schur_dm);
1964  auto block_prb = getProblemPtr(block_dm);
1965  // auto m_field_ptr = getInterfacePtr(block_dm);
1966 
1967  auto schur_dofs_row = schur_prb->getNumeredRowDofsPtr();
1968  auto schur_dofs_col = schur_prb->getNumeredColDofsPtr();
1969  auto block_dofs_row = block_prb->getNumeredRowDofsPtr();
1970  auto block_dofs_col = block_prb->getNumeredColDofsPtr();
1971 
1972  auto ao_schur_row = schur_prb->getSubData()->getSmartRowMap();
1973  auto ao_schur_col = schur_prb->getSubData()->getSmartColMap();
1974  auto ao_block_row = block_prb->getSubData()->getSmartRowMap();
1975  auto ao_block_col = block_prb->getSubData()->getSmartColMap();
1976 
  // Ghosted work vectors per sub-problem; detach them from the DM so the
  // DM's lifetime is not extended by the vectors.
1977  auto schur_vec_x = createDMVector(schur_dm);
1978  auto block_vec_x = createDMVector(block_dm);
1979  auto schur_vec_y = vectorDuplicate(schur_vec_x);
1980  auto block_vec_y = vectorDuplicate(block_vec_x);
1981  CHKERR VecSetDM(schur_vec_x, PETSC_NULL);
1982  CHKERR VecSetDM(block_vec_x, PETSC_NULL);
1983  CHKERR VecSetDM(schur_vec_y, PETSC_NULL);
1984  CHKERR VecSetDM(block_vec_y, PETSC_NULL);
1985 
  // Find the first numbered dof on the entity `uid` in the given problem's
  // row or column dofs; a null pointer means the entity is not part of
  // that sub-problem.
1986  auto find_field_ent = [&](auto uid, auto prb, auto rc) {
1987  boost::shared_ptr<NumeredDofEntity_multiIndex> dofs;
1988 
1989  switch (rc) {
1990  case ROW:
1991  dofs = prb->getNumeredRowDofsPtr();
1992  break;
1993  case COL:
1994  dofs = prb->getNumeredColDofsPtr();
1995  break;
1996  default:
1997  CHK_MOAB_THROW(MOFEM_NOT_IMPLEMENTED, "Wrong RowCol");
1998  break;
1999  }
2000 
2001  auto lo = dofs->get<Unique_mi_tag>().lower_bound(uid);
2002  if (lo == dofs->get<Unique_mi_tag>().end())
2003  return boost::shared_ptr<NumeredDofEntity>();
2004  auto hi = dofs->get<Unique_mi_tag>().upper_bound(
2005  DofEntity::getUniqueIdCalculate(MAX_DOFS_ON_ENTITY - 1, uid));
2006  if (lo != hi)
2007  return *lo;
2008 
2009  return boost::shared_ptr<NumeredDofEntity>();
2010  };
2011 
  // Four index structures, all aliasing the same raw block data:
  // [0]=(schur,schur), [1]=(schur,block), [2]=(block,schur),
  // [3]=(block,block)
2012  std::array<boost::shared_ptr<BlockStructure>, 4> data_ptrs;
2013 
2014  for (auto r = 0; r != 3; ++r) {
2015  data_ptrs[r] = boost::make_shared<BlockStructure>();
2016  data_ptrs[r]->dataBlocksPtr = block_mat_data_ptr->dataBlocksPtr;
2017  }
2018  data_ptrs[3] = boost::make_shared<BlockStructure>();
2019  data_ptrs[3]->dataBlocksPtr = block_mat_data_ptr->dataBlocksPtr;
2020 
2021  data_ptrs[0]->ghostX = schur_vec_x;
2022  data_ptrs[0]->ghostY = schur_vec_y;
2023  data_ptrs[1]->ghostX = block_vec_x;
2024  data_ptrs[1]->ghostY = schur_vec_y;
2025  data_ptrs[2]->ghostX = schur_vec_x;
2026  data_ptrs[2]->ghostY = block_vec_y;
2027  data_ptrs[3]->ghostX = block_vec_x;
2028  data_ptrs[3]->ghostY = block_vec_y;
2029 
  // Route every block of the flat matrix into each sub-matrix whose
  // row/col entities belong to the respective sub-problem. A block may
  // appear in several sub-matrices (same data, different numbering).
2030  int idx = 0;
2031  for (auto &d : block_mat_data_ptr->blockIndex.get<0>()) {
2032 
2033  auto insert = [&](auto &m, auto &dof_r, auto &dof_c, auto &d) {
2034  m.insert(
2035 
2037  d.getRowUId(), d.getColUId(), d.getFEUId(),
2038 
2039  dof_r->getPetscGlobalDofIdx(), dof_c->getPetscGlobalDofIdx(),
2040 
2041  d.getNbRows(), d.getNbCols(),
2042 
2043  dof_r->getPetscLocalDofIdx(), dof_c->getPetscLocalDofIdx(),
2044 
2045  d.getMatShift()}
2046 
2047  );
2048  };
2049 
2050  auto dof_schur_row = find_field_ent(d.getRowUId(), schur_prb, ROW);
2051  auto dof_schur_col = find_field_ent(d.getColUId(), schur_prb, COL);
2052  auto dof_block_row = find_field_ent(d.getRowUId(), block_prb, ROW);
2053  auto dof_block_col = find_field_ent(d.getColUId(), block_prb, COL);
2054 
2055  if (dof_schur_row && dof_schur_col) {
2056  insert(data_ptrs[0]->blockIndex, dof_schur_row, dof_schur_col, d);
2057  }
2058 
2059  if (dof_schur_row && dof_block_col) {
2060  insert(data_ptrs[1]->blockIndex, dof_schur_row, dof_block_col, d);
2061  }
2062 
2063  if (dof_block_row && dof_schur_col) {
2064  insert(data_ptrs[2]->blockIndex, dof_block_row, dof_schur_col, d);
2065  }
2066 
2067  if (dof_block_row && dof_block_col) {
2068  insert(data_ptrs[3]->blockIndex, dof_block_row, dof_block_col, d);
2069  }
2070 
2071  ++idx;
2072  }
2073 
2074  // set data for a00 solve (inverse blocks)
2075  auto set_up_a00_data = [&](auto inv_block_data) {
2077 
  // the a00 sub-matrix owns the (shared) preconditioner storage and is
  // the only one that multiplies by it
2078  if (add_preconditioner_block) {
2079  auto preconditioned_block = boost::make_shared<std::vector<double>>(
2080  inv_block_data->dataBlocksPtr->size(), 0);
2081  inv_block_data->preconditionerBlocksPtr = preconditioned_block;
2082  inv_block_data->multiplyByPreconditioner = true;
2083  block_mat_data_ptr->preconditionerBlocksPtr =
2084  inv_block_data->preconditionerBlocksPtr;
2085  block_mat_data_ptr->multiplyByPreconditioner = false;
2086  }
2087 
2089  };
2090 
2091  CHKERR set_up_a00_data(data_ptrs[3]);
2092 
2093  MPI_Comm comm;
2094  CHKERR PetscObjectGetComm((PetscObject)schur_dm, &comm);
2095 
2096  auto create_shell_mat = [&](auto nb_r_loc, auto nb_c_loc, auto nb_r_glob,
2097  auto nb_c_glob, auto data_ptr) {
2098  Mat mat_raw;
2099  CHKERR MatCreateShell(comm, nb_r_loc, nb_c_loc, nb_r_glob, nb_c_glob,
2100  (void *)data_ptr.get(), &mat_raw);
2101  CHKERR setSchurBlockMatOps(mat_raw);
2102  return SmartPetscObj<Mat>(mat_raw);
2103  };
2104 
2105  auto schur_nb_global = schur_prb->getNbDofsRow();
2106  auto block_nb_global = block_prb->getNbDofsRow();
2107  auto schur_nb_local = schur_prb->getNbLocalDofsRow();
2108  auto block_nb_local = block_prb->getNbLocalDofsRow();
2109 
2110  std::array<SmartPetscObj<Mat>, 4> mats_array;
2111  mats_array[0] =
2112  create_shell_mat(schur_nb_local, schur_nb_local, schur_nb_global,
2113  schur_nb_global, data_ptrs[0]);
2114  mats_array[1] =
2115  create_shell_mat(schur_nb_local, block_nb_local, schur_nb_global,
2116  block_nb_global, data_ptrs[1]);
2117  mats_array[2] =
2118  create_shell_mat(block_nb_local, schur_nb_local, block_nb_global,
2119  schur_nb_global, data_ptrs[2]);
2120  mats_array[3] =
2121  create_shell_mat(block_nb_local, block_nb_local, block_nb_global,
2122  block_nb_global, data_ptrs[3]);
2123 
2124  MOFEM_TAG_AND_LOG("SYNC", Sev::verbose, "NestedSchur")
2125  << "(0, 0) " << schur_nb_local << " " << schur_nb_global << " "
2126  << data_ptrs[0]->blockIndex.size();
2127  MOFEM_TAG_AND_LOG("SYNC", Sev::verbose, "NestedSchur")
2128  << "(0, 1) " << schur_nb_local << " " << block_nb_local << " "
2129  << schur_nb_global << " " << block_nb_global << " "
2130  << data_ptrs[1]->blockIndex.size();
2131  MOFEM_TAG_AND_LOG("SYNC", Sev::verbose, "NestedSchur")
2132  << "(1, 0) " << block_nb_local << " " << schur_nb_local << " "
2133  << block_nb_global << " " << schur_nb_global << " "
2134  << data_ptrs[2]->blockIndex.size();
2135  MOFEM_TAG_AND_LOG("SYNC", Sev::verbose, "NestedSchur")
2136  << "(1, 1) " << block_nb_local << " " << block_nb_global << " "
2137  << data_ptrs[3]->blockIndex.size();
2138 
2139  MOFEM_LOG_SEVERITY_SYNC(comm, Sev::verbose);
2140 
2141  auto schur_is = schur_prb->getSubData()->getSmartRowIs();
2142  auto block_is = block_prb->getSubData()->getSmartRowIs();
2143 
2144  return boost::make_shared<NestSchurData>(
2145 
2146  mats_array, data_ptrs, block_mat_data_ptr,
2147  std::make_pair(schur_is, block_is)
2148 
2149  );
2150 }
2151 
/**
 * @brief Create the MATNEST matrix from a previously built NestSchurData:
 * the four shell sub-matrices are arranged in a 2x2 nest indexed by the
 * Schur and block index sets. Returns the nest together with the data it
 * references (keeping the data alive).
 */
2152 std::pair<SmartPetscObj<Mat>, boost::shared_ptr<NestSchurData>>
2153 createSchurNestedMatrix(boost::shared_ptr<NestSchurData> schur_net_data_ptr) {
2154 
2155  if (!schur_net_data_ptr)
2157 
2158  auto [mat_arrays, data_ptrs, block_mat_data_ptr, iss] = *schur_net_data_ptr;
2159  auto [schur_is, block_is] = iss;
2160 
2161  std::array<IS, 2> is_a = {schur_is, block_is};
2162  std::array<Mat, 4> mats_a = {
2163 
2164  mat_arrays[0], mat_arrays[1], mat_arrays[2], mat_arrays[3]
2165 
2166  };
2167 
2168  MPI_Comm comm;
2169  CHKERR PetscObjectGetComm((PetscObject)mat_arrays[0], &comm);
2170 
  // row and column partitions of the nest use the same two index sets
2171  Mat mat_raw;
2172  CHKERR MatCreateNest(
2173 
2174  comm, 2, is_a.data(), 2, is_a.data(), mats_a.data(), &mat_raw
2175 
2176  );
2177 
2178  return std::make_pair(SmartPetscObj<Mat>(mat_raw), schur_net_data_ptr);
2179 }
2180 
  // Factory for the Schur-assembly "begin" operator; the caller takes
  // ownership of the returned raw pointer.
2182  return new OpSchurAssembleBegin();
2183 }
2184 
// Factory for the Schur-assembly "end" operator: symm_op selects the
// SchurDSYSV specialisation, otherwise SchurDGESV. Caller owns the
// returned pointer. (NOTE(review): the parameter line declaring `ao` and
// `schur` was lost in extraction — both are used below.)
2185 OpSchurAssembleBase *
2186 createOpSchurAssembleEnd(std::vector<std::string> fields_name,
2187  std::vector<boost::shared_ptr<Range>> field_ents,
2189  bool sym_schur, bool symm_op) {
2190  if (symm_op)
2191  return new OpSchurAssembleEnd<SchurDSYSV>(fields_name, field_ents, ao,
2192  schur, sym_schur, symm_op);
2193  else
2194  return new OpSchurAssembleEnd<SchurDGESV>(fields_name, field_ents, ao,
2195  schur, sym_schur, symm_op);
2196 }
2197 
2198 OpSchurAssembleBase *
2199 createOpSchurAssembleEnd(std::vector<std::string> fields_name,
2200  std::vector<boost::shared_ptr<Range>> field_ents,
2201  std::vector<SmartPetscObj<AO>> sequence_of_aos,
2202  std::vector<SmartPetscObj<Mat>> sequence_of_mats,
2203  std::vector<bool> sym_schur, bool symm_op,
2204  boost::shared_ptr<BlockStructure> diag_blocks) {
2205  return createOpSchurAssembleEnd(
2206  fields_name, field_ents, sequence_of_aos.back(), sequence_of_mats.back(),
2207  sym_schur.back(), symm_op);
2208 }
2209 
2210 OpSchurAssembleBase *
2211 createOpSchurAssembleEnd(std::vector<std::string> fields_name,
2212  std::vector<boost::shared_ptr<Range>> field_ents,
2213  std::vector<SmartPetscObj<AO>> sequence_of_aos,
2214  std::vector<SmartPetscObj<Mat>> sequence_of_mats,
2215  std::vector<bool> sym_schur,
2216  std::vector<double> diag_eps, bool symm_op,
2217  boost::shared_ptr<BlockStructure> diag_blocks) {
2218  return createOpSchurAssembleEnd(
2219  fields_name, field_ents, sequence_of_aos.back(), sequence_of_mats.back(),
2220  sym_schur.back(), symm_op);
2221 }
2222 
// NOTE(review): the signature line — per the symbol index,
// MoFEMErrorCode setSchurA00MatSolvePC(SmartPetscObj<PC> pc) — and the
// MoFEMFunctionBegin/Return macro lines were dropped by this extraction.

  // Shell-PC apply callback: x = P^{-1} f, where P is the preconditioner
  // matrix attached to the PC; P is expected to support MatSolve.
  auto apply = [](PC pc, Vec f, Vec x) {
    Mat A, P;
    CHKERR PCGetOperators(pc, &A, &P);
    CHKERR MatSolve(P, f, x);
  };

  // Install the callback as a named PCSHELL preconditioner.
  CHKERR PCSetType(pc, PCSHELL);
  CHKERR PCShellSetApply(pc, apply);
  CHKERR PCShellSetName(pc, "MoFEMSchurBlockPC");

}
2240 
// Backward-compatible alias: delegates to the A00-block solver PC setup.
// NOTE(review): the signature line (MoFEMErrorCode
// setSchurMatSolvePC(SmartPetscObj<PC> pc), per the symbol index) was
// dropped by this extraction.
  return setSchurA00MatSolvePC(pc);
}
2244 
// This is used to assemble local element matrices for Schur complement
// and matrix for KSP.
template <>
// NOTE(review): two signature lines — return type/name and the `row_data`
// parameter of MatSetValues<SchurElemMats>, per the symbol index — were
// dropped by this extraction.
                                              const EntitiesFieldData::EntData &col_data,
                                              const MatrixDouble &mat, InsertMode iora) {
  // Delegate to SchurElemMats, which caches the local matrix for the Schur
  // complement and forwards values to the configured backend assembler.
  return SchurElemMats::MatSetValues(M, row_data, col_data, mat, iora);
}
2254 
// NOTE(review): the leading signature lines — MoFEMErrorCode
// schur_mat_set_values_wrap(Mat M, const ... &row_data, per the symbol
// index — were dropped by this extraction.
                                              const EntitiesFieldData::EntData &col_data,
                                              const MatrixDouble &mat, InsertMode iora) {
  // Plain PETSc assembly backend — no Schur bookkeeping.
  return MatSetValues<AssemblyTypeSelector<PETSC>>(M, row_data, col_data, mat,
                                                   iora);
}
2262 
// Is assumed that standard PETSc assembly works for matrices used by KSP.
// Default backend pointer: route assembly through plain PETSc MatSetValues.
SchurBackendMatSetValuesPtr::MatSetValuesPtr
    SchurBackendMatSetValuesPtr::matSetValuesPtr = schur_mat_set_values_wrap;
2266 
// We assemble matrix for KSP and store local matrices for Schur complement.
// Schur complement is calculated and assembled in OpSchurAssembleEnd.
// NOTE(review): the signature lines (SchurElemMats::MatSetValues(Mat M,
// const ... &row_data,) and the MoFEMFunctionBegin/Return macro lines were
// dropped by this extraction.
                                              const EntitiesFieldData::EntData &col_data,
                                              const MatrixDouble &mat, InsertMode iora) {
  // First cache the local element matrix for later Schur elimination ...
  CHKERR assembleStorage(row_data, col_data, mat, iora);
  // ... then assemble into the KSP matrix through the configured backend.
  CHKERR SchurBackendMatSetValuesPtr::matSetValuesPtr(M, row_data, col_data,
                                                      mat, iora);
}
2279 
2280 // All is now wrapped in specialisation of
2281 // MatSetValues<AssemblyTypeSelector<SCHUR>>
2282 template <>
2283 MoFEMErrorCode MatSetValues<AssemblyTypeSelector<SCHUR>>(
2284  Mat M, const EntitiesFieldData::EntData &row_data,
2285  const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat,
2286  InsertMode iora) {
2287  return MatSetValues<SchurElemMats>(M, row_data, col_data, mat, iora);
2288 }
2289 
// Standard (PETSc) assembly for block matrices does not work — the shell
// block matrix needs its dedicated assembly routine.
template <>
// NOTE(review): two signature lines — return type/name of
// MatSetValues<BlockStructure> and its first parameters, per the symbol
// index — were dropped by this extraction.
                                              const EntitiesFieldData::EntData &col_data,
                                              const MatrixDouble &mat, InsertMode iora) {
  // Route values to the shell block-matrix assembler.
  return shell_block_mat_asmb_wrap(M, row_data, col_data, mat, iora);
}
2298 
2300 
  // Assembly entry point mirroring SchurElemMats::MatSetValues but targeting
  // the block (shell) matrix backend.
  // NOTE(review): the enclosing struct declaration line (SchurElemMatsBlock,
  // per the symbol index) was dropped by this extraction.
  static MoFEMErrorCode MatSetValues(Mat M,
                                     const EntitiesFieldData::EntData &row_data,
                                     const EntitiesFieldData::EntData &col_data,
                                     const MatrixDouble &mat, InsertMode iora);
};
2306 
// Default block-matrix backend pointer.
// NOTE(review): the declaration's type line and the initialiser line were
// dropped by this extraction.
    SchurBackendMatSetValuesPtr::matSetValuesBlockPtr =
2310 
// SchurElemMatsBlock::MatSetValues: cache the local matrix for the Schur
// complement, then assemble through the block (shell) backend.
// NOTE(review): the leading signature lines and the
// MoFEMFunctionBegin/Return macro lines were dropped by this extraction.
                                           const EntitiesFieldData::EntData &row_data,
                                           const EntitiesFieldData::EntData &col_data,
                                           const MatrixDouble &mat, InsertMode iora) {
  CHKERR assembleStorage(row_data, col_data, mat, iora);
  CHKERR SchurBackendMatSetValuesPtr::matSetValuesBlockPtr(M, row_data,
                                                           col_data, mat, iora);
}
2322 
template <>
// Specialisation routing assembly through SchurElemMatsBlock.
// NOTE(review): the signature lines — return type/name of
// MatSetValues<SchurElemMatsBlock>, per the symbol index — were dropped by
// this extraction.
                                           const EntitiesFieldData::EntData &row_data,
                                           const EntitiesFieldData::EntData &col_data,
                                           const MatrixDouble &mat, InsertMode iora) {
  return SchurElemMatsBlock::MatSetValues(M, row_data, col_data, mat, iora);
}
2331 
2333 
  // Assembly entry point that writes into the preconditioner blocks of the
  // shell block matrix.
  // NOTE(review): the enclosing struct declaration line
  // (SchurElemMatsPreconditionedBlock, per the symbol index) was dropped by
  // this extraction.
  static MoFEMErrorCode MatSetValues(Mat M,
                                     const EntitiesFieldData::EntData &row_data,
                                     const EntitiesFieldData::EntData &col_data,
                                     const MatrixDouble &mat, InsertMode iora);
};
2339 
// Default preconditioned-block backend pointer.
// NOTE(review): the declaration's type line and the initialiser line were
// dropped by this extraction.
    SchurBackendMatSetValuesPtr::matSetValuesPreconditionedBlockPtr =
2343 
// SchurElemMatsPreconditionedBlock::MatSetValues: cache the local matrix for
// the Schur complement, then assemble through the preconditioned-block
// backend.
// NOTE(review): the leading signature line and the MoFEMFunctionBegin/Return
// macro lines were dropped by this extraction.
    Mat M, const EntitiesFieldData::EntData &row_data,
    const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat,
    InsertMode iora) {
  CHKERR assembleStorage(row_data, col_data, mat, iora);
  CHKERR SchurBackendMatSetValuesPtr::matSetValuesPreconditionedBlockPtr(
      M, row_data, col_data, mat, iora);
}
2354 
template <>
// Specialisation routing assembly through SchurElemMatsPreconditionedBlock.
// NOTE(review): the return-type/name line of
// MatSetValues<SchurElemMatsPreconditionedBlock>, per the symbol index, was
// dropped by this extraction.
    Mat M, const EntitiesFieldData::EntData &row_data,
    const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat,
    InsertMode iora) {
  return SchurElemMatsPreconditionedBlock::MatSetValues(M, row_data, col_data,
                                                        mat, iora);
}
2363 
// Toggle whether the shell block matrix multiplies by its preconditioner
// blocks. Turning the flag ON requires preconditionerBlocksPtr to be set;
// otherwise an error is raised.
// NOTE(review): the MoFEMErrorCode return-type line and the
// MoFEMFunctionBegin/Return macro lines were dropped by this extraction.
schurSwitchPreconditioner(boost::shared_ptr<BlockStructure> block_mat_data) {
  if (block_mat_data->multiplyByPreconditioner) {
    // Currently on: switch off.
    block_mat_data->multiplyByPreconditioner = false;
  } else {
    // Switching on is only valid when preconditioner blocks exist.
    if (!block_mat_data->preconditionerBlocksPtr) {
      SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
              "preconditionerBlocksPtr not set");
    }
    block_mat_data->multiplyByPreconditioner = true;
  }
}
2378 
// Save the block-matrix sparsity/structure as a mesh: each stored block
// becomes one MBQUAD spanning its (row, col) index range, tagged with the
// 2-norm of its data and its offset into the packed data vector. The mesh is
// written as a VTK file for visual inspection.
// NOTE(review): the MoFEMErrorCode return-type line and the
// MoFEMFunctionBegin/Return macro lines were dropped by this extraction.
schurSaveBlockMesh(boost::shared_ptr<BlockStructure> block_mat_data,
                   std::string filename) {

  // Throw-away MOAB instance used only for writing the visualisation mesh.
  moab::Core core;
  moab::Interface &moab = core;

  ReadUtilIface *iface;
  CHKERR moab.query_interface(iface);

  auto size = block_mat_data->blockIndex.size();

  // Allocate 4 vertices per block (quad corners) ...
  EntityHandle starting_vertex;
  vector<double *> arrays_coord;
  CHKERR iface->get_node_coords(3, 4 * size, 0, starting_vertex, arrays_coord);
  // ... and one quad element per block.
  EntityHandle starting_handle;
  EntityHandle *array;
  CHKERR iface->get_element_connect(size, 4, MBQUAD, 0, starting_handle, array);

  // Tag: 2-norm of the block's dense data.
  double def_val_nrm2 = 0;
  Tag th_nrm2;
  CHKERR moab.tag_get_handle("nrm2", 1, MB_TYPE_DOUBLE, th_nrm2,
                             MB_TAG_CREAT | MB_TAG_DENSE, &def_val_nrm2);

  // Tag: offset of the block in the packed data vector.
  int def_val_mat_shift = 0;
  Tag th_mat_shift;
  CHKERR moab.tag_get_handle("mat_shift", 1, MB_TYPE_INTEGER, th_mat_shift,
                             MB_TAG_CREAT | MB_TAG_DENSE, &def_val_mat_shift);

  int i = 0;
  for (auto &d : block_mat_data->blockIndex) {
    auto row = d.getRow();
    auto col = d.getCol();
    auto nb_rows = d.getNbRows();
    auto nb_cols = d.getNbCols();
    auto mat_shift = d.getMatShift();

    // Quad corners span [row, row + nb_rows] x [col, col + nb_cols] in the
    // (x, y) plane; z is zero.
    // q0
    arrays_coord[0][4 * i + 0] = row;
    arrays_coord[1][4 * i + 0] = col;
    arrays_coord[2][4 * i + 0] = 0;

    // q1
    arrays_coord[0][4 * i + 1] = row + nb_rows;
    arrays_coord[1][4 * i + 1] = col;
    arrays_coord[2][4 * i + 1] = 0;

    // q2
    arrays_coord[0][4 * i + 2] = row + nb_rows;
    arrays_coord[1][4 * i + 2] = col + nb_cols;
    arrays_coord[2][4 * i + 2] = 0;

    // q3
    arrays_coord[0][4 * i + 3] = row;
    arrays_coord[1][4 * i + 3] = col + nb_cols;
    arrays_coord[2][4 * i + 3] = 0;

    // ele conn
    array[4 * i + 0] = starting_vertex + 4 * i + 0;
    array[4 * i + 1] = starting_vertex + 4 * i + 1;
    array[4 * i + 2] = starting_vertex + 4 * i + 2;
    array[4 * i + 3] = starting_vertex + 4 * i + 3;

    // Norm of this block's dense data: nb_rows * nb_cols contiguous doubles
    // starting at mat_shift in the packed data vector.
    auto prt = &(*block_mat_data->dataBlocksPtr)[d.getMatShift()];
    auto nrm2 = cblas_dnrm2(nb_rows * nb_cols, prt, 1);
    EntityHandle ele = starting_handle + i;
    CHKERR moab.tag_set_data(th_nrm2, &ele, 1, &nrm2);
    CHKERR moab.tag_set_data(th_mat_shift, &ele, 1, &mat_shift);

    ++i;
  }

  // Finalise connectivity and write as VTK (viewable in e.g. ParaView).
  CHKERR iface->update_adjacencies(starting_handle, size, 4, array);
  CHKERR moab.write_file(filename.c_str(), "VTK", "");

}
2457 
2458 } // namespace MoFEM
UBlasMatrix< double >
NOSPACE
@ NOSPACE
Definition: definitions.h:83
MoFEMFunctionReturnHot
#define MoFEMFunctionReturnHot(a)
Last executable line of each PETSc function used for error handling. Replaces return()
Definition: definitions.h:460
CHK_MOAB_THROW
#define CHK_MOAB_THROW(err, msg)
Check error code of MoAB function and throw MoFEM exception.
Definition: definitions.h:589
MoFEM::id_from_handle
auto id_from_handle(const EntityHandle h)
Definition: Templates.hpp:1890
MoFEM::SchurElemMats::rowIndices
static boost::ptr_vector< VectorInt > rowIndices
Definition: Schur.cpp:198
MoFEM::EntitiesFieldData::EntData
Data on single entity (This is passed as argument to DataOperator::doWork)
Definition: EntitiesFieldData.hpp:128
MoFEM::DiagBlockIndex
Definition: Schur.cpp:206
MoFEM::EntitiesFieldData::EntData::getFieldEntities
const VectorFieldEntities & getFieldEntities() const
get field entities
Definition: EntitiesFieldData.hpp:1277
MoFEM::OpSchurAssembleEndImpl::invMat
MatrixDouble invMat
Definition: Schur.cpp:113
MoFEM::BlockStructure::dataBlocksPtr
boost::shared_ptr< std::vector< double > > dataBlocksPtr
Definition: Schur.cpp:285
MoFEM::DiagBlockIndex::Indexes::row
int row
Definition: Schur.cpp:245
MoFEM::zero_rows_columns
static PetscErrorCode zero_rows_columns(Mat A, PetscInt N, const PetscInt rows[], PetscScalar diag, Vec x, Vec b)
Definition: Schur.cpp:1230
MoFEM::MatSetValues
MoFEMErrorCode MatSetValues(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const double *ptr, InsertMode iora)
Assemble PETSc matrix.
Definition: EntitiesFieldData.hpp:1644
MoFEM::DiagBlockIndex::blockIndex
BlockIndex blockIndex
blocks indexes storage
Definition: Schur.cpp:277
MoFEM::field_bit_from_bit_number
auto field_bit_from_bit_number(const int bit_number)
get field bit id from bit number
Definition: Templates.hpp:1930
MoFEM::DiagBlockIndex::Indexes::nb_cols
int nb_cols
Definition: Schur.cpp:248
MOFEM_LOG_SEVERITY_SYNC
#define MOFEM_LOG_SEVERITY_SYNC(comm, severity)
Synchronise "SYNC" on curtain severity level.
Definition: LogManager.hpp:352
MoFEM::createISGeneral
auto createISGeneral(MPI_Comm comm, PetscInt n, const PetscInt idx[], PetscCopyMode mode)
Creates a data structure for an index set containing a list of integers.
Definition: PetscSmartObj.hpp:287
MoFEM::createOpSchurAssembleEnd
OpSchurAssembleBase * createOpSchurAssembleEnd(std::vector< std::string > fields_name, std::vector< boost::shared_ptr< Range >> field_ents, std::vector< SmartPetscObj< AO >> sequence_of_aos, std::vector< SmartPetscObj< Mat >> sequence_of_mats, std::vector< bool > sym_schur, std::vector< double > diag_eps, bool symm_op, boost::shared_ptr< BlockStructure > diag_blocks)
Definition: Schur.cpp:2211
MOFEM_LOG_CHANNEL
#define MOFEM_LOG_CHANNEL(channel)
Set and reset channel.
Definition: LogManager.hpp:284
EntityHandle
MoFEM::BlockStructure::preconditionerBlocksPtr
boost::shared_ptr< std::vector< double > > preconditionerBlocksPtr
Definition: Schur.cpp:286
OP_SCHUR_ASSEMBLE_BASE
lapack_dgetri
static __CLPK_integer lapack_dgetri(__CLPK_integer n, __CLPK_doublereal *a, __CLPK_integer lda, __CLPK_integer *ipiv, __CLPK_doublereal *work, __CLPK_integer lwork)
Definition: lapack_wrap.h:185
MoFEM::FEMethod
structure for User Loop Methods on finite elements
Definition: LoopMethods.hpp:369
MoFEM::solve_schur_block_shell
static MoFEMErrorCode solve_schur_block_shell(Mat mat, Vec y, Vec x, InsertMode iora)
Definition: Schur.cpp:1599
MoFEM::MatSetValues< BlockStructure >
MoFEMErrorCode MatSetValues< BlockStructure >(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:2293
MoFEM::cmp_uid_hi
static auto cmp_uid_hi(const UId &b, const boost::weak_ptr< FieldEntity > &a)
Definition: ForcesAndSourcesCore.cpp:29
CHK_THROW_MESSAGE
#define CHK_THROW_MESSAGE(err, msg)
Check and throw MoFEM exception.
Definition: definitions.h:609
MoFEM::SchurElemMats::SchurElemStorage
multi_index_container< const SchurElemMats *, indexed_by< ordered_unique< tag< uid_mi_tag >, composite_key< SchurElemMats, member< SchurElemMats, const UId, &SchurElemMats::uidRow >, member< SchurElemMats, const UId, &SchurElemMats::uidCol > > >, ordered_non_unique< tag< col_mi_tag >, member< SchurElemMats, const UId, &SchurElemMats::uidCol > > > > SchurElemStorage
Definition: Schur.cpp:195
MoFEM::OpSchurAssembleBegin::OpSchurAssembleBegin
OpSchurAssembleBegin()
Definition: Schur.cpp:493
MoFEM::shell_block_mat_asmb_wrap_impl
MoFEMErrorCode shell_block_mat_asmb_wrap_impl(BlockStructure *ctx, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora, boost::function< int(const DiagBlockIndex::Indexes *)> shift_extractor, boost::shared_ptr< std::vector< double >> data_blocks_ptr)
Definition: Schur.cpp:1745
MoFEM::solve_add
static PetscErrorCode solve_add(Mat mat, Vec x, Vec y)
Definition: Schur.cpp:1226
MoFEM::OpSchurAssembleBase::OpSchurAssembleBase
OpSchurAssembleBase()=delete
lapack_dgesv
static __CLPK_integer lapack_dgesv(__CLPK_integer n, __CLPK_integer nrhs, __CLPK_doublereal *a, __CLPK_integer lda, __CLPK_integer *ipiv, __CLPK_doublereal *b, __CLPK_integer ldb)
Definition: lapack_wrap.h:176
MoFEM::Exceptions::MoFEMErrorCode
PetscErrorCode MoFEMErrorCode
MoFEM/PETSc error code.
Definition: Exceptions.hpp:56
MoFEM::Types::MatrixDouble
UBlasMatrix< double > MatrixDouble
Definition: Types.hpp:77
MoFEM::OpSchurAssembleEnd< SchurDSYSV >
Definition: Schur.cpp:128
MoFEM::BlockStructure::ghostX
SmartPetscObj< Vec > ghostX
Definition: Schur.cpp:282
A
constexpr AssemblyType A
Definition: operators_tests.cpp:30
MoFEM::SchurElemMats::getMat
auto & getMat() const
Definition: Schur.cpp:156
MoFEM::SchurDSYSV
Definition: Schur.cpp:928
MoFEM::OpSchurAssembleEndImpl::fieldEnts
std::vector< boost::shared_ptr< Range > > fieldEnts
Definition: Schur.cpp:107
MoFEM::SchurBackendMatSetValuesPtr::MatSetValuesPtr
boost::function< MoFEMErrorCode(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)> MatSetValuesPtr
Definition: Schur.hpp:190
MoFEM::DiagBlockIndex::Indexes::getNbRows
int getNbRows() const
Definition: Schur.cpp:227
MoFEM::SchurEvents::SchurEvents
SchurEvents()
Definition: Schur.cpp:299
MoFEM::SchurEvents::MOFEM_EVENT_BlockStructureMult
static PetscLogEvent MOFEM_EVENT_BlockStructureMult
Definition: Schur.hpp:30
MoFEM::DiagBlockIndex::Indexes::col
int col
Definition: Schur.cpp:246
MoFEM::shell_block_preconditioner_mat_asmb_wrap
MoFEMErrorCode shell_block_preconditioner_mat_asmb_wrap(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:1920
MoFEM::SchurElemMats::colIndices
static boost::ptr_vector< VectorInt > colIndices
Definition: Schur.cpp:199
MoFEM::SchurEvents::MOFEM_EVENT_schurMatSetValues
static PetscLogEvent MOFEM_EVENT_schurMatSetValues
Definition: Schur.hpp:27
MoFEM::BlockStructure::ghostY
SmartPetscObj< Vec > ghostY
Definition: Schur.cpp:283
sdf.r
int r
Definition: sdf.py:8
MoFEM::OpSchurAssembleEndImpl::bM
MatrixDouble bM
Definition: Schur.cpp:114
MoFEM::OpSchurAssembleBaseImpl::OpSchurAssembleBaseImpl
OpSchurAssembleBaseImpl()=delete
MoFEM::DiagBlockIndex::Indexes::getCol
int getCol() const
Definition: Schur.cpp:226
ROW
@ ROW
Definition: definitions.h:136
MoFEM::OpSchurAssembleEndImpl::OpSchurAssembleEndImpl
OpSchurAssembleEndImpl(std::vector< std::string > fields_name, std::vector< boost::shared_ptr< Range >> field_ents, SmartPetscObj< AO > schur_ao, SmartPetscObj< Mat > schur_mat, bool sym_schur, bool symm_op)
Construct a new Op Schur Assemble End object.
Definition: Schur.cpp:510
MoFEM::DiagBlockIndex::Indexes
block data indexes
Definition: Schur.cpp:214
MoFEM::debug_schur
constexpr bool debug_schur
Definition: Schur.cpp:12
MoFEM::Interface
DeprecatedCoreInterface Interface
Definition: Interface.hpp:2010
MoFEM::DiagBlockIndex::Indexes::nb_rows
int nb_rows
Definition: Schur.cpp:247
MoFEM::schur_mat_set_values_wrap
MoFEMErrorCode schur_mat_set_values_wrap(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:2256
c
const double c
speed of light (cm/ns)
Definition: initial_diffusion.cpp:39
MoFEM::SchurElemMats::col_mi_tag
Definition: Schur.cpp:176
MoFEM::DiagBlockIndex::Indexes::colShift
int colShift() const
Definition: Schur.cpp:237
MoFEM::SchurElemMats::iDX
const size_t iDX
Definition: Schur.cpp:152
MoFEM::SchurElemMats::uidCol
UId uidCol
Definition: Schur.cpp:154
PlasticOps::M
FTensor::Index< 'M', 3 > M
Definition: PlasticOps.hpp:117
MoFEM::OpSchurAssembleEnd< SchurDGESV >
Definition: Schur.cpp:136
CHKERR
#define CHKERR
Inline error check.
Definition: definitions.h:548
MoFEM::DiagBlockIndex::Indexes::getRow
int getRow() const
Definition: Schur.cpp:225
MoFEM::createDMVector
auto createDMVector(DM dm)
Get smart vector from DM.
Definition: DMMoFEM.hpp:1099
MoFEM::mult_add
static PetscErrorCode mult_add(Mat mat, Vec x, Vec y)
Definition: Schur.cpp:1211
MoFEM::createBlockMat
SchurShellMatData createBlockMat(DM dm, boost::shared_ptr< BlockStructure > data)
Create a Schur Mat object.
Definition: Schur.cpp:1379
MoFEM::OpSchurAssembleEndImpl::symSchur
bool symSchur
Definition: Schur.cpp:110
MoFEM::DMoFEMLoopFiniteElementsUpAndLowRank
PetscErrorCode DMoFEMLoopFiniteElementsUpAndLowRank(DM dm, const char fe_name[], MoFEM::FEMethod *method, int low_rank, int up_rank, CacheTupleWeakPtr cache_ptr=CacheTupleSharedPtr())
Executes FEMethod for finite elements in DM.
Definition: DMMoFEM.cpp:567
MoFEM::mat_zero
static PetscErrorCode mat_zero(Mat m)
Definition: Schur.cpp:1350
MoFEM::OpSchurAssembleBegin
Clear Schur complement internal data.
Definition: Schur.cpp:61
MoFEM::OpSchurAssembleEndImpl::abM
MatrixDouble abM
Definition: Schur.cpp:114
MoFEM::SchurElemMats::SchurElemMats
SchurElemMats(const size_t idx, const UId uid_row, const UId uid_col)
Definition: Schur.cpp:315
MoFEM
implementation of Data Operators for Forces and Sources
Definition: Common.hpp:10
MoFEM::SchurEvents::MOFEM_EVENT_BlockStructureSolve
static PetscLogEvent MOFEM_EVENT_BlockStructureSolve
Definition: Schur.hpp:31
a
constexpr double a
Definition: approx_sphere.cpp:30
MoFEM::schurSwitchPreconditioner
MoFEMErrorCode schurSwitchPreconditioner(boost::shared_ptr< BlockStructure > block_mat_data)
Switch preconditioner.
Definition: Schur.cpp:2365
BITFIELDID_SIZE
#define BITFIELDID_SIZE
max number of fields
Definition: definitions.h:233
MoFEM::SchurElemMats::assembleStorage
static MoFEMErrorCode assembleStorage(const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:320
MoFEM::DiagBlockIndex::Indexes::mat_shift
int mat_shift
Definition: Schur.cpp:251
MoFEM::FieldEntity::getHandleFromUniqueId
static auto getHandleFromUniqueId(const UId uid)
Get the Handle From Unique Id.
Definition: FieldEntsMultiIndices.hpp:192
MoFEM::Types::UId
uint128_t UId
Unique Id.
Definition: Types.hpp:31
MoFEM::SchurDGESV::invertMat
static auto invertMat(MatrixDouble &m, MatrixDouble &inv)
Definition: Schur.cpp:966
MoFEM::DiagBlockIndex::~DiagBlockIndex
virtual ~DiagBlockIndex()=default
MoFEM::SchurElemMats::uid_mi_tag
Definition: Schur.cpp:175
MoFEM::OpSchurAssembleEndImpl::blockMat
MatrixDouble blockMat
Definition: Schur.cpp:112
MoFEM::SchurElemMats::getColInd
auto & getColInd() const
Definition: Schur.cpp:158
MoFEM::BlockStructure::parentBlockStructurePtr
boost::shared_ptr< std::vector< double > > parentBlockStructurePtr
Definition: Schur.cpp:287
convert.type
type
Definition: convert.py:64
MoFEM::FieldEntity::getLocalUniqueIdCalculate
UId getLocalUniqueIdCalculate()
Get the Local Unique Id Calculate object.
Definition: FieldEntsMultiIndices.hpp:136
MoFEM::DiagBlockIndex::Indexes::rowShift
int rowShift() const
Definition: Schur.cpp:233
MoFEM::OpSchurAssembleEndImpl::abcM
MatrixDouble abcM
Definition: Schur.cpp:114
MoFEM::DiagBlockIndex::Indexes::uid_col
UId uid_col
Definition: Schur.cpp:243
MoFEM::SchurElemMats::~SchurElemMats
virtual ~SchurElemMats()=default
EshelbianPlasticity::P
@ P
Definition: EshelbianContact.cpp:197
MoFEM::mult_schur_block_shell
static MoFEMErrorCode mult_schur_block_shell(Mat mat, Vec x, Vec y, InsertMode iora, boost::function< int(DiagBlockIndex::BlockIndex::nth_index< 0 >::type::iterator)> shift_extractor, boost::shared_ptr< std::vector< double >> data_blocks_ptr, bool multiply_by_preconditioner)
Definition: Schur.cpp:1415
MoFEM::DiagBlockIndex::Indexes::getLocRow
int getLocRow() const
Definition: Schur.cpp:229
MoFEM::cmp_uid_lo
static auto cmp_uid_lo(const boost::weak_ptr< FieldEntity > &a, const UId &b)
Definition: ForcesAndSourcesCore.cpp:18
MoFEM::DiagBlockIndex::Indexes::loc_col
int loc_col
Definition: Schur.cpp:250
COL
@ COL
Definition: definitions.h:136
MoFEM::max_gemv_size
constexpr int max_gemv_size
Definition: Schur.cpp:1413
MoFEM::DiagBlockIndex::BlockIndex
multi_index_container< Indexes, indexed_by< ordered_unique< composite_key< Indexes, const_mem_fun< Indexes, UId, &Indexes::getRowUId >, const_mem_fun< Indexes, UId, &Indexes::getColUId > > >, ordered_non_unique< const_mem_fun< Indexes, UId, &Indexes::getFEUId > > > > BlockIndex
Definition: Schur.cpp:275
MoFEM::OpSchurAssembleBegin::doWork
MoFEMErrorCode doWork(int side, EntityType type, EntitiesFieldData::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
Definition: Schur.cpp:496
MoFEM::SchurElemMats::schurL2Storage
static SchurElemStorage schurL2Storage
Definition: Schur.cpp:203
MoFEM::SchurDSYSV::invertMat
static auto invertMat(MatrixDouble &m, MatrixDouble &inv)
Definition: Schur.cpp:930
MoFEM::EntitiesFieldData::EntData::getIndices
const VectorInt & getIndices() const
Get global indices of dofs on entity.
Definition: EntitiesFieldData.hpp:1214
MoFEM::OpSchurAssembleBaseImpl::assembleSchurMat
MoFEMErrorCode assembleSchurMat(Mat S, const UId &uid_row, VectorInt &row_ind, const UId &uid_col, VectorInt &col_ind, MatrixDouble &m, InsertMode iora)
Assemble Schur complement.
Definition: Schur.cpp:38
MoFEM::createOpSchurAssembleBegin
OpSchurAssembleBase * createOpSchurAssembleBegin()
Definition: Schur.cpp:2181
MoFEM::SchurElemMats::maxIndexCounter
static size_t maxIndexCounter
Definition: Schur.cpp:201
lapack_dgetrf
static __CLPK_integer lapack_dgetrf(__CLPK_integer m, __CLPK_integer n, __CLPK_doublereal *a, __CLPK_integer lda, __CLPK_integer *ipiv)
Definition: lapack_wrap.h:157
MoFEM::SchurEvents::MOFEM_EVENT_opSchurAssembleEnd
static PetscLogEvent MOFEM_EVENT_opSchurAssembleEnd
Definition: Schur.hpp:28
MoFEM::solve
static PetscErrorCode solve(Mat mat, Vec x, Vec y)
Definition: Schur.cpp:1223
MoFEM::SchurElemMatsBlock
Definition: Schur.cpp:2299
MoFEM::DiagBlockIndex::Indexes::getLocCol
int getLocCol() const
Definition: Schur.cpp:230
MOFEM_OPERATION_UNSUCCESSFUL
@ MOFEM_OPERATION_UNSUCCESSFUL
Definition: definitions.h:34
MoFEM::type_from_handle
auto type_from_handle(const EntityHandle h)
get type from entity handle
Definition: Templates.hpp:1898
MoFEM::EntitiesFieldData::EntData::getFieldDofs
const VectorDofs & getFieldDofs() const
get dofs data stature FEDofEntity
Definition: EntitiesFieldData.hpp:1269
MoFEM::mult
static PetscErrorCode mult(Mat mat, Vec x, Vec y)
Definition: Schur.cpp:1199
MoFEM::DiagBlockIndex::Indexes::getMatShift
int & getMatShift() const
Definition: Schur.cpp:231
MoFEM::createSchurNestedMatrix
std::pair< SmartPetscObj< Mat >, boost::shared_ptr< NestSchurData > > createSchurNestedMatrix(boost::shared_ptr< NestSchurData > schur_net_data_ptr)
Create a Mat Diag Blocks object.
Definition: Schur.cpp:2153
MoFEM::MatSetValues< SchurElemMatsPreconditionedBlock >
MoFEMErrorCode MatSetValues< SchurElemMatsPreconditionedBlock >(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:2356
i
FTensor::Index< 'i', SPACE_DIM > i
Definition: hcurl_divergence_operator_2d.cpp:27
MoFEM::SchurElemMatsPreconditionedBlock
Definition: Schur.cpp:2332
MoFEM::DiagBlockIndex::Indexes::getFEUId
UId getFEUId() const
Definition: Schur.cpp:224
MoFEM::SchurElemMats::schurElemMats
static boost::ptr_vector< SchurElemMats > schurElemMats
Definition: Schur.cpp:200
MoFEM::SchurElemMats::MatSetValues
static MoFEMErrorCode MatSetValues(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:2270
MoFEM::OpSchurAssembleBase
Definition: Schur.hpp:40
MoFEM::setSchurMatSolvePC
MoFEMErrorCode setSchurMatSolvePC(SmartPetscObj< PC > pc)
Definition: Schur.cpp:2241
convert.n
n
Definition: convert.py:82
MoFEM::SchurElemMats
Schur complement data storage.
Definition: Schur.cpp:147
MoFEM::OpSchurAssembleEndImpl::fieldsName
std::vector< std::string > fieldsName
Definition: Schur.cpp:106
MoFEM::setSchurA00MatSolvePC
MoFEMErrorCode setSchurA00MatSolvePC(SmartPetscObj< PC > pc)
Set PC for A00 block.
Definition: Schur.cpp:2223
N
const int N
Definition: speed_test.cpp:3
MAX_DOFS_ON_ENTITY
#define MAX_DOFS_ON_ENTITY
Maximal number of DOFs on entity.
Definition: definitions.h:249
MoFEM::OpSchurAssembleEndImpl::schurMat
SmartPetscObj< Mat > schurMat
Definition: Schur.cpp:109
MoFEM::DiagBlockIndex::Indexes::Indexes
Indexes(UId uid_row, UId uid_col, UId uid_fe, int row, int col, int nb_rows, int nb_cols, int loc_row, int loc_col, int mat_shift)
Definition: Schur.cpp:216
v
const double v
phase velocity of light in medium (cm/ns)
Definition: initial_diffusion.cpp:40
block_mat
SmartPetscObj< Mat > block_mat
Definition: schur_test_diag_mat.cpp:39
MoFEM::setSchurBlockMatOps
static MoFEMErrorCode setSchurBlockMatOps(Mat mat_raw)
Definition: Schur.cpp:1362
MoFEM::EssentialBcStorage
[Storage and set boundary conditions]
Definition: FormsIntegrators.hpp:14
MoFEM::OpSchurAssembleEndImpl
Assemble Schur complement (Implementation)
Definition: Schur.cpp:74
MOFEM_TAG_AND_LOG
#define MOFEM_TAG_AND_LOG(channel, severity, tag)
Tag and log in channel.
Definition: LogManager.hpp:362
MoFEM::getInterfacePtr
auto getInterfacePtr(DM dm)
Get the Interface Ptr object.
Definition: DMMoFEM.hpp:1033
MOFEM_LOG
#define MOFEM_LOG(channel, severity)
Log.
Definition: LogManager.hpp:308
MoFEM::vectorDuplicate
SmartPetscObj< Vec > vectorDuplicate(Vec vec)
Create duplicate vector of smart vector.
Definition: PetscSmartObj.hpp:221
MoFEM::DiagBlockIndex::Indexes::loc_row
int loc_row
Definition: Schur.cpp:249
MoFEM::shell_block_mat_asmb_wrap
MoFEMErrorCode shell_block_mat_asmb_wrap(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:1900
MoFEM::MatSetValues< SchurElemMats >
MoFEMErrorCode MatSetValues< SchurElemMats >(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:2249
MoFEM::Core
CoreTmp< 0 > Core
Definition: Core.hpp:1148
HenckyOps::f
auto f
Definition: HenckyOps.hpp:15
MoFEM::Types::VectorInt
UBlasVector< int > VectorInt
Definition: Types.hpp:67
j
FTensor::Index< 'j', 3 > j
Definition: matrix_function.cpp:19
MoFEM::SchurElemMats::uidRow
UId uidRow
Definition: Schur.cpp:153
MoFEM::createSchurNestedMatrixStruture
boost::shared_ptr< NestSchurData > createSchurNestedMatrixStruture(std::pair< SmartPetscObj< DM >, SmartPetscObj< DM >> dms, boost::shared_ptr< BlockStructure > block_mat_data_ptr, std::vector< std::string > fields_names, std::vector< boost::shared_ptr< Range >> field_ents, bool add_preconditioner_block)
Get the Schur Nest Mat Array object.
Definition: Schur.cpp:1944
MoFEM::OpSchurAssembleEnd
Assemble Schur complement.
Definition: Schur.cpp:125
MoFEM::Exceptions::ierr
static MoFEMErrorCodeGeneric< PetscErrorCode > ierr
Definition: Exceptions.hpp:76
MoFEM::getProblemPtr
auto getProblemPtr(DM dm)
get problem pointer from DM
Definition: DMMoFEM.hpp:1044
EigenMatrix::Vec
const FTensor::Tensor2< T, Dim, Dim > Vec
Definition: MatrixFunction.hpp:66
MOFEM_DATA_INCONSISTENCY
@ MOFEM_DATA_INCONSISTENCY
Definition: definitions.h:31
MoFEM::DiagBlockIndex::Indexes::getRowUId
UId getRowUId() const
Definition: Schur.cpp:222
sdf_hertz_2d_axisymm_plane.d
float d
Definition: sdf_hertz_2d_axisymm_plane.py:4
FTensor::transform
Tensor2_Expr< transform_Tensor2< A, B, T, Dim0, Dim1, i, j >, T, Dim0, Dim1, i, j > transform(const Tensor2_Expr< A, T, Dim0, Dim1, i, j > &a, B function)
Definition: Tensor2_transform.hpp:27
MoFEM::SchurShellMatData
std::pair< SmartPetscObj< Mat >, boost::shared_ptr< BlockStructure > > SchurShellMatData
Definition: Schur.hpp:88
MoFEM::Types::VectorDouble
UBlasVector< double > VectorDouble
Definition: Types.hpp:68
MoFEM::DiagBlockIndex::Indexes::getNbCols
int getNbCols() const
Definition: Schur.cpp:228
MoFEM::DiagBlockIndex::Indexes::uid_row
UId uid_row
Definition: Schur.cpp:242
m
FTensor::Index< 'm', 3 > m
Definition: shallow_wave.cpp:80
MoFEM::BlockStructure::multiplyByPreconditioner
bool multiplyByPreconditioner
Definition: Schur.cpp:289
MoFEM::OpSchurAssembleBaseImpl
Definition: Schur.cpp:14
MoFEM::SchurElemMats::locMats
static boost::ptr_vector< MatrixDouble > locMats
Definition: Schur.cpp:197
MoFEM::createBlockMatStructure
boost::shared_ptr< BlockStructure > createBlockMatStructure(DM dm, SchurFEOpsFEandFields schur_fe_op_vec)
Create a Mat Diag Blocks object.
Definition: Schur.cpp:1009
MoFEMFunctionBeginHot
#define MoFEMFunctionBeginHot
First executable line of each MoFEM function, used for error handling. Final line of MoFEM functions ...
Definition: definitions.h:453
MoFEM::Unique_mi_tag
Definition: TagMultiIndices.hpp:18
MoFEM::DiagBlockIndex::Indexes::getColUId
UId getColUId() const
Definition: Schur.cpp:223
sdf_wavy_2d.ind
float ind
Definition: sdf_wavy_2d.py:7
MoFEM::SmartPetscObj< AO >
MoFEM::MatSetValues< SchurElemMatsBlock >
MoFEMErrorCode MatSetValues< SchurElemMatsBlock >(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:2325
MoFEM::ent_form_type_and_id
auto ent_form_type_and_id(const EntityType type, const EntityID id)
get entity handle from type and id
Definition: Templates.hpp:1906
MoFEM::BlockStructure
Definition: Schur.cpp:280
MoFEM::SchurDGESV
Definition: Schur.cpp:964
MoFEM::SchurEvents::MOFEM_EVENT_zeroRowsAndCols
static PetscLogEvent MOFEM_EVENT_zeroRowsAndCols
Definition: Schur.hpp:32
MoFEM::OpSchurAssembleEndImpl::schurAO
SmartPetscObj< AO > schurAO
Definition: Schur.cpp:108
MoFEM::schurSaveBlockMesh
MoFEMErrorCode schurSaveBlockMesh(boost::shared_ptr< BlockStructure > block_mat_data, std::string filename)
Save block matrix as a mesh.
Definition: Schur.cpp:2380
MoFEM::SchurElemMats::getRowInd
auto & getRowInd() const
Definition: Schur.cpp:157
MoFEM::FieldEntity::getFieldBitNumberFromUniqueId
static auto getFieldBitNumberFromUniqueId(const UId uid)
Get the Field Bit Number From Unique Id.
Definition: FieldEntsMultiIndices.hpp:205
convert.int
int
Definition: convert.py:64
MoFEMFunctionReturn
#define MoFEMFunctionReturn(a)
Last executable line of each PETSc function used for error handling. Replaces return()
Definition: definitions.h:429
MOFEM_NOT_IMPLEMENTED
@ MOFEM_NOT_IMPLEMENTED
Definition: definitions.h:32
MoFEM::DiagBlockIndex::Indexes::uid_fe
UId uid_fe
Definition: Schur.cpp:244
MoFEM::SchurEvents::MOFEM_EVENT_BlockStructureSetValues
static PetscLogEvent MOFEM_EVENT_BlockStructureSetValues
Definition: Schur.hpp:29
MoFEMFunctionBegin
#define MoFEMFunctionBegin
First executable line of each MoFEM function, used for error handling. Final line of MoFEM functions ...
Definition: definitions.h:359
MoFEM::SchurFEOpsFEandFields
std::vector< std::pair< std::string, std::vector< SchurFieldPair > > > SchurFEOpsFEandFields
Definition: Schur.hpp:73
MoFEM::OpSchurAssembleEndImpl::doWorkImpl
MoFEMErrorCode doWorkImpl(int side, EntityType type, EntitiesFieldData::EntData &data)
Definition: Schur.cpp:521
MoFEM::isAllGather
auto isAllGather(IS is)
IS All gather.
Definition: PetscSmartObj.hpp:300