v0.14.0
Schur.cpp
Go to the documentation of this file.
1 /**
2  * @file Schur.cpp
3  * @brief Implementation of Schur Complement
4  * @date 2023-02-03
5  *
6  * @copyright Copyright (c) 2023
7  *
8  */
9 
10 namespace MoFEM {
11 
12 constexpr bool debug_schur = false; // compile-time switch: enables the Sev::noisy Schur-assembly logging guarded by `if constexpr (debug_schur)` below
13 
14 /**
15  * @brief Clear Schur complement internal data
16  *
17  */
// NOTE(review): doxygen-extraction artifact — the struct header and the tail of
// the doWork() signature (original lines 18, 20, 23, which were hyperlinks in the
// HTML) are missing from this capture. Restore them from the upstream Schur.cpp;
// this fragment is presumably the OpSchurAssembleBegin operator — confirm.
19 
21 
22  MoFEMErrorCode doWork(int side, EntityType type,
24 };
25 
26 /**
27  * @brief Assemble Schur complement (Implementation)
28  *
29  */
// NOTE(review): the struct header (original line 30), constructor names (44, 65),
// the doWork/doWorkImpl signature tails (77) and the scratch-matrix member
// declarations (86-89, presumably invMat / invDiagOffMat / offMatInvDiagOffMat /
// transOffMatInvDiagOffMat used by doWorkImpl below) were stripped from this
// doxygen capture — restore from upstream before compiling.
31 
32  /**
33  * @brief Construct a new Op Schur Assemble End object
34  *
35  * @param fields_name list of fields
36  * @param field_ents list of entities on which schur complement is applied
37  * (can be empty)
38  * @param sequence_of_aos list of maps from base problem to Schur complement
39  * matrix
40  * @param sequence_of_mats list of Schur complement matrices
41  * @param sym_schur true if Schur complement is symmetric
42  * @param symm_op true if block diagonal is symmetric
43  */
45  std::vector<std::string> fields_name,
46  std::vector<boost::shared_ptr<Range>> field_ents,
47  std::vector<SmartPetscObj<AO>> sequence_of_aos,
48  std::vector<SmartPetscObj<Mat>> sequence_of_mats,
49  std::vector<bool> sym_schur, bool symm_op = true,
50  boost::shared_ptr<BlockStructure> diag_blocks = nullptr);
51 
52  /**
53  * @brief Construct a new Op Schur Assemble End object
54  *
55  * @param fields_name list of fields
56  * @param field_ents list of entities on which schur complement is applied
57  * (can be empty)
58  * @param sequence_of_aos list of maps from base problem to Schur complement
59  * matrix
60  * @param sequence_of_mats list of Schur complement matrices
61  * @param sym_schur true if Schur complement is symmetric
62  * @param diag_eps add epsilon on diagonal of inverted matrix
63  * @param symm_op true if block diagonal is symmetric
64  */
66  std::vector<std::string> fields_name,
67  std::vector<boost::shared_ptr<Range>> field_ents,
68  std::vector<SmartPetscObj<AO>> sequence_of_aos,
69  std::vector<SmartPetscObj<Mat>> sequence_of_mats,
70  std::vector<bool> sym_schur, std::vector<double> diag_eps,
71  bool symm_op = true,
72  boost::shared_ptr<BlockStructure> diag_blocks = nullptr);
73 
74 protected:
  // doWorkImpl is parameterised on the inverter policy I (SchurDSYSV or
  // SchurDGESV, see the explicit doWork specialisations further down).
75  template <typename I>
76  MoFEMErrorCode doWorkImpl(int side, EntityType type,
78 
  // Per-field configuration; the ss-th entry of each vector is consumed
  // together in doWorkImpl's assemble loop.
79  std::vector<std::string> fieldsName;
80  std::vector<boost::shared_ptr<Range>> fieldEnts;
81  std::vector<SmartPetscObj<AO>> sequenceOfAOs;
82  std::vector<SmartPetscObj<Mat>> sequenceOfMats;
83  std::vector<bool> symSchur;
84  std::vector<double> diagEps;
85 
90 
  // Optional block structure for assembling inverted diagonal blocks (A00).
91  boost::shared_ptr<BlockStructure> diagBlocks;
92 };
93 
94 struct SchurDSYSV; ///< SY symmetric
95 struct SchurDGESV; ///< GE general (i.e., nonsymmetric, in some cases
96  ///< rectangular)
97 
98 /**
99  * @brief Assemble Schur complement
100  *
101  */
102 template <typename I> struct OpSchurAssembleEnd;
103 
// NOTE(review): the specialisation headers (original lines 105-106, 112-113)
// and the doWork signature tails (108, 115) were stripped from this doxygen
// capture — these are the OpSchurAssembleEnd<SchurDSYSV> and
// OpSchurAssembleEnd<SchurDGESV> specialisations; restore from upstream.
104 template <>
107  MoFEMErrorCode doWork(int side, EntityType type,
109 };
110 
111 template <>
114  MoFEMErrorCode doWork(int side, EntityType type,
116 };
117 
118 /**
119  * @brief Schur complement data storage
120  *
121  */
122 struct SchurElemMats : public boost::enable_shared_from_this<SchurElemMats> {
123 
124  SchurElemMats(const size_t idx, const UId uid_row, const UId uid_col);
125  virtual ~SchurElemMats() = default;
126 
  // iDX indexes this element's slots in the static locMats / rowIndices /
  // colIndices arrays below.
127  const size_t iDX;
// NOTE(review): the uidRow / uidCol member declarations (original lines
// 128-129) were stripped from this capture — they are referenced by the
// composite key below; restore from upstream.
130 
131  inline auto &getMat() const { return locMats[iDX]; }
132  inline auto &getRowInd() const { return rowIndices[iDX]; }
133  inline auto &getColInd() const { return colIndices[iDX]; }
134 
135  static MoFEMErrorCode MatSetValues(Mat M,
136  const EntitiesFieldData::EntData &row_data,
137  const EntitiesFieldData::EntData &col_data,
138  const MatrixDouble &mat, InsertMode iora);
139 
140 protected:
141  static MoFEMErrorCode
143  const EntitiesFieldData::EntData &col_data,
144  const MatrixDouble &mat, InsertMode iora);
145 
148 
  // Multi-index tags: lookup by (uidRow, uidCol) pair, or by column uid alone.
149  struct uid_mi_tag {};
150  struct col_mi_tag {};
151 
152  using SchurElemStorage = multi_index_container<
153  const SchurElemMats *,
154  indexed_by<
155 
156  ordered_unique<
157  tag<uid_mi_tag>,
158  composite_key<
160 
161  member<SchurElemMats, const UId, &SchurElemMats::uidRow>,
162  member<SchurElemMats, const UId, &SchurElemMats::uidCol>
163 
164  >>,
165 
166  ordered_non_unique<tag<col_mi_tag>, member<SchurElemMats, const UId,
168 
169  >>;
170 
  // Static pools shared by all elements; maxIndexCounter is the high-water
  // mark of slots in use (pools are reused across elements, not shrunk).
171  static boost::ptr_vector<MatrixDouble> locMats;
172  static boost::ptr_vector<VectorInt> rowIndices;
173  static boost::ptr_vector<VectorInt> colIndices;
174  static boost::ptr_vector<SchurElemMats> schurElemMats;
175  static size_t maxIndexCounter;
176 
178 };
179 
// NOTE(review): the DiagBlockIndex struct header (original line 180) and the
// constructor's member-initialiser lines (192-193) were stripped from this
// doxygen capture — restore from upstream.
181 
182  virtual ~DiagBlockIndex() = default;
183 
184  /**
185  * @brief block data indexes
186  *
187  */
188  struct Indexes {
189 
190  Indexes(int row, int col, int nb_rows, int nb_cols, int loc_row,
191  int loc_col, int mat_shift, int inv_shift)
194  inv_shift(inv_shift) {}
195 
196  inline int getRow() const { return row; }
197  inline int getCol() const { return col; }
198  inline int getNbRows() const { return nb_rows; }
199  inline int getNbCols() const { return nb_cols; }
200  inline int getLocRow() const { return loc_row; }
201  inline int getLocCol() const { return loc_col; }
  // mat_shift / inv_shift are mutable so they can be updated through the
  // const iterators handed out by the multi-index container below.
202  inline int &getMatShift() const { return mat_shift; }
203  inline int &getInvShift() const { return inv_shift; }
204 
205  inline int rowShift() const {
206  return getRow() + getNbRows();
207  } // shift such that lower bound is included
208 
209  inline int colShift() const {
210  return getCol() + getNbCols();
211  } // shift such that lower bound is included
212 
213  private:
214  int row;
215  int col;
216  int nb_rows;
217  int nb_cols;
218  int loc_row;
219  int loc_col;
220  mutable int mat_shift;
221  mutable int inv_shift;
222  };
223 
  // Four views over the same block set: (0) by local row/col/size,
  // (1) unique by global row/col/size, (2) by one-past-end row, (3) by
  // one-past-end column.
224  using BlockIndex = multi_index_container<
225 
226  Indexes,
227 
228  indexed_by<
229 
230  ordered_non_unique<
231 
232  composite_key<Indexes,
233 
234  const_mem_fun<Indexes, int, &Indexes::getLocRow>,
235  const_mem_fun<Indexes, int, &Indexes::getLocCol>,
236  const_mem_fun<Indexes, int, &Indexes::getNbRows>,
237  const_mem_fun<Indexes, int, &Indexes::getNbCols>>
238 
239  >,
240 
241  ordered_unique<
242 
243  composite_key<Indexes,
244 
245  const_mem_fun<Indexes, int, &Indexes::getRow>,
246  const_mem_fun<Indexes, int, &Indexes::getCol>,
247  const_mem_fun<Indexes, int, &Indexes::getNbRows>,
248  const_mem_fun<Indexes, int, &Indexes::getNbCols>>
249 
250  >,
251 
252  ordered_non_unique<
253 
254  const_mem_fun<Indexes, int, &Indexes::rowShift>
255 
256  >,
257 
258  ordered_non_unique<
259 
260  const_mem_fun<Indexes, int, &Indexes::colShift>
261 
262  >
263 
264  >>;
265 
266  BlockIndex blockIndex; ///< blocks indexes storage
267 };
268 
// NOTE(review): several struct headers (original lines 269, 271-272, 282-283,
// 287, 290-297) were stripped from this doxygen capture; the fragment below is
// presumably BlockStructure (extending DiagBlockIndex), a solver-view typedef,
// and the SchurEvents constructor body — confirm against upstream.
270 
273 
  // Flat storage for block values; inv/preconditioner/parent buffers are
  // optional (checked for null before use elsewhere in this file).
274  boost::shared_ptr<std::vector<double>> dataBlocksPtr;
275  boost::shared_ptr<std::vector<double>> dataInvBlocksPtr;
276  boost::shared_ptr<std::vector<double>> preconditionerBlocksPtr;
277  boost::shared_ptr<std::vector<double>> parentBlockStructurePtr;
278 
280 };
281 
283 
284  using SchurSolverView =
285  std::pair<std::vector<const Indexes *>, std::vector<int>>;
286 
288 
296 
  // Register PETSc profiling events used throughout this translation unit.
298  PetscLogEventRegister("schurMatSetVal", 0, &MOFEM_EVENT_schurMatSetValues);
299  PetscLogEventRegister("opSchurAsmEnd", 0, &MOFEM_EVENT_opSchurAssembleEnd);
300  PetscLogEventRegister("blockSetVal", 0, &MOFEM_EVENT_BlockStructureSetValues);
301  PetscLogEventRegister("blockMult", 0, &MOFEM_EVENT_BlockStructureMult);
302  PetscLogEventRegister("blockSolve", 0, &MOFEM_EVENT_BlockStructureSolve);
303  PetscLogEventRegister("schurZeroRandC", 0, &MOFEM_EVENT_zeroRowsAndCols);
304 }
305 
// Definitions of SchurElemMats static storage pools.
// NOTE(review): the definitions originally on lines 306 and 311 (presumably
// schurL2Storage and maxIndexCounter) were stripped from this doxygen capture.
307 boost::ptr_vector<MatrixDouble> SchurElemMats::locMats;
308 boost::ptr_vector<VectorInt> SchurElemMats::rowIndices;
309 boost::ptr_vector<VectorInt> SchurElemMats::colIndices;
310 boost::ptr_vector<SchurElemMats> SchurElemMats::schurElemMats;
312 
// Bind this element to slot idx of the static pools and record its row/col
// entity uids.
313 SchurElemMats::SchurElemMats(const size_t idx, const UId uid_row,
314  const UId uid_col)
315  : iDX(idx), uidRow(uid_row), uidCol(uid_col) {}
// NOTE(review): the function signature (original lines 317-318, presumably
// SchurElemMats::MatSetValues or its Impl) and a few statement heads (321, 381,
// 387, 415, 418, 420, 426, 452, 476, 487 — mostly SchurElemMats:: qualified
// names that were hyperlinks) were stripped from this doxygen capture.
// Purpose: accumulate an element-level (row_data x col_data) matrix into the
// static Schur storage keyed by (row uid, col uid), instead of assembling it
// straight into a PETSc Mat.
316 
319  const EntitiesFieldData::EntData &col_data,
320  const MatrixDouble &mat, InsertMode iora) {
322 
323 #ifndef NDEBUG
324  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_schurMatSetValues, 0, 0, 0, 0);
325 #endif // NDEBUG
326 
327  // get row indices, in case of store, get indices from storage
328  // storage keeps marked indices to manage boundary conditions
329  auto get_row_indices = [&]() -> const VectorInt & {
330  if (auto e_ptr = row_data.getFieldEntities()[0]) {
331  if (auto stored_data_ptr =
332  e_ptr->getSharedStoragePtr<EssentialBcStorage>()) {
333  return stored_data_ptr->entityIndices;
334  }
335  }
336  return row_data.getIndices();
337  };
338 
339  const auto &row_ind = get_row_indices();
340  const auto &col_ind = col_data.getIndices();
341 
342  const auto nb_rows = row_ind.size();
343  const auto nb_cols = col_ind.size();
344 
345 #ifndef NDEBUG
346  if (mat.size1() != nb_rows) {
347  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
348  "Wrong mat size %d != %d", mat.size1(), nb_rows);
349  }
350  if (mat.size2() != nb_cols) {
351  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
352  "Wrong mat size %d != %d", mat.size2(), nb_cols);
353  }
354 #endif // NDEBUG
355 
356  // get entity uid
357  auto get_uid = [](auto &data) {
358  if (data.getFieldEntities().size() == 1) {
359 
360  return data.getFieldEntities()[0]->getLocalUniqueId();
361 
362  } else {
363 
364  // It is assumed that the sum of entity ids yields a unique id; that is
365  // not strictly true, but the colliding corner case is improbable.
366 
367  // @todo: debug version should have test
368 
369  auto &uid0 = data.getFieldEntities()[0]->getLocalUniqueId();
370  auto field_id0 = FieldEntity::getFieldBitNumberFromUniqueId(uid0);
371  auto ent0 = FieldEntity::getHandleFromUniqueId(uid0);
372  auto type0 = type_from_handle(ent0);
373  auto id = id_from_handle(ent0);
374 
375  for (auto i = 1; i < data.getFieldEntities().size(); ++i) {
376 
377  // get entity id from ent
378  id += id_from_handle(
379 
380  // get entity handle from unique uid
382  data.getFieldEntities()[i]->getLocalUniqueId())
383 
384  );
385  }
386 
388  field_id0,
389 
390  ent_form_type_and_id(type0, id)
391 
392  );
393  }
394  };
395 
396  auto uid_row = get_uid(row_data);
397  auto uid_col = get_uid(col_data);
398 
399  auto it =
400  SchurElemMats::schurL2Storage.template get<SchurElemMats::uid_mi_tag>()
401  .find(boost::make_tuple(uid_row, uid_col));
402 
403  if (it ==
404  SchurElemMats::schurL2Storage.template get<SchurElemMats::uid_mi_tag>()
405  .end()) {
406 
407  // get size of arrays of matrices
408  const auto size = SchurElemMats::locMats.size();
409 
410  // expand memory allocation
411  if (SchurElemMats::maxIndexCounter == size) {
412  SchurElemMats::locMats.push_back(new MatrixDouble());
413  SchurElemMats::rowIndices.push_back(new VectorInt());
414  SchurElemMats::colIndices.push_back(new VectorInt());
416  new SchurElemMats(SchurElemMats::maxIndexCounter, uid_row, uid_col));
417  } else {
  // Reuse an existing pool slot; the stripped lines presumably reassign
  // that slot's uidRow/uidCol — confirm against upstream.
419  uid_row;
421  uid_col;
422  }
423 
424  // add matrix to storage
425  auto p = SchurElemMats::schurL2Storage.emplace(
427 #ifndef NDEBUG
428  if (!p.second) {
429  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "Failed to insert");
430  }
431 #endif // NDEBUG
432 
433  auto asmb = [&](auto &sm) {
434  sm.resize(nb_rows, nb_cols, false);
435  noalias(sm) = mat;
436  };
437 
438  asmb((*p.first)->getMat());
439 
440  auto add_indices = [](auto &storage, auto &ind) {
441  storage.resize(ind.size(), false);
442  noalias(storage) = ind;
443  };
444 
445  add_indices((*p.first)->getRowInd(), row_ind);
446  add_indices((*p.first)->getColInd(), col_ind);
447 
448  } else {
449  // entry (submatrix) already exists
450 
451  auto asmb = [&](auto &sm) {
453 
454 #ifndef NDEBUG
455  if (sm.size1() != nb_rows) {
456  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
457  "Wrong mat or storage size %d != %d", sm.size1(), nb_rows);
458  }
459  if (sm.size2() != nb_cols) {
460  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
461  "Wrong mat or storage size %d != %d", sm.size2(), nb_cols);
462  }
463 #endif // NDEBUG
464 
465  switch (iora) {
466  case ADD_VALUES:
467  sm += mat;
468  break;
469  case INSERT_VALUES:
470  noalias(sm) = mat;
471  break;
472  default:
473  SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED,
474  "Assembly type not implemented");
475  }
477  };
478 
479  CHKERR asmb((*it)->getMat());
480  // no need to set indices
481  }
482 
483 #ifndef NDEBUG
484  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_schurMatSetValues, 0, 0, 0, 0);
485 #endif // NDEBUG
486 
488 }
489 
// NOTE(review): constructor/doWork names (original lines 490, 493-495, 500-503,
// 506, 518, 525) were stripped from this doxygen capture; the fragments below
// are the OpSchurAssembleBegin constructor and doWork, and the two
// OpSchurAssembleEndImpl constructors (the second delegating to the first with
// per-field zero epsilons) — confirm against upstream.
491  : OpSchurAssembleBase(NOSPACE, OPSPACE) {}
492 
496 #ifndef NDEBUG
497  if constexpr (debug_schur)
498  MOFEM_LOG("SELF", Sev::noisy) << "Schur assemble begin";
499 #endif
502 
504 }
505 
507  std::vector<std::string> fields_name,
508  std::vector<boost::shared_ptr<Range>> field_ents,
509  std::vector<SmartPetscObj<AO>> sequence_of_aos,
510  std::vector<SmartPetscObj<Mat>> sequence_of_mats,
511  std::vector<bool> sym_schur, std::vector<double> diag_eps, bool symm_op,
512  boost::shared_ptr<BlockStructure> diag_blocks)
513  : OpSchurAssembleBase(NOSPACE, OPSPACE, symm_op), fieldsName(fields_name),
514  fieldEnts(field_ents), sequenceOfAOs(sequence_of_aos),
515  sequenceOfMats(sequence_of_mats), symSchur(sym_schur), diagEps(diag_eps),
516  diagBlocks(diag_blocks) {}
517 
519  std::vector<std::string> fields_name,
520  std::vector<boost::shared_ptr<Range>> field_ents,
521  std::vector<SmartPetscObj<AO>> sequence_of_aos,
522  std::vector<SmartPetscObj<Mat>> sequence_of_mats,
523  std::vector<bool> sym_schur, bool symm_op,
524  boost::shared_ptr<BlockStructure> diag_blocks)
526  fields_name, field_ents, sequence_of_aos, sequence_of_mats, sym_schur,
527  std::vector<double>(fields_name.size(), 0), symm_op, diag_blocks) {}
528 
// NOTE(review): the doWorkImpl signature (original lines 530-532) and many
// hyperlinked statement heads (MoFEMFunctionBegin/Return macros, qualified
// SchurElemMats:: names, FieldEntity:: helpers) were stripped from this doxygen
// capture — restore from upstream. Purpose visible from the body: for each
// configured A00 field range, eliminate the diagonal blocks (Schur complement
// S = A11 - A10*inv(A00)*A01), optionally store inv(A00), erase the factored
// entries, then map indices via AO and assemble S into its matrix.
529 template <typename I>
533 
535 
536 #ifndef NDEBUG
537  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_opSchurAssembleEnd, 0, 0, 0, 0);
538 #endif
539 
540 #ifndef NDEBUG
541  if constexpr (debug_schur)
542  MOFEM_LOG("SELF", Sev::noisy) << "Schur assemble begin -> end";
543 #endif
544 
545 #ifndef NDEBUG
546  auto get_field_name = [&](auto uid) {
549  };
550 #endif
551 
552  // Assemble Schur complement
553  auto assemble_mat = [&](SmartPetscObj<Mat> M, MatSetValuesRaw mat_set_values,
554  auto &storage) {
556  if (M) {
557  for (auto &s : storage) {
558  auto &m = s->getMat();
559  if (m.size1()) {
560  auto &row_ind = s->getRowInd();
561  auto &col_ind = s->getColInd();
562 
563  if (auto ierr = mat_set_values(M, row_ind.size(), &*row_ind.begin(),
564  col_ind.size(), &*col_ind.begin(),
565  &*m.data().begin(), ADD_VALUES)) {
566 #ifndef NDEBUG
567  auto field_ents = getPtrFE()->mField.get_field_ents();
568  auto row_ent_it = field_ents->find(s->uidRow);
569  auto col_ent_it = field_ents->find(s->uidCol);
570  MOFEM_LOG_CHANNEL("SELF");
571  if (row_ent_it != field_ents->end())
572  MOFEM_LOG("SELF", Sev::error)
573  << "Assemble row entity: " << (*row_ent_it)->getName() << " "
574  << (*col_ent_it)->getEntTypeName() << " side "
575  << (*row_ent_it)->getSideNumber();
576  if (col_ent_it != field_ents->end())
577  MOFEM_LOG("SELF", Sev::error)
578  << "Assemble col entity: " << (*col_ent_it)->getName() << " "
579  << (*col_ent_it)->getEntTypeName() << " side "
580  << (*col_ent_it)->getSideNumber();
581 #endif // NDEBUG
582  CHK_THROW_MESSAGE(ierr, "MatSetValues");
583  }
584  }
585  }
586  }
588  };
589 
590  auto apply_schur = [&](auto &storage,
591 
592  auto lo_uid, auto hi_uid,
593 
594  double eps, bool symm) {
596 
597  // add off diagonal matrix, i.e. schur complement
598  auto add_off_mat = [&](auto uid_row, auto uid_col, auto &row_ind,
599  auto &col_ind, auto &offMatInvDiagOffMat) {
601 
602  auto it = storage.template get<SchurElemMats::uid_mi_tag>().find(
603  boost::make_tuple(uid_row, uid_col));
604 
605  if (it == storage.template get<SchurElemMats::uid_mi_tag>().end()) {
606 
607  const auto size = SchurElemMats::locMats.size();
608 
609  if (SchurElemMats::maxIndexCounter == size) {
610  SchurElemMats::locMats.push_back(new MatrixDouble());
611  SchurElemMats::rowIndices.push_back(new VectorInt());
612  SchurElemMats::colIndices.push_back(new VectorInt());
614  SchurElemMats::maxIndexCounter, uid_row, uid_col));
615  } else {
617  uid_row;
619  uid_col;
620  }
621 
622  auto p = SchurElemMats::schurL2Storage.emplace(
624 #ifndef NDEBUG
625  if (!p.second) {
626  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
627  "Failed to insert");
628  }
629 #endif // NDEBUG
630 
631  auto &mat = (*p.first)->getMat();
632  auto &set_row_ind = (*p.first)->getRowInd();
633  auto &set_col_ind = (*p.first)->getColInd();
634 
635  set_row_ind.resize(row_ind.size(), false);
636  noalias(set_row_ind) = row_ind;
637  set_col_ind.resize(col_ind.size(), false);
638  noalias(set_col_ind) = col_ind;
639 
640  mat.resize(offMatInvDiagOffMat.size1(), offMatInvDiagOffMat.size2(),
641  false);
642  noalias(mat) = offMatInvDiagOffMat;
643 
644 #ifndef NDEBUG
645  if constexpr (debug_schur) {
646  MOFEM_LOG("SELF", Sev::noisy) << "insert: " << get_field_name(uid_row)
647  << " " << get_field_name(uid_col) << " "
648  << mat.size1() << " " << mat.size2();
649  }
650 #endif // NDEBUG
651 
652  } else {
653 
654  auto &mat = (*it)->getMat();
655 #ifndef NDEBUG
656  if constexpr (debug_schur) {
657  MOFEM_LOG("SELF", Sev::noisy) << "add: " << get_field_name(uid_row)
658  << " " << get_field_name(uid_col) << " "
659  << mat.size1() << " " << mat.size2();
660  }
661 #endif // NDEBUG
662 
663 #ifndef NDEBUG
664  if (mat.size1() != offMatInvDiagOffMat.size1()) {
665  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
666  "Wrong size %d != %d", mat.size1(),
667  offMatInvDiagOffMat.size1());
668  }
669  if (mat.size2() != offMatInvDiagOffMat.size2()) {
670  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
671  "Wrong size %d != %d", mat.size2(),
672  offMatInvDiagOffMat.size2());
673  }
674 #endif // NDEBUG
675  mat += offMatInvDiagOffMat;
676  }
678  };
679 
680  auto get_row_view = [&]() {
681  auto row_it =
682  storage.template get<SchurElemMats::uid_mi_tag>().lower_bound(
683  boost::make_tuple(lo_uid, 0));
684  auto hi_row_it =
685  storage.template get<SchurElemMats::uid_mi_tag>().upper_bound(
686  boost::make_tuple(
688  std::vector<const SchurElemMats *> schur_row_ptr_view;
689  schur_row_ptr_view.reserve(std::distance(row_it, hi_row_it));
690  for (; row_it != hi_row_it; ++row_it) {
691  schur_row_ptr_view.push_back(*row_it);
692  }
693  return schur_row_ptr_view;
694  };
695 
696  auto get_col_view = [&]() {
697  auto col_it =
698  storage.template get<SchurElemMats::col_mi_tag>().lower_bound(lo_uid);
699  auto hi_col_it =
700  storage.template get<SchurElemMats::col_mi_tag>().upper_bound(hi_uid);
701  std::vector<const SchurElemMats *> schur_col_ptr_view;
702  schur_col_ptr_view.reserve(std::distance(col_it, hi_col_it));
703  for (; col_it != hi_col_it; ++col_it) {
704  schur_col_ptr_view.push_back(*col_it);
705  }
706  return schur_col_ptr_view;
707  };
708 
709  auto schur_row_ptr_view = get_row_view();
710  auto schur_col_ptr_view = get_col_view();
711 
712  // iterate row entities
713  for (auto row_it : schur_row_ptr_view) {
714  // only diagonals to get inverted diagonal
715  if (row_it->uidRow == row_it->uidCol) {
716 #ifndef NDEBUG
717  if constexpr (debug_schur) {
718  MOFEM_LOG("SELF", Sev::noisy)
719  << "invert: uid_row " << get_field_name(row_it->uidRow)
720  << " row uid " << get_field_name(row_it->uidCol) << " : "
721  << (*row_it)->getMat().size1() << " "
722  << (*row_it)->getMat().size2();
723  }
724 #endif // NDEBUG
725 
726  // note invMat is multiplied by -1
727  CHKERR I::invertMat(row_it, invMat, eps);
728 
729  // iterate column entities
730  for (auto c_lo : schur_col_ptr_view) {
731 #ifndef NDEBUG
732  if (c_lo->uidCol != row_it->uidRow)
733  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
734  "Wrong size %d != %d", c_lo->uidCol, row_it->uidRow);
735 #endif // NDEBUG
736 
737  auto &uid_row = c_lo->uidRow;
738  if (uid_row == row_it->uidRow) {
739  continue;
740  }
741 
742  auto &cc_off_mat = c_lo->getMat();
743  invDiagOffMat.resize(cc_off_mat.size1(), invMat.size2(), false);
744 #ifndef NDEBUG
745  if (invMat.size1() != cc_off_mat.size2()) {
746  MOFEM_LOG("SELF", Sev::error)
747  << "row_uid " << get_field_name(row_it->uidRow) << " row uid "
748  << get_field_name(uid_row);
749  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
750  "Wrong size %d != %d", invMat.size1(), cc_off_mat.size2());
751  }
752 #endif // NDEBUG
753 
754  // noalias(invDiagOffMat) = prod(cc_off_mat, invMat);
755  // A10*inv(A00)
756  cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
757  cc_off_mat.size1(), invMat.size2(), cc_off_mat.size2(),
758  1., &*cc_off_mat.data().begin(), cc_off_mat.size2(),
759  &*invMat.data().begin(), invMat.size2(), 0.,
760  &*invDiagOffMat.data().begin(), invDiagOffMat.size2());
761 
762  // iterate row entities
763  for (auto r_lo : schur_row_ptr_view) {
764 #ifndef NDEBUG
  // NOTE(review): this check re-tests c_lo->uidCol against
  // row_it->uidRow — identical to the check in the enclosing column
  // loop — so it never validates r_lo. Looks like a copy-paste slip;
  // presumably it should assert on r_lo (e.g. r_lo->uidRow) instead.
  // The error text "Wrong size" for a uid mismatch is also misleading.
765  if (c_lo->uidCol != row_it->uidRow)
766  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
767  "Wrong size %d != %d", c_lo->uidCol, row_it->uidRow);
768 #endif // NDEBUG
769 
770  auto &uid_col = r_lo->uidCol;
771 
772  // Skip diagonal
773  if (uid_col == row_it->uidRow) {
774  continue;
775  }
776 
777  // If symmetry only run upper off diagonal terms
778  if (symm && uid_row > uid_col) {
779  continue;
780  }
781 
782  auto &rr_off_mat = r_lo->getMat();
783  offMatInvDiagOffMat.resize(invDiagOffMat.size1(),
784  rr_off_mat.size2(), false);
785 #ifndef NDEBUG
786  if (rr_off_mat.size1() != invDiagOffMat.size2()) {
787  MOFEM_LOG("SELF", Sev::error)
788  << "uid_row " << get_field_name(row_it->uidRow)
789  << ": col uid " << get_field_name(uid_col) << " row uid "
790  << get_field_name(uid_row);
791  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
792  "Wrong size %d != %d", rr_off_mat.size1(),
793  invDiagOffMat.size2());
794  }
795 #endif // NDEBUG
796 
797  // noalias(offMatInvDiagOffMat) = prod(invDiagOffMat, rr_off_mat);
798  // A10*inv(A00)*A01
799  cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
800  invDiagOffMat.size1(), rr_off_mat.size2(),
801  invDiagOffMat.size2(), 1.,
802  &*invDiagOffMat.data().begin(), invDiagOffMat.size2(),
803  &*rr_off_mat.data().begin(), rr_off_mat.size2(), 0.,
804  &*offMatInvDiagOffMat.data().begin(),
805  offMatInvDiagOffMat.size2());
806 
807  // Make on diagonal A11 have Schur complement S
808  CHKERR add_off_mat(uid_row, uid_col, c_lo->getRowInd(),
809  r_lo->getColInd(), offMatInvDiagOffMat);
810 
  // Symmetric case: mirror the contribution to the transposed block.
  // NOTE(review): the resize/trans statements (original lines 812,
  // 815) were stripped from this capture.
811  if (symm && uid_row != uid_col) {
813  offMatInvDiagOffMat.size1(),
814  false);
816  CHKERR add_off_mat(uid_col, uid_row, r_lo->getColInd(),
817  c_lo->getRowInd(), transOffMatInvDiagOffMat);
818  }
819  }
820  }
821  }
822  }
823 
825  };
826 
827  auto erase_factored = [&](auto &storage, auto lo_uid, auto hi_uid) {
829 
830  auto r_lo = storage.template get<SchurElemMats::uid_mi_tag>().lower_bound(
831  boost::make_tuple(lo_uid, 0));
832  auto r_hi = storage.template get<SchurElemMats::uid_mi_tag>().upper_bound(
833  boost::make_tuple(hi_uid,
835  storage.template get<SchurElemMats::uid_mi_tag>().erase(r_lo, r_hi);
836 
837  auto c_lo =
838  storage.template get<SchurElemMats::col_mi_tag>().lower_bound(lo_uid);
839  auto c_hi =
840  storage.template get<SchurElemMats::col_mi_tag>().upper_bound(hi_uid);
841  storage.template get<SchurElemMats::col_mi_tag>().erase(c_lo, c_hi);
842 
844  };
845 
846  auto assemble_S = [&](auto &storage, auto ao, auto mat) {
848 
849  // apply AO
850  if (ao) {
851  for (auto &m : storage) {
852  auto &ind_row = m->getRowInd();
853  CHKERR AOApplicationToPetsc(ao, ind_row.size(), &*ind_row.begin());
854  auto &ind_col = m->getColInd();
855  CHKERR AOApplicationToPetsc(ao, ind_col.size(), &*ind_col.begin());
856  }
857  }
858 
859  // assemble matrix
860  if (mat) {
861  CHKERR assemble_mat(mat, matSetValuesSchurRaw, storage);
862  }
863 
865  };
866 
867  auto assemble_A00 = [&](auto &storage, auto lo_uid, auto hi_uid) {
869 
870  auto add = [&](auto lo, auto hi) {
872  for (; lo != hi; ++lo) {
873  auto &m = (*lo)->getMat();
874  if (m.size1() && m.size2()) {
875  auto row_ind = (*lo)->getRowInd()[0];
876  auto col_ind = (*lo)->getColInd()[0];
877  auto nb_rows = m.size1();
878  auto nb_cols = m.size2();
879  auto it = diagBlocks->blockIndex.get<1>().find(
880  boost::make_tuple(row_ind, col_ind, nb_rows, nb_cols));
881  if (it != diagBlocks->blockIndex.get<1>().end()) {
882  auto inv_shift = it->getInvShift();
883  if (inv_shift != -1) {
884 #ifndef NDEBUG
885  if (!diagBlocks->dataInvBlocksPtr)
886  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
887  "No dataInvBlocksPtr");
888 #endif // NDEBUG
889  auto *ptr = &((*diagBlocks->dataInvBlocksPtr)[inv_shift]);
890  if (row_ind == col_ind && nb_rows == nb_cols) {
891  // assemble inverted diag
892  std::copy(invMat.data().begin(), invMat.data().end(), ptr);
893  } else {
894  // assemble off-diagonal terms, which might have been changed
895  // by the Schur complement
896  std::copy(m.data().begin(), m.data().end(), ptr);
897  }
898  }
899  }
900  }
901  }
903  };
904 
905  CHKERR add(
906 
907  storage.template get<SchurElemMats::uid_mi_tag>().lower_bound(
908  boost::make_tuple(lo_uid, 0)),
909  storage.template get<SchurElemMats::uid_mi_tag>().upper_bound(
910  boost::make_tuple(
912 
913  );
914 
916  };
917 
918  auto get_a00_uids = [&]() {
919  auto get_field_bit = [&](auto &name) {
920  return getPtrFE()->mField.get_field_bit_number(name);
921  };
922 
923  std::vector<std::pair<UId, UId>> a00_uids;
924  a00_uids.reserve(fieldsName.size());
925  for (auto ss = 0; ss != fieldsName.size(); ++ss) {
926  auto field_bit = get_field_bit(fieldsName[ss]);
927  auto row_ents = fieldEnts[ss];
928  if (row_ents) {
929  for (auto p = row_ents->pair_begin(); p != row_ents->pair_end(); ++p) {
930  auto lo_uid =
931  FieldEntity::getLoLocalEntityBitNumber(field_bit, p->first);
932  auto hi_uid =
933  FieldEntity::getHiLocalEntityBitNumber(field_bit, p->second);
934  a00_uids.push_back(std::make_pair(lo_uid, hi_uid));
935  }
936  } else {
  // No entity range given: cover the field's full local uid range
  // (the lo_uid/hi_uid declarations on original lines 937, 939 were
  // stripped from this capture).
938  field_bit, get_id_for_min_type<MBVERTEX>());
940  field_bit, get_id_for_max_type<MBENTITYSET>());
941  a00_uids.push_back(std::make_pair(lo_uid, hi_uid));
942  }
943  }
944  return a00_uids;
945  };
946 
947 #ifndef NDEBUG
948  auto list_storage = [&](auto &storage) {
950  int i = 0;
951  for (auto &p : storage) {
952  MOFEM_LOG("SELF", Sev::noisy)
953  << "List schur storage: " << i << " " << p->iDX << ": "
954  << get_field_name(p->uidRow) << " " << get_field_name(p->uidCol)
955  << " : " << p->getMat().size1() << " " << p->getMat().size2();
956  ++i;
957  }
959  };
960 #endif // NDEBUG
961 
962  auto assemble = [&](auto &&a00_uids) {
964  auto &storage = SchurElemMats::schurL2Storage;
965  int ss = 0;
966  for (auto &p : a00_uids) {
967  auto [lo_uid, hi_uid] = p;
968 #ifndef NDEBUG
969  if constexpr (debug_schur) {
970  list_storage(storage);
971  MOFEM_LOG("SELF", Sev::noisy)
972  << "Schur assemble: " << get_field_name(lo_uid) << " - "
973  << get_field_name(hi_uid);
974  }
975 #endif
976  CHKERR apply_schur(storage, lo_uid, hi_uid, diagEps[ss], symSchur[ss]);
977  if (diagBlocks)
978  CHKERR assemble_A00(storage, lo_uid, hi_uid);
979  CHKERR erase_factored(storage, lo_uid, hi_uid);
980  CHKERR assemble_S(storage, sequenceOfAOs[ss], sequenceOfMats[ss]);
981  ++ss;
982  }
984  };
985 
986  // Assemble Schur complements
987  CHKERR assemble(get_a00_uids());
988 
989 #ifndef NDEBUG
990  if constexpr (debug_schur)
991  MOFEM_LOG("SELF", Sev::noisy) << "Schur assemble done";
992 #endif
993 
994 #ifndef NDEBUG
995  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_opSchurAssembleEnd, 0, 0, 0, 0);
996 #endif
997 
999 }
1000 
// Inverter policy for symmetric diagonal blocks (LAPACK dsysv).
// NOTE(review): original lines 1004 and 1031 (presumably the MoFEM
// function-begin/return macros) were stripped from this doxygen capture.
1001 struct SchurDSYSV {
  // Computes inv <- -inv(m) (sign matches caller's "invMat is multiplied by
  // -1"). Side effects: m is regularised by eps on its diagonal and then
  // OVERWRITTEN by the dsysv factorization.
1002  static auto invertMat(const SchurElemMats *row_ptr, MatrixDouble &inv,
1003  double eps) {
1005 
1006  auto &m = row_ptr->getMat();
1007 
1008  VectorInt ipiv;
1009  VectorDouble lapack_work;
1010  const int nb = m.size1();
1011 
  // Optional diagonal regularisation.
1012  if (eps) {
1013  for (int cc = 0; cc != nb; ++cc) {
1014  m(cc, cc) += eps;
1015  }
1016  }
1017 
  // Seed inv with -I, so solving m * X = -I yields X = -inv(m).
1018  inv.resize(nb, nb, false);
1019  inv.clear();
1020  auto ptr = &*inv.data().begin();
1021  for (int c = 0; c != nb; ++c, ptr += nb + 1)
1022  *ptr = -1;
1023  ipiv.resize(nb, false);
1024  lapack_work.resize(nb * nb, false);
1025  const auto info =
1026  lapack_dsysv('L', nb, nb, &*m.data().begin(), nb, &*ipiv.begin(),
1027  &*inv.data().begin(), nb, &*lapack_work.begin(), nb * nb);
1028  if (info != 0)
1029  SETERRQ1(PETSC_COMM_SELF, MOFEM_OPERATION_UNSUCCESSFUL,
1030  "Can not invert matrix info = %d", info);
1032  };
1033 };
1034 
// Inverter policy for general (nonsymmetric) diagonal blocks (LAPACK dgesv).
// NOTE(review): original lines 1038 and 1068 (presumably the MoFEM
// function-begin/return macros) were stripped from this doxygen capture.
1035 struct SchurDGESV {
  // Computes inv <- -inv(m); m is regularised by eps on its diagonal and then
  // OVERWRITTEN by the dgesv LU factorization. Requires m to be square
  // (checked below in debug builds).
1036  static auto invertMat(const SchurElemMats *row_ptr, MatrixDouble &inv,
1037  double eps) {
1039 
1040  auto &m = row_ptr->getMat();
1041 
1042  VectorInt ipiv;
1043  const auto nb = m.size1();
1044 #ifndef NDEBUG
1045  if (nb != m.size2()) {
1046  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
1047  "It should be square matrix %d != %d", nb, m.size2());
1048  }
1049 #endif
1050 
1051  if (eps) {
1052  for (int cc = 0; cc != nb; ++cc) {
1053  m(cc, cc) += eps;
1054  }
1055  }
1056 
  // Seed inv with -I, so solving m * X = -I yields X = -inv(m).
1057  inv.resize(nb, nb, false);
1058  inv.clear();
1059  auto ptr = &*inv.data().begin();
1060  for (int c = 0; c != nb; ++c, ptr += nb + 1)
1061  *ptr = -1;
1062  ipiv.resize(nb, false);
1063  const auto info = lapack_dgesv(nb, nb, &*m.data().begin(), nb,
1064  &*ipiv.begin(), &*inv.data().begin(), nb);
1065  if (info != 0)
1066  SETERRQ1(PETSC_COMM_SELF, MOFEM_OPERATION_UNSUCCESSFUL,
1067  "Can not invert matrix info = %d", info);
1069  };
1070 };
1071 
// NOTE(review): the two doWork signatures (original lines 1072-1074 and
// 1078-1080) were stripped from this doxygen capture; these are presumably
// OpSchurAssembleEnd<SchurDSYSV>::doWork and
// OpSchurAssembleEnd<SchurDGESV>::doWork, each forwarding to doWorkImpl with
// the matching inverter policy.
1075  return doWorkImpl<SchurDSYSV>(side, type, data);
1076 }
1077 
1081  return doWorkImpl<SchurDGESV>(side, type, data);
1082 }
1083 
// Build the block-matrix structure (index of dense sub-blocks plus their
// flat storage) for a shell matrix, by looping over the finite elements
// listed in schur_fe_op_vec and recording one Indexes entry per
// (row entity, column entity) pair with live dofs.
// NOTE(review): this listing is a doxygen extraction; hyperlinked tokens
// (e.g. FieldEntity UId helpers, SETERRQ, MoFEMFunctionReturn) were dropped,
// so a few statements below are visibly truncated — consult original
// Schur.cpp before editing.
1084 boost::shared_ptr<BlockStructure> createBlockMatStructure(
1085 
1086  DM dm, //< dm
1087  SchurFEOpsFEandFields schur_fe_op_vec //< block elements
1088 
1089 ) {
1090 
// Comparators for binary search over sequences of weak_ptr<FieldEntity>
// ordered by local unique id. An expired pointer compares so that dead
// entries fall outside the resulting [lo, hi) window.
1091  auto cmp_uid_lo = [](const boost::weak_ptr<FieldEntity> &a, const UId &b) {
1092  if (auto a_ptr = a.lock()) {
1093  if (a_ptr->getLocalUniqueId() < b)
1094  return true;
1095  else
1096  return false;
1097  } else {
1098  return false;
1099  }
1100  };
1101 
1102  auto cmp_uid_hi = [](const UId &b, const boost::weak_ptr<FieldEntity> &a) {
1103  if (auto a_ptr = a.lock()) {
1104  if (b < a_ptr->getLocalUniqueId())
1105  return true;
1106  else
1107  return false;
1108  } else {
1109  return true;
1110  }
1111  };
1112 
1113  // get uids for fields
// NOTE(review): the two statements below lost their left-hand sides
// (presumably `auto lo_uid = ...` / `auto hi_uid = ...` built from the field
// bit number and min/max entity types) in the extraction — TODO confirm
// against the original source.
1114  auto get_uid_pair = [](const auto &field_id) {
1116  field_id, get_id_for_min_type<MBVERTEX>());
1118  field_id, get_id_for_max_type<MBENTITYSET>());
1119  return std::make_pair(lo_uid, hi_uid);
1120  };
1121 
1122  // get uids pair
// Locate the iterator window of field entities whose UIds fall inside the
// pair returned by get_uid_pair, using the comparators above.
1123  auto get_it_pair = [cmp_uid_lo, cmp_uid_hi](auto &&field_ents, auto &&p_uid) {
1124  auto lo = std::lower_bound(field_ents.begin(), field_ents.end(),
1125  p_uid.first, cmp_uid_lo);
1126  auto hi = std::upper_bound(field_ents.begin(), field_ents.end(),
1127  p_uid.second, cmp_uid_hi);
1128  return std::make_pair(lo, hi);
1129  };
1130 
1131  auto row_extractor = [](auto &e) { return e->entityCacheRowDofs; };
1132  auto col_extractor = [](auto &e) { return e->entityCacheColDofs; };
1133 
// For each live entity in the iterator range whose cached dof range is
// non-empty, collect a (global index, nb. dofs, local index) triplet taken
// from the first dof of the cached range.
1134  auto extract_data = [](auto &&its, auto extractor) {
1135  std::vector<std::tuple<int, int, int>> data;
1136  data.reserve(std::distance(its.first, its.second));
1137  for (; its.first != its.second; ++its.first) {
1138  if (auto e = its.first->lock()) {
1139  if (auto cache = extractor(e).lock()) {
1140  auto nb_dofs = std::distance(cache->loHi[0], cache->loHi[1]);
1141  if (nb_dofs) {
1142  auto glob = (*cache->loHi[0])->getPetscGlobalDofIdx();
1143  auto loc = (*cache->loHi[0])->getPetscLocalDofIdx();
1144  data.emplace_back(glob, nb_dofs, loc);
1145 
1146 #ifndef NDEBUG
1147 
// Debug-only sanity check: every dof in the cached range must carry a valid
// global PETSc index (the error-raising line was lost in extraction).
1148  for (auto lo = cache->loHi[0]; lo != cache->loHi[1]; ++lo) {
1149  auto glob = (*lo)->getPetscGlobalDofIdx();
1150  if (glob == -1) {
1152  "Wrong global index");
1153  }
1154  }
1155 
1156 #endif
1157  }
1158  }
1159  }
1160  }
1161  return data;
1162  };
1163 
1164  auto data_ptr = boost::make_shared<BlockStructure>();
1165  auto m_field_ptr = getInterfacePtr(dm);
1166 
1167  // create element to extract data
1168  auto fe_method = boost::shared_ptr<MoFEM::FEMethod>(new MoFEM::FEMethod());
1169 
1170  for (auto &d : schur_fe_op_vec) {
1171 
1172  // extract bit numbers for row and column
1173  auto get_bit_numbers = [&d](auto op) {
1174  std::vector<FieldBitNumber> bit_numbers(d.second.size());
1175  std::transform(d.second.begin(), d.second.end(), bit_numbers.begin(), op);
1176  return bit_numbers;
1177  };
1178 
1179  // extract bit numbers for row
1180  auto row_bit_numbers = get_bit_numbers(
1181  [&](auto &p) { return m_field_ptr->get_field_bit_number(p.first); });
1182  // extract bit numbers for row
// NOTE(review): comment above repeats "row"; this statement extracts the
// COLUMN field bit numbers (p.second).
1183  auto col_bit_numbers = get_bit_numbers(
1184  [&](auto &p) { return m_field_ptr->get_field_bit_number(p.second); });
1185 
1186  fe_method->preProcessHook = []() { return 0; };
1187  fe_method->postProcessHook = []() { return 0; };
// For every visited element, insert one Indexes entry per (row, col) entity
// block with valid global indices; mat/inv shifts start at -1 and the mat
// shift is assigned after the element loop below.
1188  fe_method->operatorHook = [&]() {
1190 
1191  for (auto f = 0; f != row_bit_numbers.size(); ++f) {
1192 
1193  auto row_data =
1194  extract_data(get_it_pair(fe_method->getRowFieldEnts(),
1195  get_uid_pair(row_bit_numbers[f])),
1196  row_extractor);
1197  auto col_data =
1198  extract_data(get_it_pair(fe_method->getColFieldEnts(),
1199  get_uid_pair(col_bit_numbers[f])),
1200  col_extractor);
1201 
1202  for (auto &r : row_data) {
1203  auto [r_glob, r_nb_dofs, r_loc] = r;
1204  for (auto &c : col_data) {
1205  auto [c_glob, c_nb_dofs, c_loc] = c;
1206  if (r_glob != -1 && c_glob != -1) {
1207  data_ptr->blockIndex.insert(BlockStructure::Indexes{
1208  r_glob, c_glob, r_nb_dofs, c_nb_dofs, r_loc, c_loc, -1, -1});
1209  }
1210  }
1211  }
1212  }
1213 
1215  };
1216 
1217  CHKERR DMoFEMLoopFiniteElements(dm, d.first, fe_method);
1218  };
1219 
1220  // order by column (that is for matrix multiplication)
// Assign each block a contiguous offset into the flat storage array and
// accumulate the total number of doubles needed.
1221  auto mem_size = 0;
1222  for (auto &v : data_ptr->blockIndex.get<0>()) {
1223  v.getMatShift() = mem_size;
1224  mem_size += v.getNbCols() * v.getNbRows();
1225  }
1226 
// NOTE(review): allocation-size guard; the error statement on the truncated
// line was lost in extraction. mem_size is `auto` from an int literal, i.e.
// plain int — overflow for very large problems is worth confirming upstream.
1227  std::vector<double> tmp;
1228  if (mem_size > tmp.max_size())
1230 
1231  data_ptr->dataBlocksPtr =
1232  boost::make_shared<std::vector<double>>(mem_size, 0);
1233 
// Work vectors used by the shell mult/solve kernels; detached from the DM
// via VecSetDM so they do not hold a reference back to it.
1234  auto ghost_x = createDMVector(dm);
1235  auto ghost_y = createDMVector(dm);
1236  CHKERR VecSetDM(ghost_x, PETSC_NULL);
1237  CHKERR VecSetDM(ghost_y, PETSC_NULL);
1238 
1239  data_ptr->ghostX = ghost_x;
1240  data_ptr->ghostY = ghost_y;
1241 
1242  return data_ptr;
1243 }
1244 
// Forward declarations of the shell-matrix kernels: y = A*x and the block
// solve, with `iora` selecting INSERT_VALUES vs ADD_VALUES semantics.
1245 static MoFEMErrorCode mult_schur_block_shell(Mat mat, Vec x, Vec y,
1246  InsertMode iora);
1247 
1248 static MoFEMErrorCode solve_schur_block_shell(Mat mat, Vec x, Vec y,
1249  InsertMode iora);
1250 
// Thin adapters matching the PETSc MATOP_* callback signatures; they are
// registered on the shell matrix via MatShellSetOperation below.
1251 static PetscErrorCode mult(Mat mat, Vec x, Vec y) {
1252  return mult_schur_block_shell(mat, x, y, INSERT_VALUES);
1253 }
1254 static PetscErrorCode mult_add(Mat mat, Vec x, Vec y) {
1255  return mult_schur_block_shell(mat, x, y, ADD_VALUES);
1256 }
1257 static PetscErrorCode solve(Mat mat, Vec x, Vec y) {
1258  return solve_schur_block_shell(mat, x, y, INSERT_VALUES);
1259 }
1260 static PetscErrorCode solve_add(Mat mat, Vec x, Vec y) {
1261  return solve_schur_block_shell(mat, x, y, ADD_VALUES);
1262 }
1263 
// MATOP_ZERO_ROWS_COLUMNS callback for the shell block matrix: zeros the
// listed global rows and columns inside every stored dense block, and puts
// `diag` on matching diagonal entries of locally-owned diagonal blocks.
// NOTE(review): x and b are accepted for PETSc API compatibility but are not
// used in the visible body; MoFEMFunctionBegin/Return lines were lost in the
// doxygen extraction.
1264 static PetscErrorCode zero_rows_columns(Mat A, PetscInt N,
1265  const PetscInt rows[], PetscScalar diag,
1266  Vec x, Vec b) {
1267 
1269  BlockStructure *ctx;
1270  CHKERR MatShellGetContext(A, (void **)&ctx);
1271 
1272  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_zeroRowsAndCols, 0, 0, 0, 0);
1273 
1274  const int *ptr = &rows[0];
1275  int is_nb_rows = N;
1276  SmartPetscObj<IS> is_local;
1277 
// In parallel, gather the row indices from all ranks so every process zeros
// blocks that touch rows owned elsewhere.
1278  MPI_Comm comm;
1279  CHKERR PetscObjectGetComm((PetscObject)A, &comm);
1280  int size;
1281  MPI_Comm_size(comm, &size);
1282  if (size > 1) {
1283  auto is = createISGeneral(comm, N, rows, PETSC_USE_POINTER);
1284  is_local = isAllGather(is);
1285  }
1286  if (is_local) {
1287  CHKERR ISGetSize(is_local, &is_nb_rows);
1288 #ifndef NDEBUG
1289  if constexpr (debug_schur) {
1290  CHKERR ISView(is_local, PETSC_VIEWER_STDOUT_WORLD);
1291  }
1292 #endif
1293  CHKERR ISGetIndices(is_local, &ptr);
1294  }
1295 
1296  int loc_m, loc_n;
1297  CHKERR MatGetLocalSize(A, &loc_m, &loc_n);
1298 
// Pass 1: zero the row `row` inside each block whose row range contains it
// (blocks are scanned from lower_bound on index <2>, keyed by block row).
1299  for (auto n = 0; n != is_nb_rows; ++n) {
1300  auto row = ptr[n];
1301  auto rlo = ctx->blockIndex.get<2>().lower_bound(row);
1302  auto rhi = ctx->blockIndex.get<2>().end();
1303  for (; rlo != rhi; ++rlo) {
1304  auto r_shift = row - rlo->getRow();
1305  if (r_shift >= 0 && r_shift < rlo->getNbRows()) {
1306  auto *ptr = &(*ctx->dataBlocksPtr)[rlo->getMatShift()];
1307  for (auto i = 0; i != rlo->getNbCols(); ++i) {
1308  ptr[i + r_shift * rlo->getNbCols()] = 0;
1309  }
1310  } else if (rlo->getRow() + rlo->getNbRows() > row) {
1311  break;
1312  }
1313  }
1314  }
1315 
// Pass 2: zero the column `col` inside each block whose column range
// contains it, then set `diag` on the diagonal entry of locally-owned
// diagonal blocks (row == col block origin).
1316  for (auto n = 0; n != is_nb_rows; ++n) {
1317  auto col = ptr[n];
1318  auto clo = ctx->blockIndex.get<3>().lower_bound(col);
1319  auto chi = ctx->blockIndex.get<3>().end();
1320  for (; clo != chi; ++clo) {
1321  auto c_shift = col - clo->getCol();
1322  if (c_shift >= 0 && c_shift < clo->getNbCols()) {
1323 
1324  auto *ptr = &(*ctx->dataBlocksPtr)[clo->getMatShift()];
1325  for (auto i = 0; i != clo->getNbRows(); ++i) {
1326  ptr[c_shift + i * clo->getNbCols()] = 0;
1327  }
1328 
1329  // diagonal
1330  if (
1331 
1332  clo->getRow() == clo->getCol() && clo->getLocRow() < loc_m &&
1333  clo->getLocCol() < loc_n
1334 
1335  ) {
1336  auto r_shift = col - clo->getCol();
1337  if (r_shift >= 0 && r_shift < clo->getNbRows()) {
1338  ptr[c_shift + r_shift * clo->getNbCols()] = diag;
1339  }
1340  }
1341  } else if (clo->getCol() + clo->getNbCols() > col) {
1342  break;
1343  }
1344  }
1345  }
1346 
1347  if (is_local) {
1348  CHKERR ISRestoreIndices(is_local, &ptr);
1349  }
1350 
1351  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_zeroRowsAndCols, 0, 0, 0, 0);
1352 
1354 }
1355 
// MATOP_ZERO_ENTRIES callback: zero every stored data array of the block
// structure (main blocks, inverted-diagonal blocks, preconditioner blocks),
// each only if allocated.
// NOTE(review): begin/return macro lines were lost in the doxygen extraction.
1356 static PetscErrorCode mat_zero(Mat m) {
1358  BlockStructure *ctx;
1359  CHKERR MatShellGetContext(m, (void **)&ctx);
1360  if (ctx->dataBlocksPtr)
1361  std::fill(ctx->dataBlocksPtr->begin(), ctx->dataBlocksPtr->end(), 0.);
1362  if (ctx->dataInvBlocksPtr)
1363  std::fill(ctx->dataInvBlocksPtr->begin(), ctx->dataInvBlocksPtr->end(), 0.);
1364  if (ctx->preconditionerBlocksPtr)
1365  std::fill(ctx->preconditionerBlocksPtr->begin(),
1366  ctx->preconditionerBlocksPtr->end(), 0.);
1368 }
1369 
// Registers the shell-matrix callbacks defined above (mult, mult_add, solve,
// solve_add, mat_zero, zero_rows_columns) on `mat_raw`.
// NOTE(review): the function signature line (presumably taking `Mat mat_raw`)
// and begin macro were lost in the doxygen extraction — see original
// Schur.cpp.
1372  CHKERR MatShellSetManageScalingShifts(mat_raw);
1373  CHKERR MatShellSetOperation(mat_raw, MATOP_MULT, (void (*)(void))mult);
1374  CHKERR MatShellSetOperation(mat_raw, MATOP_MULT_ADD,
1375  (void (*)(void))mult_add);
1376  CHKERR MatShellSetOperation(mat_raw, MATOP_SOLVE, (void (*)(void))solve);
1377  CHKERR MatShellSetOperation(mat_raw, MATOP_SOLVE_ADD,
1378  (void (*)(void))solve_add);
1379  CHKERR MatShellSetOperation(mat_raw, MATOP_ZERO_ENTRIES,
1380  (void (*)(void))mat_zero);
1381  CHKERR MatShellSetOperation(mat_raw, MATOP_ZERO_ROWS_COLUMNS,
1382  (void (*)(void))zero_rows_columns);
1383 
1385 };
1386 
// Creates the MATSHELL block matrix over the DM's problem, wired to the
// given BlockStructure, and returns it paired with the data.
// NOTE(review): the first signature line (function name and DM parameter)
// was lost in the doxygen extraction.
1388  boost::shared_ptr<BlockStructure> data) {
1389 
1390  auto problem_ptr = getProblemPtr(dm);
1391  auto nb_local = problem_ptr->nbLocDofsRow;
1392  auto nb_global = problem_ptr->nbDofsRow;
1393 
1394  // check in nb, rows is equal to nb. columns.
// The shell matrix is square by construction; abort if the problem's row
// and column dof counts disagree (error-raising lines truncated).
1395  if (nb_local != problem_ptr->nbLocDofsCol) {
1396  MOFEM_LOG("SELF", Sev::error)
1397  << "Wrong size " << nb_local << " != " << problem_ptr->nbLocDofsCol;
1399  "nb. cols is inconsistent with nb. rows");
1400  }
1401  if (nb_global != problem_ptr->nbDofsCol) {
1402  MOFEM_LOG("SELF", Sev::error)
1403  << "Wrong size " << nb_global << " != " << problem_ptr->nbDofsCol;
1405  "nb. cols is inconsistent with nb. rows");
1406  }
1407 
1408  // get comm from DM
1409  MPI_Comm comm;
1410  CHKERR PetscObjectGetComm((PetscObject)dm, &comm);
1411 
// Shell matrix context is the raw BlockStructure pointer; lifetime is kept
// by the shared_ptr returned in the pair below.
1412  Mat mat_raw;
1413  CHKERR MatCreateShell(comm, nb_local, nb_local, nb_global, nb_global,
1414  (void *)data.get(), &mat_raw);
1415  CHKERR setSchurBlockMatOps(mat_raw);
1416  // CHKERR PetscObjectSetName((PetscObject)mat_raw, MoFEM_BLOCK_MAT);
1417 
1418  return std::make_pair(SmartPetscObj<Mat>(mat_raw), data);
1419 }
1420 
// Definition of mult_schur_block_shell (declared earlier): computes
// y = A*x (INSERT_VALUES) or y += A*x (ADD_VALUES) by looping over all dense
// blocks, plus an optional extra pass with the preconditioner blocks.
// NOTE(review): the signature start and begin macro were lost in the doxygen
// extraction.
1422  InsertMode iora) {
1424  BlockStructure *ctx;
1425  CHKERR MatShellGetContext(mat, (void **)&ctx);
1426 
1427  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_BlockStructureMult, 0, 0, 0, 0);
1428 
1429  Vec ghost_x = ctx->ghostX;
1430  Vec ghost_y = ctx->ghostY;
1431 
// Copy x into the ghosted work vector and refresh ghost entries so blocks
// that reference off-process column dofs read correct values.
1432  CHKERR VecCopy(x, ghost_x);
1433 
1434  CHKERR VecGhostUpdateBegin(ghost_x, INSERT_VALUES, SCATTER_FORWARD);
1435  CHKERR VecGhostUpdateEnd(ghost_x, INSERT_VALUES, SCATTER_FORWARD);
1436 
1437  double *x_array;
1438  Vec loc_ghost_x;
1439  CHKERR VecGhostGetLocalForm(ghost_x, &loc_ghost_x);
1440  CHKERR VecGetArray(loc_ghost_x, &x_array);
1441 
// Accumulation target starts from zero; results are summed locally and the
// ghost contributions are reduced with SCATTER_REVERSE below.
1442  double *y_array;
1443  Vec loc_ghost_y;
1444  CHKERR VecGhostGetLocalForm(ghost_y, &loc_ghost_y);
1445  int nb_y;
1446  CHKERR VecGetLocalSize(loc_ghost_y, &nb_y);
1447  CHKERR VecGetArray(loc_ghost_y, &y_array);
1448  for (auto i = 0; i != nb_y; ++i)
1449  y_array[i] = 0.;
1450 
1451  double *block_ptr = &*ctx->dataBlocksPtr->begin();
1452 
// Dense row-major block times vector segment, for every stored block.
1453  auto it = ctx->blockIndex.get<0>().lower_bound(0);
1454  auto hi = ctx->blockIndex.get<0>().end();
1455 
1456  while (it != hi) {
1457  auto nb_rows = it->getNbRows();
1458  auto nb_cols = it->getNbCols();
1459  auto x_ptr = &x_array[it->getLocCol()];
1460  auto y_ptr = &y_array[it->getLocRow()];
1461  auto ptr = &block_ptr[it->getMatShift()];
1462  for (auto r = 0; r != nb_rows; ++r) {
1463  for (auto c = 0; c != nb_cols; ++c) {
1464  y_ptr[r] += ptr[r * nb_cols + c] * x_ptr[c];
1465  }
1466  }
1467  ++it;
1468  }
1469 
// Optional second pass adding the preconditioner blocks' contribution; only
// blocks with a valid inverse shift participate.
1470  if (ctx->multiplyByPreconditioner) {
1471 
1472  if (!ctx->preconditionerBlocksPtr)
1473  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
1474  "No parentBlockStructurePtr");
1475 
1476  auto preconditioner_ptr = &*ctx->preconditionerBlocksPtr->begin();
1477 
1478  auto it = ctx->blockIndex.get<0>().lower_bound(0);
1479  auto hi = ctx->blockIndex.get<0>().end();
1480 
1481  while (it != hi) {
1482  if (it->getInvShift() != -1) {
1483  auto nb_rows = it->getNbRows();
1484  auto nb_cols = it->getNbCols();
1485  auto x_ptr = &x_array[it->getLocCol()];
1486  auto y_ptr = &y_array[it->getLocRow()];
1487  auto ptr = &preconditioner_ptr[it->getInvShift()];
1488  for (auto r = 0; r != nb_rows; ++r) {
1489  for (auto c = 0; c != nb_cols; ++c) {
1490  y_ptr[r] += ptr[r * nb_cols + c] * x_ptr[c];
1491  }
1492  }
1493  }
1494  ++it;
1495  }
1496  }
1497 
1498  CHKERR VecRestoreArray(loc_ghost_x, &x_array);
1499  CHKERR VecRestoreArray(loc_ghost_y, &y_array);
1500  CHKERR VecGhostRestoreLocalForm(ghost_x, &loc_ghost_x);
1501  CHKERR VecGhostRestoreLocalForm(ghost_y, &loc_ghost_y);
1502 
// Sum ghost contributions back onto owning ranks.
1503  CHKERR VecGhostUpdateBegin(ghost_y, ADD_VALUES, SCATTER_REVERSE);
1504  CHKERR VecGhostUpdateEnd(ghost_y, ADD_VALUES, SCATTER_REVERSE);
1505 
1506  switch (iora) {
1507  case INSERT_VALUES:
1508  CHKERR VecCopy(ghost_y, y);
1509  break;
1510  case ADD_VALUES:
1511  CHKERR VecAXPY(y, 1., ghost_y);
1512  break;
1513  default:
1514  CHK_MOAB_THROW(MOFEM_NOT_IMPLEMENTED, "Wrong InsertMode");
1515  }
1516 
1517 #ifndef NDEBUG
1518 
// Debug-only diagnostics: log sizes and 2-norms of the in/out vectors.
1519  auto print_norm = [&](auto msg, auto y) {
1521  PetscReal norm;
1522  CHKERR VecNorm(y, NORM_2, &norm);
1523  int nb_loc_y;
1524  CHKERR VecGetLocalSize(y, &nb_loc_y);
1525  int nb_y;
1526  CHKERR VecGetSize(y, &nb_y);
1527  MOFEM_LOG("WORLD", Sev::noisy)
1528  << msg << " " << nb_y << " " << nb_loc_y << " norm " << norm;
1530  };
1531 
1532  switch (iora) {
1533  case INSERT_VALUES:
1534  print_norm("mult_schur_block_shell insert x", x);
1535  print_norm("mult_schur_block_shell insert y", y);
1536  break;
1537  case ADD_VALUES:
1538  print_norm("mult_schur_block_shell add x", x);
1539  print_norm("mult_schur_block_shell add y", y);
1540  break;
1541  default:
1542  CHK_MOAB_THROW(MOFEM_NOT_IMPLEMENTED, "Wrong InsertMode");
1543  }
1544 
1545 #endif // NDEBUG
1546 
1547  // PetscLogFlops(xxx)
1548  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_BlockStructureMult, 0, 0, 0, 0);
1549 
1551 }
1552 
// Definition of solve_schur_block_shell (declared earlier): applies the
// block forward substitution x = inv(L) * y using pre-inverted diagonal
// blocks (dataInvBlocksPtr) and the off-diagonal blocks from dataBlocksPtr,
// walking the ordered index view held by DiagBlockInvStruture.
// NOTE(review): the signature start and begin macro were lost in the doxygen
// extraction.
1554  InsertMode iora) {
1556  BlockStructure *ctx;
1557  CHKERR MatShellGetContext(mat, (void **)&ctx);
1558 
1559  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_BlockStructureSolve, 0, 0, 0, 0);
1560 
1561  // Note that for solver those two are swapped
1562  Vec ghost_x = ctx->ghostY;
1563  Vec ghost_y = ctx->ghostX;
1564 
1565  CHKERR VecCopy(y, ghost_y);
1566  CHKERR VecZeroEntries(ghost_x);
1567 
1568  CHKERR VecGhostUpdateBegin(ghost_y, INSERT_VALUES, SCATTER_FORWARD);
1569  CHKERR VecGhostUpdateEnd(ghost_y, INSERT_VALUES, SCATTER_FORWARD);
1570 
1571  double *x_array;
1572  Vec loc_ghost_x;
1573  CHKERR VecGhostGetLocalForm(ghost_x, &loc_ghost_x);
1574  CHKERR VecGetArray(loc_ghost_x, &x_array);
1575 
1576  double *y_array;
1577  Vec loc_ghost_y;
1578  CHKERR VecGhostGetLocalForm(ghost_y, &loc_ghost_y);
1579  CHKERR VecGetArray(loc_ghost_y, &y_array);
1580 
1581  auto data_inv_blocks = ctx->dataInvBlocksPtr;
1582  auto inv_block_ptr = &*data_inv_blocks->begin();
1583  auto data_blocks = ctx->dataBlocksPtr;
1584  auto block_ptr = &*data_blocks->begin();
1585 
// The context must actually be a DiagBlockInvStruture carrying the ordered
// index view; NOTE(review): the dynamic_cast result is not null-checked in
// the visible code — presumably guaranteed by construction, TODO confirm.
1586  auto *data_inv = dynamic_cast<DiagBlockInvStruture *>(ctx);
1587  auto index_view = &data_inv->indexView;
1588 
1589  std::vector<double> f;
1590 
// Each segment s1 groups one diagonal block (first entry) followed by its
// off-diagonal blocks; f accumulates rhs minus off-diagonal contributions,
// then the inverted diagonal block is applied.
1591  for (auto s1 = 0; s1 != index_view->second.size() - 1; ++s1) {
1592  auto lo = index_view->second[s1];
1593  auto hi = index_view->second[s1 + 1];
1594 
1595  auto diag_index_ptr = index_view->first[lo];
1596  ++lo;
1597 
1598  auto row = diag_index_ptr->getLocRow();
1599  auto col = diag_index_ptr->getLocCol();
1600  auto nb_rows = diag_index_ptr->getNbRows();
1601  auto nb_cols = diag_index_ptr->getNbCols();
1602  auto inv_shift = diag_index_ptr->getInvShift();
1603 
1604  f.resize(nb_cols);
1605  std::copy(&y_array[col], &y_array[col + nb_cols], f.begin());
1606 
1607  for (; lo != hi; ++lo) {
1608  auto off_index_ptr = index_view->first[lo];
1609  auto off_col = off_index_ptr->getLocCol();
1610  auto off_nb_cols = off_index_ptr->getNbCols();
1611  auto off_shift = off_index_ptr->getMatShift();
1612  auto x_ptr = &x_array[off_col];
1613  auto ptr = &block_ptr[off_shift];
1614  for (auto r = 0; r != nb_rows; ++r) {
1615  for (auto c = 0; c != off_nb_cols; ++c) {
1616  f[r] -= ptr[r * off_nb_cols + c] * x_ptr[c];
1617  }
1618  }
1619  }
1620 
1621 #ifndef NDEBUG
1622  if (inv_shift == -1)
1623  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "inv_shift == -1");
1624  if (inv_shift + nb_rows * nb_cols > data_inv_blocks->size())
1625  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
1626  "inv_shift out of range %d > %d", inv_shift + nb_rows * nb_cols,
1627  data_inv_blocks->size());
1628 #endif // NDEBUG
1629 
// Apply the stored inverse of the diagonal block; note the `-=`, i.e. the
// stored inverse appears with a negative sign in this scheme.
1630  auto ptr = &inv_block_ptr[inv_shift];
1631  for (auto r = 0; r != nb_rows; ++r) {
1632  for (auto c = 0; c != nb_cols; ++c) {
1633  x_array[row + r] -= ptr[r * nb_cols + c] * f[c];
1634  }
1635  }
1636  }
1637 
1638  CHKERR VecRestoreArray(loc_ghost_x, &x_array);
1639  CHKERR VecRestoreArray(loc_ghost_y, &y_array);
1640  CHKERR VecGhostRestoreLocalForm(ghost_x, &loc_ghost_x);
1641  CHKERR VecGhostRestoreLocalForm(ghost_y, &loc_ghost_y);
1642 
1643  CHKERR VecGhostUpdateBegin(ghost_y, ADD_VALUES, SCATTER_REVERSE);
1644  CHKERR VecGhostUpdateEnd(ghost_y, ADD_VALUES, SCATTER_REVERSE);
1645 
1646  switch (iora) {
1647  case INSERT_VALUES:
1648  CHKERR VecCopy(ghost_x, x);
1649  break;
1650  case ADD_VALUES:
1651  CHKERR VecAXPY(x, 1., ghost_x);
1652  break;
1653  default:
1654  CHK_MOAB_THROW(MOFEM_NOT_IMPLEMENTED, "Wrong InsertMode");
1655  }
1656 
1657 #ifndef NDEBUG
1658 
// Debug-only diagnostics: log sizes and 2-norms of the in/out vectors.
1659  auto print_norm = [&](auto msg, auto y) {
1661  PetscReal norm;
1662  CHKERR VecNorm(y, NORM_2, &norm);
1663  int nb_loc_y;
1664  CHKERR VecGetLocalSize(y, &nb_loc_y);
1665  int nb_y;
1666  CHKERR VecGetSize(y, &nb_y);
1667  MOFEM_LOG("WORLD", Sev::noisy)
1668  << msg << " " << nb_y << " " << nb_loc_y << " norm " << norm;
1670  };
1671 
1672  switch (iora) {
1673  case INSERT_VALUES:
1674  print_norm("solve_schur_block_shell insert x", x);
1675  print_norm("solve_schur_block_shell insert y", y);
1676  break;
1677  case ADD_VALUES:
1678  print_norm("solve_schur_block_shell add x", x);
1679  print_norm("solve_schur_block_shell add y", y);
1680  break;
1681  default:
1682  CHK_MOAB_THROW(MOFEM_NOT_IMPLEMENTED, "Wrong InsertMode");
1683  }
1684 
1685 #endif // NDEBUG
1686 
1687  // PetscLogFlops(xxx)
1688  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_BlockStructureSolve, 0, 0, 0, 0);
1689 
1691 }
1692 
// Shared implementation for assembling a local element matrix `mat` into the
// flat block storage selected by `shift_extractor` (main blocks or
// preconditioner blocks). Fast path: the whole element block matches one
// stored block exactly. Slow path: per-entity sub-blocks, handling negative
// (eliminated) indices.
// NOTE(review): the signature line with the function name was lost in the
// doxygen extraction.
1694  BlockStructure *ctx, const EntitiesFieldData::EntData &row_data,
1695  const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat,
1696  InsertMode iora,
1697  boost::function<int(const DiagBlockIndex::Indexes *)> shift_extractor,
1698  boost::shared_ptr<std::vector<double>> data_blocks_ptr) {
1699 
1701 
// Nothing to assemble when either side has no indices (early-return lines
// were truncated in extraction).
1702  if (row_data.getIndices().empty())
1704  if (col_data.getIndices().empty())
1706 
1707 #ifndef NDEBUG
1708 
1709  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_BlockStructureSetValues, 0, 0, 0,
1710  0);
1711 
1712 #endif // NDEBUG
1713 
// Rows may be overridden by essential-BC storage attached to the first field
// entity; first element of pair tells whether the override is in effect.
1714  auto get_row_data = [&]() -> std::pair<bool, VectorInt> {
1715  if (auto e_ptr = row_data.getFieldEntities()[0]) {
1716  if (auto stored_data_ptr =
1717  e_ptr->getSharedStoragePtr<EssentialBcStorage>()) {
1718  return std::make_pair(true, stored_data_ptr->entityIndices);
1719  }
1720  }
1721  return std::make_pair(false, row_data.getIndices());
1722  };
1723 
// it_r / it_c are true when no index on that side is -1 (i.e. no dofs were
// eliminated), which is a precondition for the fast copy paths.
1724  auto row_indices = get_row_data();
1725  auto it_r = std::find(row_indices.second.begin(), row_indices.second.end(),
1726  -1) == row_indices.second.end();
1727  auto it_c =
1728  std::find(col_data.getIndices().begin(), col_data.getIndices().end(),
1729  -1) == col_data.getIndices().end();
1730 
// Fast path: single-entity rows/cols, no BC override, no -1 indices — the
// whole `mat` maps onto exactly one stored block.
1731  if (
1732 
1733  !row_indices.first
1734 
1735  && row_data.getFieldEntities()[0]->getNbDofsOnEnt() ==
1736  row_indices.second.size()
1737 
1738  && col_data.getFieldEntities()[0]->getNbDofsOnEnt() ==
1739  col_data.getIndices().size()
1740 
1741  && it_r && it_c
1742 
1743  ) {
1744 
1745  auto it = ctx->blockIndex.get<1>().find(
1746 
1747  boost::make_tuple(row_indices.second[0], col_data.getIndices()[0],
1748  row_indices.second.size(),
1749  col_data.getIndices().size())
1750 
1751  );
1752 
1753 #ifndef NDEBUG
1754 
1755  if (it == ctx->blockIndex.get<1>().end()) {
1756  MOFEM_LOG_CHANNEL("SELF");
1757  MOFEM_TAG_AND_LOG("SELF", Sev::error, "BlockMat")
1758  << "missing block: " << row_data.getFieldDofs()[0]->getName() << " : "
1759  << col_data.getFieldDofs()[0]->getName();
1760  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "Block not allocated");
1761  }
1762 
1763 #endif
1764 
// shift == -1 means this block has no storage in the selected array (e.g.
// not a preconditioner block) and is silently skipped.
1765  auto shift = shift_extractor(&*it);
1766  if (shift != -1) {
1767 
1768  auto nbr = row_indices.second.size();
1769  auto nbc = col_data.getIndices().size();
1770  auto mat_row_ptr = &mat(0, 0);
1771  auto s_mat = &(*data_blocks_ptr)[shift];
1772  if (iora == ADD_VALUES) {
1773  for (int r = 0; r != nbr; ++r) {
1774  for (int c = 0; c != nbc; ++c) {
1775  *s_mat += *mat_row_ptr;
1776  ++mat_row_ptr;
1777  ++s_mat;
1778  }
1779  }
1780  } else {
1781  for (int r = 0; r != nbr; ++r) {
1782  for (int c = 0; c != nbc; ++c) {
1783  *s_mat = *mat_row_ptr;
1784  ++mat_row_ptr;
1785  ++s_mat;
1786  }
1787  }
1788  }
1789  }
1790 
1791  } else {
1792 
1793  // This is to manage all complexities with negative indices, etc., nodes
1794  // etc. It is much slower than the above, but it is more general
1795 
1796  std::vector<int> row_ent_idx;
1797  std::vector<int> col_ent_idx;
1798 
// Collect per-entity dof positions (EntDofIdx) for the cached dof range.
1799  auto get_ent_idx = [&](auto lo, auto hi, auto &idx) {
1800  idx.clear();
1801  idx.reserve(std::distance(lo, hi));
1802  for (; lo != hi; ++lo) {
1803  idx.emplace_back((*lo)->getEntDofIdx());
1804  }
1805  };
1806 
// Walk every (row entity, col entity) pair; `row`/`col` track the offset of
// the current entity's dofs inside the element matrix `mat`.
1807  auto row = 0;
1808  for (auto &rent : row_data.getFieldEntities()) {
1809  if (auto r_cache = rent->entityCacheRowDofs.lock()) {
1810 
1811  auto rlo = r_cache->loHi[0];
1812  auto rhi = r_cache->loHi[1];
1813  if (rlo == rhi)
1814  continue;
1815 
1816  get_ent_idx(rlo, rhi, row_ent_idx);
1817  auto gr = (*rlo)->getPetscGlobalDofIdx();
1818  auto nbr = std::distance(rlo, rhi);
1819  auto col = 0;
1820 
1821  for (auto &cent : col_data.getFieldEntities()) {
1822  if (auto c_cache = cent->entityCacheColDofs.lock()) {
1823 
1824  auto clo = c_cache->loHi[0];
1825  auto chi = c_cache->loHi[1];
1826  if (clo == chi)
1827  continue;
1828 
1829  auto nbc = std::distance(clo, chi);
1830  auto it = ctx->blockIndex.get<1>().find(
1831 
1832  boost::make_tuple(gr, (*clo)->getPetscGlobalDofIdx(), nbr, nbc)
1833 
1834  );
1835 
1836 #ifndef NDEBUG
1837 
1838  if (it == ctx->blockIndex.get<1>().end()) {
1839  MOFEM_LOG_CHANNEL("SELF");
1840  MOFEM_TAG_AND_LOG("SELF", Sev::error, "BlockMat")
1841  << "missing block: " << row_data.getFieldDofs()[0]->getName()
1842  << " : " << col_data.getFieldDofs()[0]->getName();
1843  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
1844  "Block not allocated");
1845  }
1846 
1847 #endif
1848 
1849  auto shift = shift_extractor(&*it);
1850  if (shift != -1) {
1851 
1852  auto mat_row_ptr0 = &mat(row, col);
1853  auto s_mat = &(*data_blocks_ptr)[shift];
1854 
// Case 1: both entities contribute all their dofs contiguously — straight
// block copy/add.
1855  if (
1856 
1857  nbr == rent->getNbDofsOnEnt() && nbc == cent->getNbDofsOnEnt()
1858 
1859  && it_r && it_c
1860 
1861  ) {
1862 
1863  if (iora == ADD_VALUES) {
1864  for (int r = 0; r != nbr; ++r) {
1865  auto mat_row_ptr = mat_row_ptr0 + r * mat.size2();
1866  for (int c = 0; c != nbc; ++c) {
1867  *s_mat += *mat_row_ptr;
1868  ++mat_row_ptr;
1869  ++s_mat;
1870  }
1871  }
1872  } else {
1873  for (int r = 0; r != nbr; ++r) {
1874  auto mat_row_ptr = mat_row_ptr0 + r * mat.size2();
1875  for (int c = 0; c != nbc; ++c) {
1876  *s_mat = *mat_row_ptr;
1877  ++mat_row_ptr;
1878  ++s_mat;
1879  }
1880  }
1881  }
1882 
// Case 2: only columns are contiguous — row positions come from the cached
// per-entity dof indices.
1883  } else if (
1884 
1885  nbc == cent->getNbDofsOnEnt()
1886 
1887  && it_r && it_c
1888 
1889  ) {
1890 
1891  if (iora == ADD_VALUES) {
1892  for (auto r : row_ent_idx) {
1893  auto mat_row_ptr = mat_row_ptr0 + r * mat.size2();
1894  for (int c = 0; c != nbc; ++c) {
1895  *s_mat += *mat_row_ptr;
1896  ++mat_row_ptr;
1897  ++s_mat;
1898  }
1899  }
1900  } else {
1901  for (auto r : row_ent_idx) {
1902  auto mat_row_ptr = mat_row_ptr0 + r * mat.size2();
1903  for (int c = 0; c != nbc; ++c) {
1904  *s_mat = *mat_row_ptr;
1905  ++mat_row_ptr;
1906  ++s_mat;
1907  }
1908  }
1909  }
1910 
// Case 3: fully general — both sides indexed via cached dof positions, and
// entries with -1 (eliminated) row/col indices are skipped.
1911  } else {
1912 
1913  get_ent_idx(clo, chi, col_ent_idx);
1914  auto row_idx = &row_indices.second[row];
1915  auto col_idx = &col_data.getIndices()[col];
1916  if (iora == ADD_VALUES) {
1917  for (auto r : row_ent_idx) {
1918  auto mat_row_ptr = mat_row_ptr0 + r * mat.size2();
1919  for (auto c : col_ent_idx) {
1920  if (row_idx[r] != -1 && col_idx[c] != -1)
1921  *s_mat += mat_row_ptr[c];
1922  ++s_mat;
1923  }
1924  }
1925  } else {
1926  for (auto r : row_ent_idx) {
1927  auto mat_row_ptr = mat_row_ptr0 + r * mat.size2();
1928  for (auto c : col_ent_idx) {
1929  if (row_idx[r] != -1 && col_idx[c] != -1)
1930  *s_mat = mat_row_ptr[c];
1931  ++s_mat;
1932  }
1933  }
1934  }
1935  }
1936  }
1937  }
1938  col += cent->getNbDofsOnEnt();
1939  }
1940  }
1941  row += rent->getNbDofsOnEnt();
1942  }
1943  }
1944 
1945 #ifndef NDEBUG
1946  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_BlockStructureSetValues, 0, 0, 0,
1947  0);
1948 #endif // NDEBUG
1949 
1951 }
1952 
// Assembly wrapper: if M is a MATSHELL block matrix, route the element
// matrix into the main block storage (getMatShift); otherwise fall back to
// standard PETSc MatSetValues assembly.
// NOTE(review): the signature line with the wrapper's name and first
// parameters, and the call line invoking the shared impl, were truncated in
// the doxygen extraction.
1955  const EntitiesFieldData::EntData &col_data,
1956  const MatrixDouble &mat, InsertMode iora) {
1958  PetscBool is_mat_shell = PETSC_FALSE;
1959  PetscObjectTypeCompare((PetscObject)M, MATSHELL, &is_mat_shell);
1960  if (is_mat_shell) {
1961  BlockStructure *ctx;
1962  CHKERR MatShellGetContext(M, (void **)&ctx);
1964  ctx, row_data, col_data, mat, iora,
1965  [](const DiagBlockIndex::Indexes *idx) { return idx->getMatShift(); },
1966  ctx->dataBlocksPtr);
1967  } else {
1968  CHKERR MatSetValues<AssemblyTypeSelector<PETSC>>(M, row_data, col_data, mat,
1969  iora);
1970  }
1972 }
1973 
// Assembly wrapper for the PRECONDITIONER block storage: routes the element
// matrix into preconditionerBlocksPtr using the inverse shift (getInvShift)
// as the block offset; errors out if preconditioner storage was never
// allocated.
// NOTE(review): the leading signature line and the call line invoking the
// shared impl were truncated in the doxygen extraction.
1975  Mat M, const EntitiesFieldData::EntData &row_data,
1976  const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat,
1977  InsertMode iora) {
1979  BlockStructure *ctx;
1980  CHKERR MatShellGetContext(M, (void **)&ctx);
1981  if (!ctx->preconditionerBlocksPtr)
1982  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
1983  "No preconditionerBlocksPtr");
1985  ctx, row_data, col_data, mat, iora,
1986  [](const DiagBlockIndex::Indexes *idx) { return idx->getInvShift(); },
1987  ctx->preconditionerBlocksPtr);
1989 }
1990 
1991 boost::shared_ptr<NestSchurData> getNestSchurData(
1992 
1993  std::pair<SmartPetscObj<DM>, SmartPetscObj<DM>> dms,
1994  boost::shared_ptr<BlockStructure> block_mat_data_ptr,
1995 
1996  std::vector<std::string> fields_names, //< a00 fields
1997  std::vector<boost::shared_ptr<Range>>
1998  field_ents, //< a00 ranges (can be null),
1999  bool add_preconditioner_block) {
2000 
2001  if (!block_mat_data_ptr) {
2002  CHK_THROW_MESSAGE(MOFEM_DATA_INCONSISTENCY, "Block data not set");
2003  }
2004 
2005  auto [schur_dm, block_dm] = dms;
2006  auto schur_prb = getProblemPtr(schur_dm);
2007  auto block_prb = getProblemPtr(block_dm);
2008  auto m_field_ptr = getInterfacePtr(block_dm);
2009 
2010  auto schur_dofs_row = schur_prb->getNumeredRowDofsPtr();
2011  auto schur_dofs_col = schur_prb->getNumeredColDofsPtr();
2012  auto block_dofs_row = block_prb->getNumeredRowDofsPtr();
2013  auto block_dofs_col = block_prb->getNumeredColDofsPtr();
2014 
2015  auto ao_schur_row = schur_prb->getSubData()->getSmartRowMap();
2016  auto ao_schur_col = schur_prb->getSubData()->getSmartColMap();
2017  auto ao_block_row = block_prb->getSubData()->getSmartRowMap();
2018  auto ao_block_col = block_prb->getSubData()->getSmartColMap();
2019 
2020  auto schur_vec_x = createDMVector(schur_dm);
2021  auto block_vec_x = createDMVector(block_dm);
2022  auto schur_vec_y = vectorDuplicate(schur_vec_x);
2023  auto block_vec_y = vectorDuplicate(block_vec_x);
2024  CHKERR VecSetDM(schur_vec_x, PETSC_NULL);
2025  CHKERR VecSetDM(block_vec_x, PETSC_NULL);
2026  CHKERR VecSetDM(schur_vec_y, PETSC_NULL);
2027  CHKERR VecSetDM(block_vec_y, PETSC_NULL);
2028 
2029  auto get_vec = [&](auto schur_data) {
2030  std::vector<int> vec_r, vec_c;
2031  vec_r.reserve(schur_data->blockIndex.size());
2032  vec_c.reserve(schur_data->blockIndex.size());
2033  for (auto &d : schur_data->blockIndex.template get<1>()) {
2034  vec_r.push_back(d.getRow());
2035  vec_c.push_back(d.getCol());
2036  }
2037  return std::make_pair(vec_r, vec_c);
2038  };
2039 
2040  auto [vec_r_schur, vec_c_schur] = get_vec(block_mat_data_ptr);
2041  CHKERR AOApplicationToPetsc(ao_schur_row, vec_r_schur.size(),
2042  &*vec_r_schur.begin());
2043  CHKERR AOApplicationToPetsc(ao_schur_col, vec_c_schur.size(),
2044  &*vec_c_schur.begin());
2045  auto [vec_r_block, vec_c_block] = get_vec(block_mat_data_ptr);
2046  CHKERR AOApplicationToPetsc(ao_block_row, vec_r_block.size(),
2047  &*vec_r_block.begin());
2048  CHKERR AOApplicationToPetsc(ao_block_col, vec_c_block.size(),
2049  &*vec_c_block.begin());
2050 
2051  std::array<boost::shared_ptr<BlockStructure>, 4> data_ptrs;
2052 
2053  for (auto r = 0; r != 3; ++r) {
2054  data_ptrs[r] = boost::make_shared<BlockStructure>();
2055  data_ptrs[r]->dataBlocksPtr = block_mat_data_ptr->dataBlocksPtr;
2056  }
2057  data_ptrs[3] = boost::make_shared<DiagBlockInvStruture>();
2058  data_ptrs[3]->dataBlocksPtr = block_mat_data_ptr->dataBlocksPtr;
2059 
2060  data_ptrs[0]->ghostX = schur_vec_x;
2061  data_ptrs[0]->ghostY = schur_vec_y;
2062  data_ptrs[1]->ghostX = block_vec_x;
2063  data_ptrs[1]->ghostY = schur_vec_y;
2064  data_ptrs[2]->ghostX = schur_vec_x;
2065  data_ptrs[2]->ghostY = block_vec_y;
2066  data_ptrs[3]->ghostX = block_vec_x;
2067  data_ptrs[3]->ghostY = block_vec_y;
2068 
2069  int idx = 0;
2070  int inv_mem_size = 0;
2071  for (auto &d : block_mat_data_ptr->blockIndex.get<1>()) {
2072 
2073  auto insert = [&](auto &m, auto &dof_r, auto &dof_c, auto &d) {
2074  m.insert(
2075 
2077  dof_r->getPetscGlobalDofIdx(), dof_c->getPetscGlobalDofIdx(),
2078 
2079  d.getNbRows(), d.getNbCols(),
2080 
2081  dof_r->getPetscLocalDofIdx(), dof_c->getPetscLocalDofIdx(),
2082 
2083  d.getMatShift(), d.getInvShift()}
2084 
2085  );
2086  };
2087 
2088  if (vec_r_schur[idx] != -1 && vec_c_schur[idx] != -1) {
2089  auto schur_dof_r =
2090  schur_dofs_row->get<PetscGlobalIdx_mi_tag>().find(vec_r_schur[idx]);
2091  auto schur_dof_c =
2092  schur_dofs_col->get<PetscGlobalIdx_mi_tag>().find(vec_c_schur[idx]);
2093 #ifndef NDEBUG
2094  if (schur_dof_r == schur_dofs_row->get<PetscGlobalIdx_mi_tag>().end()) {
2095  CHK_THROW_MESSAGE(MOFEM_DATA_INCONSISTENCY, "Block <Schur> not found");
2096  }
2097  if (schur_dof_c == schur_dofs_col->get<PetscGlobalIdx_mi_tag>().end()) {
2098  CHK_THROW_MESSAGE(MOFEM_DATA_INCONSISTENCY, "Block <Schur> not found");
2099  }
2100 #endif // NDEBUG
2101  insert(data_ptrs[0]->blockIndex, *schur_dof_r, *schur_dof_c, d);
2102  }
2103 
2104  if (vec_r_schur[idx] != -1 && vec_c_block[idx] != -1) {
2105  auto schur_dof_r =
2106  schur_dofs_row->get<PetscGlobalIdx_mi_tag>().find(vec_r_schur[idx]);
2107  auto block_dof_c =
2108  block_dofs_col->get<PetscGlobalIdx_mi_tag>().find(vec_c_block[idx]);
2109 #ifndef NDEBUG
2110  if (schur_dof_r == schur_dofs_row->get<PetscGlobalIdx_mi_tag>().end()) {
2111  CHK_THROW_MESSAGE(MOFEM_DATA_INCONSISTENCY, "Block <Schur> not found");
2112  }
2113  if (block_dof_c == block_dofs_col->get<PetscGlobalIdx_mi_tag>().end()) {
2114  CHK_THROW_MESSAGE(MOFEM_DATA_INCONSISTENCY, "Block <Block> not found");
2115  }
2116 #endif
2117  insert(data_ptrs[1]->blockIndex, *schur_dof_r, *block_dof_c, d);
2118  }
2119 
2120  if (vec_r_block[idx] != -1 && vec_c_schur[idx] != -1) {
2121  auto block_dof_r =
2122  block_dofs_row->get<PetscGlobalIdx_mi_tag>().find(vec_r_block[idx]);
2123  auto schur_dof_c =
2124  schur_dofs_col->get<PetscGlobalIdx_mi_tag>().find(vec_c_schur[idx]);
2125 #ifndef NDEBUG
2126  if (block_dof_r == block_dofs_row->get<PetscGlobalIdx_mi_tag>().end()) {
2127  CHK_THROW_MESSAGE(MOFEM_DATA_INCONSISTENCY, "Block <Block> not found");
2128  }
2129  if (schur_dof_c == schur_dofs_col->get<PetscGlobalIdx_mi_tag>().end()) {
2130  CHK_THROW_MESSAGE(MOFEM_DATA_INCONSISTENCY, "Block <Schur> not found");
2131  }
2132 #endif // NDEBUG
2133  insert(data_ptrs[2]->blockIndex, *block_dof_r, *schur_dof_c, d);
2134  }
2135 
2136  if (vec_r_block[idx] != -1 && vec_c_block[idx] != -1) {
2137  auto block_dof_r =
2138  block_dofs_row->get<PetscGlobalIdx_mi_tag>().find(vec_r_block[idx]);
2139  auto block_dof_c =
2140  block_dofs_col->get<PetscGlobalIdx_mi_tag>().find(vec_c_block[idx]);
2141  if (d.getRow() == d.getCol() && d.getNbRows() == d.getNbCols()) {
2142  // Only store inverse of diagonal blocks
2143  d.getInvShift() = inv_mem_size;
2144  inv_mem_size += d.getNbCols() * d.getNbCols();
2145  }
2146  insert(data_ptrs[3]->blockIndex, *block_dof_r, *block_dof_c, d);
2147  }
2148 
2149  ++idx;
2150  }
2151 
2152  auto set_up_a00_data = [&](auto inv_block_data) {
2154 
2155  auto get_a00_uids = [&]() {
2156  auto get_field_bit = [&](auto field_name) {
2157  return m_field_ptr->get_field_bit_number(field_name);
2158  };
2159 
2160  std::vector<std::pair<UId, UId>> a00_uids;
2161  a00_uids.reserve(fields_names.size());
2162  for (auto ss = 0; ss != fields_names.size(); ++ss) {
2163  auto field_bit = get_field_bit(fields_names[ss]);
2164  auto row_ents = field_ents[ss];
2165  if (row_ents) {
2166  for (auto p = row_ents->pair_begin(); p != row_ents->pair_end();
2167  ++p) {
2168  auto lo_uid =
2169  FieldEntity::getLoLocalEntityBitNumber(field_bit, p->first);
2170  auto hi_uid =
2171  FieldEntity::getHiLocalEntityBitNumber(field_bit, p->second);
2172  a00_uids.push_back(std::make_pair(lo_uid, hi_uid));
2173  }
2174  } else {
2176  field_bit, get_id_for_min_type<MBVERTEX>());
2178  field_bit, get_id_for_max_type<MBENTITYSET>());
2179  a00_uids.push_back(std::make_pair(lo_uid, hi_uid));
2180  }
2181  }
2182  return a00_uids;
2183  };
2184 
2185  auto get_glob_idex_pairs = [&](auto &&uid_pairs) {
2186  std::vector<std::pair<int, int>> glob_idex_pairs;
2187  glob_idex_pairs.reserve(uid_pairs.size());
2188  auto dofs = block_prb->getNumeredRowDofsPtr();
2189 
2190  auto it = uid_pairs.rbegin();
2191  auto hi = uid_pairs.rend();
2192 
2193  for (; it != hi; ++it) {
2194  auto [lo_uid, hi_uid] = *it;
2195  auto lo_it = dofs->lower_bound(lo_uid);
2196  auto hi_it = dofs->upper_bound(hi_uid);
2197  if (lo_it != hi_it) {
2198  auto lo_idx = (*lo_it)->getPetscGlobalDofIdx();
2199  glob_idex_pairs.emplace_back(lo_idx, std::distance(lo_it, hi_it));
2200  }
2201  }
2202  return glob_idex_pairs;
2203  };
2204 
2205  auto &index_view =
2206  boost::dynamic_pointer_cast<DiagBlockInvStruture>(inv_block_data)
2207  ->indexView;
2208 
2209  index_view.first.resize(0);
2210  index_view.second.resize(0);
2211  index_view.first.reserve(inv_block_data->blockIndex.size());
2212  index_view.second.reserve(inv_block_data->blockIndex.size());
2213  index_view.second.push_back(0);
2214 
2215  // this enable search by varying ranges
2216  using BlockIndexView = multi_index_container<
2217 
2218  const DiagBlockIndex::Indexes *,
2219 
2220  indexed_by<
2221 
2222  ordered_non_unique<const_mem_fun<DiagBlockIndex::Indexes, int,
2224 
2225  >>;
2226 
2227  BlockIndexView block_index_view;
2228  for (auto it = inv_block_data->blockIndex.template get<1>().begin();
2229  it != inv_block_data->blockIndex.template get<1>().end(); ++it) {
2230  block_index_view.insert(&*it);
2231  }
2232 
2233  auto glob_idx_pairs = get_glob_idex_pairs(get_a00_uids());
2234 
2235  // iterate field & entities pairs
2236  for (auto s1 = 0; s1 != glob_idx_pairs.size(); ++s1) {
2237 
2238  // iterate matrix indexes
2239  auto [lo_idx, nb] = glob_idx_pairs[s1];
2240  auto it = block_index_view.lower_bound(lo_idx);
2241  auto hi = block_index_view.end();
2242 
2243  // iterate rows
2244  while (it != hi &&
2245  ((*it)->getRow() + (*it)->getNbRows()) <= lo_idx + nb) {
2246 
2247  auto row = (*it)->getRow();
2248 
2249  auto get_diag_index =
2250  [&](auto it) -> const MoFEM::DiagBlockIndex::Indexes * {
2251  while (it != hi && (*it)->getRow() == row) {
2252  if ((*it)->getCol() == row &&
2253  (*it)->getNbCols() == (*it)->getNbRows()) {
2254  auto ptr = *it;
2255  return ptr;
2256  }
2257  ++it;
2258  }
2259  CHK_THROW_MESSAGE(MOFEM_DATA_INCONSISTENCY, "Diagonal not found");
2260  return nullptr;
2261  };
2262 
2263  auto push_off_diag = [&](auto it, auto s1) {
2264  while (it != hi && (*it)->getRow() == row) {
2265  for (int s2 = 0; s2 < s1; ++s2) {
2266  auto [col_lo_idx, col_nb] = glob_idx_pairs[s2];
2267  if ((*it)->getCol() >= col_lo_idx &&
2268  (*it)->getCol() + (*it)->getNbCols() <= col_lo_idx + col_nb) {
2269  index_view.first.push_back(*it);
2270  }
2271  }
2272  ++it;
2273  }
2274  };
2275 
2276  index_view.first.push_back(get_diag_index(it));
2277  push_off_diag(it, s1);
2278  index_view.second.push_back(index_view.first.size());
2279 
2280  while (it != hi && (*it)->getRow() == row) {
2281  ++it;
2282  }
2283  }
2284  }
2285 
2286  auto get_vec = [&]() {
2287  std::vector<int> vec_r, vec_c;
2288  vec_r.reserve(index_view.first.size());
2289  vec_c.reserve(index_view.first.size());
2290  for (auto &i : index_view.first) {
2291  vec_r.push_back(i->getRow());
2292  vec_c.push_back(i->getCol());
2293  }
2294  return std::make_pair(vec_r, vec_c);
2295  };
2296 
2297  auto [vec_r_block, vec_c_block] = get_vec();
2298  CHKERR AOPetscToApplication(ao_block_row, vec_r_block.size(),
2299  &*vec_r_block.begin());
2300  CHKERR AOPetscToApplication(ao_block_col, vec_c_block.size(),
2301  &*vec_c_block.begin());
2302 
2303  int inv_mem_size = 0;
2304  for (auto s1 = 0; s1 != index_view.second.size() - 1; ++s1) {
2305  auto lo = index_view.second[s1];
2306  // auto hi = index_view.second[s1 + 1];
2307 
2308  auto diag_index_ptr = index_view.first[lo];
2309  auto row = vec_r_block[lo];
2310  auto col = vec_c_block[lo];
2311  auto nb_rows = diag_index_ptr->getNbRows();
2312  auto nb_cols = diag_index_ptr->getNbCols();
2313  ++lo;
2314 
2315  auto it = block_mat_data_ptr->blockIndex.get<1>().find(
2316  boost::make_tuple(row, col, nb_rows, nb_cols));
2317  if (it == block_mat_data_ptr->blockIndex.get<1>().end()) {
2318  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "Block not found");
2319  }
2320 
2321  it->getInvShift() = inv_mem_size;
2322  diag_index_ptr->getInvShift() = it->getInvShift();
2323  inv_mem_size += nb_rows * nb_cols;
2324 
2325  // for (; lo != hi; ++lo) {
2326  // }
2327  }
2328 
2329  auto inv_data_ptr =
2330  boost::make_shared<std::vector<double>>(inv_mem_size, 0);
2331  data_ptrs[3]->dataInvBlocksPtr = inv_data_ptr;
2332  block_mat_data_ptr->dataInvBlocksPtr = data_ptrs[3]->dataInvBlocksPtr;
2333 
2334  if (add_preconditioner_block) {
2335  auto preconditioned_block =
2336  boost::make_shared<std::vector<double>>(inv_mem_size, 0);
2337  data_ptrs[3]->preconditionerBlocksPtr = preconditioned_block;
2338  data_ptrs[3]->multiplyByPreconditioner = true;
2339  block_mat_data_ptr->preconditionerBlocksPtr =
2340  data_ptrs[3]->preconditionerBlocksPtr;
2341  block_mat_data_ptr->multiplyByPreconditioner = false;
2342  }
2343 
2345  };
2346 
2347  CHKERR set_up_a00_data(data_ptrs[3]);
2348 
2349  MPI_Comm comm;
2350  CHKERR PetscObjectGetComm((PetscObject)schur_dm, &comm);
2351 
2352  auto create_shell_mat = [&](auto nb_r_loc, auto nb_c_loc, auto nb_r_glob,
2353  auto nb_c_glob, auto data_ptr) {
2354  Mat mat_raw;
2355  CHKERR MatCreateShell(comm, nb_r_loc, nb_c_loc, nb_r_glob, nb_c_glob,
2356  (void *)data_ptr.get(), &mat_raw);
2357  CHKERR setSchurBlockMatOps(mat_raw);
2358  return SmartPetscObj<Mat>(mat_raw);
2359  };
2360 
2361  auto schur_nb_global = schur_prb->getNbDofsRow();
2362  auto block_nb_global = block_prb->getNbDofsRow();
2363  auto schur_nb_local = schur_prb->getNbLocalDofsRow();
2364  auto block_nb_local = block_prb->getNbLocalDofsRow();
2365 
2366  std::array<SmartPetscObj<Mat>, 4> mats_array;
2367  mats_array[0] =
2368  create_shell_mat(schur_nb_local, schur_nb_local, schur_nb_global,
2369  schur_nb_global, data_ptrs[0]);
2370  mats_array[1] =
2371  create_shell_mat(schur_nb_local, block_nb_local, schur_nb_global,
2372  block_nb_global, data_ptrs[1]);
2373  mats_array[2] =
2374  create_shell_mat(block_nb_local, schur_nb_local, block_nb_global,
2375  schur_nb_global, data_ptrs[2]);
2376  mats_array[3] =
2377  create_shell_mat(block_nb_local, block_nb_local, block_nb_global,
2378  block_nb_global, data_ptrs[3]);
2379 
2380  MOFEM_TAG_AND_LOG("SYNC", Sev::verbose, "NestedSchur")
2381  << "(0, 0) " << schur_nb_local << " " << schur_nb_global << " "
2382  << data_ptrs[0]->blockIndex.size();
2383  MOFEM_TAG_AND_LOG("SYNC", Sev::verbose, "NestedSchur")
2384  << "(0, 1) " << schur_nb_local << " " << block_nb_local << " "
2385  << schur_nb_global << " " << block_nb_global << " "
2386  << data_ptrs[1]->blockIndex.size();
2387  MOFEM_TAG_AND_LOG("SYNC", Sev::verbose, "NestedSchur")
2388  << "(1, 0) " << block_nb_local << " " << schur_nb_local << " "
2389  << block_nb_global << " " << schur_nb_global << " "
2390  << data_ptrs[2]->blockIndex.size();
2391  MOFEM_TAG_AND_LOG("SYNC", Sev::verbose, "NestedSchur")
2392  << "(1, 1) " << block_nb_local << " " << block_nb_global << " "
2393  << data_ptrs[3]->blockIndex.size();
2394 
2395  MOFEM_LOG_SEVERITY_SYNC(comm, Sev::verbose);
2396 
2397  auto schur_is = schur_prb->getSubData()->getSmartRowIs();
2398  auto block_is = block_prb->getSubData()->getSmartRowIs();
2399 
2400  return boost::make_shared<NestSchurData>(
2401 
2402  mats_array, data_ptrs, block_mat_data_ptr,
2403  std::make_pair(schur_is, block_is)
2404 
2405  );
2406 }
2407 
// Wrap the four Schur shell matrices into a single 2x2 PETSc MatNest.
// The row index sets of the "schur" and "block" sub-problems (carried in
// NestSchurData) place the sub-matrices in row-major order; the returned
// NestSchurData keeps the underlying block data alive alongside the nest.
2408 std::pair<SmartPetscObj<Mat>, boost::shared_ptr<NestSchurData>>
2409 createSchurNestedMatrix(boost::shared_ptr<NestSchurData> schur_net_data_ptr) {
2410 
// NOTE(review): the statement guarded by this null-check (Doxygen line 2412)
// was lost in extraction -- presumably an error/THROW path. Confirm against
// the repository source before editing this function.
2411  if (!schur_net_data_ptr)
2413 
// Unpack the four shell sub-matrices, the per-block data pointers, the block
// matrix data, and the pair of row index sets (schur, block).
2414  auto [mat_arrays, data_ptrs, block_mat_data_ptr, iss] = *schur_net_data_ptr;
2415  auto [schur_is, block_is] = iss;
2416 
2417  std::array<IS, 2> is_a = {schur_is, block_is};
2418  std::array<Mat, 4> mats_a = {
2419 
2420  mat_arrays[0], mat_arrays[1], mat_arrays[2], mat_arrays[3]
2421 
2422  };
2423 
// The nest lives on the same communicator as its first sub-matrix.
2424  MPI_Comm comm;
2425  CHKERR PetscObjectGetComm((PetscObject)mat_arrays[0], &comm);
2426 
2427  Mat mat_raw;
2428  CHKERR MatCreateNest(
2429 
2430  comm, 2, is_a.data(), 2, is_a.data(), mats_a.data(), &mat_raw
2431 
2432  );
2433 
// Hand the raw Mat to a smart handle; returning schur_net_data_ptr as well
// keeps the shell-matrix contexts alive for the lifetime of the nest.
2434  return std::make_pair(SmartPetscObj<Mat>(mat_raw), schur_net_data_ptr);
2435 }
2436 
2438  return new OpSchurAssembleBegin();
2439 }
2440 
2441 OpSchurAssembleBase *
2442 createOpSchurAssembleEnd(std::vector<std::string> fields_name,
2443  std::vector<boost::shared_ptr<Range>> field_ents,
2444  std::vector<SmartPetscObj<AO>> sequence_of_aos,
2445  std::vector<SmartPetscObj<Mat>> sequence_of_mats,
2446  std::vector<bool> sym_schur, bool symm_op,
2447  boost::shared_ptr<BlockStructure> diag_blocks) {
2448  if (symm_op)
2449  return new OpSchurAssembleEnd<SchurDSYSV>(fields_name, field_ents,
2450  sequence_of_aos, sequence_of_mats,
2451  sym_schur, symm_op, diag_blocks);
2452  else
2453  return new OpSchurAssembleEnd<SchurDGESV>(fields_name, field_ents,
2454  sequence_of_aos, sequence_of_mats,
2455  sym_schur, symm_op, diag_blocks);
2456 }
2457 
2458 OpSchurAssembleBase *
2459 createOpSchurAssembleEnd(std::vector<std::string> fields_name,
2460  std::vector<boost::shared_ptr<Range>> field_ents,
2461  std::vector<SmartPetscObj<AO>> sequence_of_aos,
2462  std::vector<SmartPetscObj<Mat>> sequence_of_mats,
2463  std::vector<bool> sym_schur,
2464  std::vector<double> diag_eps, bool symm_op,
2465  boost::shared_ptr<BlockStructure> diag_blocks) {
2466  if (symm_op)
2467  return new OpSchurAssembleEnd<SchurDSYSV>(
2468  fields_name, field_ents, sequence_of_aos, sequence_of_mats, sym_schur,
2469  diag_eps, symm_op, diag_blocks);
2470  else
2471  return new OpSchurAssembleEnd<SchurDGESV>(
2472  fields_name, field_ents, sequence_of_aos, sequence_of_mats, sym_schur,
2473  diag_eps, symm_op, diag_blocks);
2474 }
2475 
2478 
2479  auto apply = [](PC pc, Vec f, Vec x) {
2481  Mat A, P;
2482  CHKERR PCGetOperators(pc, &A, &P);
2483  CHKERR MatSolve(P, f, x);
2485  };
2486 
2487  CHKERR PCSetType(pc, PCSHELL);
2488  CHKERR PCShellSetApply(pc, apply);
2489  CHKERR PCShellSetName(pc, "MoFEMSchurBlockPC");
2490 
2492 }
2493 
2494 // Assemble specialisations
2495 
2496 // This is used to assemble Schur complement "S"
2499 
2500 // This is used to assemble local element matrices for Schur complement
2501 // and matrix for KSP
2502 template <>
2505  const EntitiesFieldData::EntData &col_data,
2506  const MatrixDouble &mat, InsertMode iora) {
2507  return SchurElemMats::MatSetValues(M, row_data, col_data, mat, iora);
2508 }
2509 
2512  const EntitiesFieldData::EntData &col_data,
2513  const MatrixDouble &mat, InsertMode iora) {
2514  return MatSetValues<AssemblyTypeSelector<PETSC>>(M, row_data, col_data, mat,
2515  iora);
2516 }
2517 
2518 // Is assumed that standard PETSc assembly works for matrices used by KSP
2521 
2522 // We assemble matrix for KSP and store local matrices for Schur complement.
2523 // Schur complement is calculated and assembled in OpSchurAssembleEnd.
2526  const EntitiesFieldData::EntData &col_data,
2527  const MatrixDouble &mat, InsertMode iora) {
2530  mat, iora);
2531  CHKERR assembleStorage(row_data, col_data, mat, iora);
2533 }
2534 
2535 // All is now wrapped in specialisation of
2536 // MatSetValues<AssemblyTypeSelector<SCHUR>>
// Public entry point for SCHUR-type assembly. Forwards to
// MatSetValues<SchurElemMats>, which assembles into the KSP matrix and
// stores the local element matrices later consumed by OpSchurAssembleEnd to
// compute the Schur complement.
2537 template <>
2538 MoFEMErrorCode MatSetValues<AssemblyTypeSelector<SCHUR>>(
2539  Mat M, const EntitiesFieldData::EntData &row_data,
2540  const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat,
2541  InsertMode iora) {
2542  return MatSetValues<SchurElemMats>(M, row_data, col_data, mat, iora);
2543 }
2544 
2545 // Standard (PETSc) assembly block matrices do noe work
2546 template <>
2549  const EntitiesFieldData::EntData &col_data,
2550  const MatrixDouble &mat, InsertMode iora) {
2551  return shell_block_mat_asmb_wrap(M, row_data, col_data, mat, iora);
2552 }
2553 
2555 
2556  static MoFEMErrorCode MatSetValues(Mat M,
2557  const EntitiesFieldData::EntData &row_data,
2558  const EntitiesFieldData::EntData &col_data,
2559  const MatrixDouble &mat, InsertMode iora);
2560 };
2561 
2565 
2568  const EntitiesFieldData::EntData &row_data,
2569  const EntitiesFieldData::EntData &col_data,
2570  const MatrixDouble &mat, InsertMode iora) {
2573  col_data, mat, iora);
2574  CHKERR assembleStorage(row_data, col_data, mat, iora);
2576 }
2577 
2578 template <>
2581  const EntitiesFieldData::EntData &row_data,
2582  const EntitiesFieldData::EntData &col_data,
2583  const MatrixDouble &mat, InsertMode iora) {
2584  return SchurElemMatsBlock::MatSetValues(M, row_data, col_data, mat, iora);
2585 }
2586 
2588 
2589  static MoFEMErrorCode MatSetValues(Mat M,
2590  const EntitiesFieldData::EntData &row_data,
2591  const EntitiesFieldData::EntData &col_data,
2592  const MatrixDouble &mat, InsertMode iora);
2593 };
2594 
2598 
2600  Mat M, const EntitiesFieldData::EntData &row_data,
2601  const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat,
2602  InsertMode iora) {
2605  M, row_data, col_data, mat, iora);
2606  CHKERR assembleStorage(row_data, col_data, mat, iora);
2608 }
2609 
2610 template <>
2612  Mat M, const EntitiesFieldData::EntData &row_data,
2613  const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat,
2614  InsertMode iora) {
2615  return SchurElemMatsPreconditionedBlock::MatSetValues(M, row_data, col_data,
2616  mat, iora);
2617 }
2618 
// Toggle the multiplyByPreconditioner flag on the block structure: switch it
// off when currently on, and on when currently off. Enabling it requires
// preconditionerBlocksPtr to be set; otherwise MOFEM_DATA_INCONSISTENCY is
// raised.
// NOTE(review): Doxygen lines 2619 (return type) and 2621/2631 (likely the
// MoFEMFunctionBegin/MoFEMFunctionReturn pair) were dropped by the
// extraction; confirm against the repository source before editing.
2620 schurSwitchPreconditioner(boost::shared_ptr<BlockStructure> block_mat_data) {
2622  if (block_mat_data->multiplyByPreconditioner) {
2623  block_mat_data->multiplyByPreconditioner = false;
2624  } else {
// Cannot enable multiplication by preconditioner blocks that were never
// allocated.
2625  if (!block_mat_data->preconditionerBlocksPtr) {
2626  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
2627  "preconditionerBlocksPtr not set");
2628  }
2629  block_mat_data->multiplyByPreconditioner = true;
2630  }
2632 }
2633 
// Debug utility: write the block layout of the matrix to a mesh file.
// Every entry of blockIndex becomes one MBQUAD element in the (row, col)
// plane, spanning [row, row+nb_rows] x [col, col+nb_cols] with z = 0. Each
// quad is tagged with the Euclidean norm of the block's data ("nrm2",
// computed with cblas_dnrm2 over nb_rows*nb_cols entries) and with the
// block's shift into the data array ("mat_shift"). The mesh is written with
// MOAB's VTK writer to `filename`.
// NOTE(review): Doxygen lines 2637 and 2710 (likely the
// MoFEMFunctionBegin/MoFEMFunctionReturn pair) were dropped by the
// extraction; confirm against the repository source before editing.
2635 schurSaveBlockMesh(boost::shared_ptr<BlockStructure> block_mat_data,
2636  std::string filename) {
2638 
// Build the mesh in a local, throw-away MOAB instance.
2639  moab::Core core;
2640  moab::Interface &moab = core;
2641 
2642  ReadUtilIface *iface;
2643  CHKERR moab.query_interface(iface);
2644 
2645  auto size = block_mat_data->blockIndex.size();
2646 
// Allocate 4 vertices per block (quad corners) and one quad per block.
2647  EntityHandle starting_vertex;
2648  vector<double *> arrays_coord;
2649  CHKERR iface->get_node_coords(3, 4 * size, 0, starting_vertex, arrays_coord);
2650  EntityHandle starting_handle;
2651  EntityHandle *array;
2652  CHKERR iface->get_element_connect(size, 4, MBQUAD, 0, starting_handle, array);
2653 
2654  double def_val_nrm2 = 0;
2655  Tag th_nrm2;
2656  CHKERR moab.tag_get_handle("nrm2", 1, MB_TYPE_DOUBLE, th_nrm2,
2657  MB_TAG_CREAT | MB_TAG_DENSE, &def_val_nrm2);
2658 
// NOTE(review): the default value passed for this MB_TYPE_INTEGER tag is a
// double; the default should be an int to match the tag type -- verify
// against MOAB's tag_get_handle contract.
2659  double def_val_mat_shift = 0;
2660  Tag th_mat_shift;
2661  CHKERR moab.tag_get_handle("mat_shift", 1, MB_TYPE_INTEGER, th_mat_shift,
2662  MB_TAG_CREAT | MB_TAG_DENSE, &def_val_mat_shift);
2663 
// Fill coordinates and connectivity, one quad per block index entry.
2664  int i = 0;
2665  for (auto &d : block_mat_data->blockIndex) {
2666  auto row = d.getRow();
2667  auto col = d.getCol();
2668  auto nb_rows = d.getNbRows();
2669  auto nb_cols = d.getNbCols();
2670  auto mat_shift = d.getMatShift();
2671 
 // q0
2673  arrays_coord[0][4 * i + 0] = row;
2674  arrays_coord[1][4 * i + 0] = col;
2675  arrays_coord[2][4 * i + 0] = 0;
2676 
 // q1
2678  arrays_coord[0][4 * i + 1] = row + nb_rows;
2679  arrays_coord[1][4 * i + 1] = col;
2680  arrays_coord[2][4 * i + 1] = 0;
2681 
 // q2
2683  arrays_coord[0][4 * i + 2] = row + nb_rows;
2684  arrays_coord[1][4 * i + 2] = col + nb_cols;
2685  arrays_coord[2][4 * i + 2] = 0;
2686 
 // q3
2688  arrays_coord[0][4 * i + 3] = row;
2689  arrays_coord[1][4 * i + 3] = col + nb_cols;
2690  arrays_coord[2][4 * i + 3] = 0;
2691 
 // ele conn
2693  array[4 * i + 0] = starting_vertex + 4 * i + 0;
2694  array[4 * i + 1] = starting_vertex + 4 * i + 1;
2695  array[4 * i + 2] = starting_vertex + 4 * i + 2;
2696  array[4 * i + 3] = starting_vertex + 4 * i + 3;
2697 
// Tag the quad with the norm of the block data and its shift.
// NOTE(review): no guard here for an unset (e.g. negative) mat shift --
// confirm blockIndex entries always carry a valid getMatShift().
2698  auto prt = &(*block_mat_data->dataBlocksPtr)[d.getMatShift()];
2699  auto nrm2 = cblas_dnrm2(nb_rows * nb_cols, prt, 1);
2700  EntityHandle ele = starting_handle + i;
2701  CHKERR moab.tag_set_data(th_nrm2, &ele, 1, &nrm2);
2702  CHKERR moab.tag_set_data(th_mat_shift, &ele, 1, &mat_shift);
2703 
2704  ++i;
2705  }
2706 
2707  CHKERR iface->update_adjacencies(starting_handle, size, 4, array);
2708  CHKERR moab.write_file(filename.c_str(), "VTK", "");
2709 
2711 }
2712 
2713 } // namespace MoFEM
NOSPACE
@ NOSPACE
Definition: definitions.h:83
EigenMatrix::getMat
FTensor::Tensor2_symmetric< double, 3 > getMat(Val< double, 3 > &t_val, Vec< double, 3 > &t_vec, Fun< double > f)
Get the Mat object.
Definition: MatrixFunction.cpp:53
MoFEMFunctionReturnHot
#define MoFEMFunctionReturnHot(a)
Last executable line of each PETSc function used for error handling. Replaces return()
Definition: definitions.h:447
CHK_MOAB_THROW
#define CHK_MOAB_THROW(err, msg)
Check error code of MoAB function and throw MoFEM exception.
Definition: definitions.h:576
MoFEM::solve_schur_block_shell
static MoFEMErrorCode solve_schur_block_shell(Mat mat, Vec x, Vec y, InsertMode iora)
Definition: Schur.cpp:1553
MoFEM::id_from_handle
auto id_from_handle(const EntityHandle h)
Definition: Templates.hpp:1868
MoFEM::CoreInterface::get_field_name
virtual std::string get_field_name(const BitFieldId id) const =0
get field name from id
MoFEM::SchurElemMats::rowIndices
static boost::ptr_vector< VectorInt > rowIndices
Definition: Schur.cpp:172
MoFEM::EntitiesFieldData::EntData
Data on single entity (This is passed as argument to DataOperator::doWork)
Definition: EntitiesFieldData.hpp:127
MoFEM::DiagBlockIndex
Definition: Schur.cpp:180
MoFEM::EntitiesFieldData::EntData::getFieldEntities
const VectorFieldEntities & getFieldEntities() const
get field entities
Definition: EntitiesFieldData.hpp:1264
MoFEM::BlockStructure::dataBlocksPtr
boost::shared_ptr< std::vector< double > > dataBlocksPtr
Definition: Schur.cpp:274
MoFEM::DiagBlockIndex::Indexes::row
int row
Definition: Schur.cpp:214
MoFEM::zero_rows_columns
static PetscErrorCode zero_rows_columns(Mat A, PetscInt N, const PetscInt rows[], PetscScalar diag, Vec x, Vec b)
Definition: Schur.cpp:1264
MoFEM::OpSchurAssembleEndImpl::offMatInvDiagOffMat
MatrixDouble offMatInvDiagOffMat
Definition: Schur.cpp:88
MoFEM::MatSetValues
MoFEMErrorCode MatSetValues(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const double *ptr, InsertMode iora)
Assemble PETSc matrix.
Definition: EntitiesFieldData.hpp:1631
MoFEM::DiagBlockInvStruture::SchurSolverView
std::pair< std::vector< const Indexes * >, std::vector< int > > SchurSolverView
Definition: Schur.cpp:285
MoFEM::DiagBlockIndex::blockIndex
BlockIndex blockIndex
blocks indexes storage
Definition: Schur.cpp:266
MoFEM::OpSchurAssembleEndImpl::diagBlocks
boost::shared_ptr< BlockStructure > diagBlocks
Definition: Schur.cpp:91
MoFEM::field_bit_from_bit_number
auto field_bit_from_bit_number(const int bit_number)
get field bit id from bit number
Definition: Templates.hpp:1908
MoFEM::DiagBlockIndex::Indexes::nb_cols
int nb_cols
Definition: Schur.cpp:217
MOFEM_LOG_SEVERITY_SYNC
#define MOFEM_LOG_SEVERITY_SYNC(comm, severity)
Synchronise "SYNC" on curtain severity level.
Definition: LogManager.hpp:352
MoFEM::createISGeneral
auto createISGeneral(MPI_Comm comm, PetscInt n, const PetscInt idx[], PetscCopyMode mode)
Creates a data structure for an index set containing a list of integers.
Definition: PetscSmartObj.hpp:287
MOFEM_LOG_CHANNEL
#define MOFEM_LOG_CHANNEL(channel)
Set and reset channel.
Definition: LogManager.hpp:284
EntityHandle
MoFEM::BlockStructure::preconditionerBlocksPtr
boost::shared_ptr< std::vector< double > > preconditionerBlocksPtr
Definition: Schur.cpp:276
MoFEM::FEMethod
structure for User Loop Methods on finite elements
Definition: LoopMethods.hpp:369
MoFEM::MatSetValues< BlockStructure >
MoFEMErrorCode MatSetValues< BlockStructure >(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:2548
MoFEM::setSchurMatSolvePC
MoFEMErrorCode setSchurMatSolvePC(SmartPetscObj< PC > pc)
Set PC for Schur block.
Definition: Schur.cpp:2476
MoFEM::FieldEntity::getLoLocalEntityBitNumber
static UId getLoLocalEntityBitNumber(const char bit_number, const EntityHandle ent)
Definition: FieldEntsMultiIndices.hpp:247
MoFEM::cmp_uid_hi
static auto cmp_uid_hi(const UId &b, const boost::weak_ptr< FieldEntity > &a)
Definition: ForcesAndSourcesCore.cpp:29
CHK_THROW_MESSAGE
#define CHK_THROW_MESSAGE(err, msg)
Check and throw MoFEM exception.
Definition: definitions.h:596
MoFEM::SchurElemMats::SchurElemStorage
multi_index_container< const SchurElemMats *, indexed_by< ordered_unique< tag< uid_mi_tag >, composite_key< SchurElemMats, member< SchurElemMats, const UId, &SchurElemMats::uidRow >, member< SchurElemMats, const UId, &SchurElemMats::uidCol > > >, ordered_non_unique< tag< col_mi_tag >, member< SchurElemMats, const UId, &SchurElemMats::uidCol > > > > SchurElemStorage
Definition: Schur.cpp:169
MoFEM::OpSchurAssembleBegin::OpSchurAssembleBegin
OpSchurAssembleBegin()
Definition: Schur.cpp:490
MoFEM::shell_block_mat_asmb_wrap_impl
MoFEMErrorCode shell_block_mat_asmb_wrap_impl(BlockStructure *ctx, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora, boost::function< int(const DiagBlockIndex::Indexes *)> shift_extractor, boost::shared_ptr< std::vector< double >> data_blocks_ptr)
Definition: Schur.cpp:1693
MoFEM::solve_add
static PetscErrorCode solve_add(Mat mat, Vec x, Vec y)
Definition: Schur.cpp:1260
lapack_dgesv
static __CLPK_integer lapack_dgesv(__CLPK_integer n, __CLPK_integer nrhs, __CLPK_doublereal *a, __CLPK_integer lda, __CLPK_integer *ipiv, __CLPK_doublereal *b, __CLPK_integer ldb)
Definition: lapack_wrap.h:176
MoFEM::Exceptions::MoFEMErrorCode
PetscErrorCode MoFEMErrorCode
MoFEM/PETSc error code.
Definition: Exceptions.hpp:56
MoFEM::Types::MatrixDouble
UBlasMatrix< double > MatrixDouble
Definition: Types.hpp:77
MoFEM::CoreInterface::get_field_bit_number
virtual FieldBitNumber get_field_bit_number(const std::string name) const =0
get field bit number
MoFEM::OpSchurAssembleEnd< SchurDSYSV >
Definition: Schur.cpp:105
MoFEM::BlockStructure::ghostX
SmartPetscObj< Vec > ghostX
Definition: Schur.cpp:271
A
constexpr AssemblyType A
Definition: operators_tests.cpp:30
MoFEM::SchurElemMats::getMat
auto & getMat() const
Definition: Schur.cpp:131
MoFEM::SchurDSYSV
Definition: Schur.cpp:1001
MoFEM::OpSchurAssembleBase::MatSetValuesRaw
boost::function< MoFEMErrorCode(Mat mat, PetscInt m, const PetscInt idxm[], PetscInt n, const PetscInt idxn[], const PetscScalar v[], InsertMode addv)> MatSetValuesRaw
Definition: Schur.hpp:46
MoFEM::SchurBackendMatSetValuesPtr::MatSetValuesPtr
boost::function< MoFEMErrorCode(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)> MatSetValuesPtr
Definition: Schur.hpp:217
MoFEM::DiagBlockIndex::Indexes::getNbRows
int getNbRows() const
Definition: Schur.cpp:198
MoFEM::SchurEvents::SchurEvents
SchurEvents()
Definition: Schur.cpp:297
MoFEM::SchurEvents::MOFEM_EVENT_BlockStructureMult
static PetscLogEvent MOFEM_EVENT_BlockStructureMult
Definition: Schur.hpp:30
MoFEM::DiagBlockIndex::Indexes::col
int col
Definition: Schur.cpp:215
MoFEM::shell_block_preconditioner_mat_asmb_wrap
MoFEMErrorCode shell_block_preconditioner_mat_asmb_wrap(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:1974
MoFEM::SchurElemMats::colIndices
static boost::ptr_vector< VectorInt > colIndices
Definition: Schur.cpp:173
MoFEM::SchurEvents::MOFEM_EVENT_schurMatSetValues
static PetscLogEvent MOFEM_EVENT_schurMatSetValues
Definition: Schur.hpp:27
MoFEM::BlockStructure::ghostY
SmartPetscObj< Vec > ghostY
Definition: Schur.cpp:272
sdf.r
int r
Definition: sdf.py:8
MoFEM::DiagBlockIndex::Indexes::getCol
int getCol() const
Definition: Schur.cpp:197
MoFEM::DiagBlockIndex::Indexes
block data indexes
Definition: Schur.cpp:188
MoFEM::debug_schur
constexpr bool debug_schur
Definition: Schur.cpp:12
MoFEM::Interface
DeprecatedCoreInterface Interface
Definition: Interface.hpp:1975
MoFEM::DiagBlockIndex::Indexes::nb_rows
int nb_rows
Definition: Schur.cpp:216
MoFEM::schur_mat_set_values_wrap
MoFEMErrorCode schur_mat_set_values_wrap(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:2511
c
const double c
speed of light (cm/ns)
Definition: initial_diffusion.cpp:39
MoFEM::SchurElemMats::col_mi_tag
Definition: Schur.cpp:150
MoFEM::DiagBlockIndex::Indexes::colShift
int colShift() const
Definition: Schur.cpp:209
MoFEM::SchurElemMats::iDX
const size_t iDX
Definition: Schur.cpp:127
MoFEM::SchurElemMats::uidCol
UId uidCol
Definition: Schur.cpp:129
PlasticOps::M
FTensor::Index< 'M', 3 > M
Definition: PlasticOps.hpp:117
MoFEM::OpSchurAssembleEnd< SchurDGESV >
Definition: Schur.cpp:112
CHKERR
#define CHKERR
Inline error check.
Definition: definitions.h:535
MoFEM::DiagBlockIndex::Indexes::getRow
int getRow() const
Definition: Schur.cpp:196
MoFEM::OpSchurAssembleEndImpl::fieldEnts
std::vector< boost::shared_ptr< Range > > fieldEnts
Definition: Schur.cpp:80
MoFEM::createOpSchurAssembleEnd
OpSchurAssembleBase * createOpSchurAssembleEnd(std::vector< std::string > fields_name, std::vector< boost::shared_ptr< Range >> field_ents, std::vector< SmartPetscObj< AO >> sequence_of_aos, std::vector< SmartPetscObj< Mat >> sequence_of_mats, std::vector< bool > sym_schur, bool symm_op, boost::shared_ptr< BlockStructure > diag_blocks)
Construct a new Op Schur Assemble End object.
Definition: Schur.cpp:2442
MoFEM::createDMVector
auto createDMVector(DM dm)
Get smart vector from DM.
Definition: DMMoFEM.hpp:1067
MoFEM::mult_add
static PetscErrorCode mult_add(Mat mat, Vec x, Vec y)
Definition: Schur.cpp:1254
MoFEM::createBlockMat
SchurShellMatData createBlockMat(DM dm, boost::shared_ptr< BlockStructure > data)
Create a Schur Mat object.
Definition: Schur.cpp:1387
MoFEM::DiagBlockIndex::Indexes::Indexes
Indexes(int row, int col, int nb_rows, int nb_cols, int loc_row, int loc_col, int mat_shift, int inv_shift)
Definition: Schur.cpp:190
MoFEM::mat_zero
static PetscErrorCode mat_zero(Mat m)
Definition: Schur.cpp:1356
MoFEM::OpSchurAssembleBegin
Clear Schur complement internal data.
Definition: Schur.cpp:18
MoFEM::SchurElemMats::SchurElemMats
SchurElemMats(const size_t idx, const UId uid_row, const UId uid_col)
Definition: Schur.cpp:313
MoFEM
implementation of Data Operators for Forces and Sources
Definition: Common.hpp:10
MoFEM::SchurEvents::MOFEM_EVENT_BlockStructureSolve
static PetscLogEvent MOFEM_EVENT_BlockStructureSolve
Definition: Schur.hpp:31
a
constexpr double a
Definition: approx_sphere.cpp:30
MoFEM::mult_schur_block_shell
static MoFEMErrorCode mult_schur_block_shell(Mat mat, Vec x, Vec y, InsertMode iora)
Definition: Schur.cpp:1421
MoFEM::schurSwitchPreconditioner
MoFEMErrorCode schurSwitchPreconditioner(boost::shared_ptr< BlockStructure > block_mat_data)
Switch preconditioner.
Definition: Schur.cpp:2620
BITFIELDID_SIZE
#define BITFIELDID_SIZE
max number of fields
Definition: definitions.h:220
MoFEM::SchurElemMats::assembleStorage
static MoFEMErrorCode assembleStorage(const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:318
MoFEM::DiagBlockInvStruture::indexView
SchurSolverView indexView
Definition: Schur.cpp:287
MoFEM::SchurDGESV::invertMat
static auto invertMat(const SchurElemMats *row_ptr, MatrixDouble &inv, double eps)
Definition: Schur.cpp:1036
MoFEM::DiagBlockIndex::Indexes::mat_shift
int mat_shift
Definition: Schur.cpp:220
MoFEM::FieldEntity::getHandleFromUniqueId
static auto getHandleFromUniqueId(const UId uid)
Get the Handle From Unique Id.
Definition: FieldEntsMultiIndices.hpp:192
MoFEM::Types::UId
uint128_t UId
Unique Id.
Definition: Types.hpp:31
MoFEM::DiagBlockIndex::BlockIndex
multi_index_container< Indexes, indexed_by< ordered_non_unique< composite_key< Indexes, const_mem_fun< Indexes, int, &Indexes::getLocRow >, const_mem_fun< Indexes, int, &Indexes::getLocCol >, const_mem_fun< Indexes, int, &Indexes::getNbRows >, const_mem_fun< Indexes, int, &Indexes::getNbCols > > >, ordered_unique< composite_key< Indexes, const_mem_fun< Indexes, int, &Indexes::getRow >, const_mem_fun< Indexes, int, &Indexes::getCol >, const_mem_fun< Indexes, int, &Indexes::getNbRows >, const_mem_fun< Indexes, int, &Indexes::getNbCols > > >, ordered_non_unique< const_mem_fun< Indexes, int, &Indexes::rowShift > >, ordered_non_unique< const_mem_fun< Indexes, int, &Indexes::colShift > > > > BlockIndex
Definition: Schur.cpp:264
lapack_dsysv
static __CLPK_integer lapack_dsysv(char uplo, __CLPK_integer n, __CLPK_integer nrhs, __CLPK_doublereal *a, __CLPK_integer lda, __CLPK_integer *ipiv, __CLPK_doublereal *b, __CLPK_integer ldb, __CLPK_doublereal *work, __CLPK_integer lwork)
Definition: lapack_wrap.h:220
MoFEM::DiagBlockIndex::~DiagBlockIndex
virtual ~DiagBlockIndex()=default
MoFEM::SchurElemMats::uid_mi_tag
Definition: Schur.cpp:149
MoFEM::SchurElemMats::getColInd
auto & getColInd() const
Definition: Schur.cpp:133
double
MoFEM::DiagBlockInvStruture
Definition: Schur.cpp:282
MoFEM::BlockStructure::parentBlockStructurePtr
boost::shared_ptr< std::vector< double > > parentBlockStructurePtr
Definition: Schur.cpp:277
convert.type
type
Definition: convert.py:64
MoFEM::FieldEntity::getLocalUniqueIdCalculate
UId getLocalUniqueIdCalculate()
Get the Local Unique Id Calculate object.
Definition: FieldEntsMultiIndices.hpp:136
MoFEM::DiagBlockIndex::Indexes::rowShift
int rowShift() const
Definition: Schur.cpp:205
MoFEM::SchurElemMats::~SchurElemMats
virtual ~SchurElemMats()=default
EshelbianPlasticity::P
@ P
Definition: EshelbianContact.cpp:193
MoFEM::DiagBlockIndex::Indexes::getLocRow
int getLocRow() const
Definition: Schur.cpp:200
MoFEM::cmp_uid_lo
static auto cmp_uid_lo(const boost::weak_ptr< FieldEntity > &a, const UId &b)
Definition: ForcesAndSourcesCore.cpp:18
MoFEM::DiagBlockIndex::Indexes::loc_col
int loc_col
Definition: Schur.cpp:219
MoFEM::OpSchurAssembleBegin::doWork
MoFEMErrorCode doWork(int side, EntityType type, EntitiesFieldData::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
Definition: Schur.cpp:493
MoFEM::SchurElemMats::schurL2Storage
static SchurElemStorage schurL2Storage
Definition: Schur.cpp:177
MoFEM::EntitiesFieldData::EntData::getIndices
const VectorInt & getIndices() const
Get global indices of dofs on entity.
Definition: EntitiesFieldData.hpp:1201
MoFEM::OpSchurAssembleEndImpl::diagEps
std::vector< double > diagEps
Definition: Schur.cpp:84
MoFEM::SchurElemMats::OpSchurAssembleBegin
friend OpSchurAssembleBegin
Definition: Schur.cpp:146
MoFEM::createOpSchurAssembleBegin
OpSchurAssembleBase * createOpSchurAssembleBegin()
Definition: Schur.cpp:2437
MoFEM::ForcesAndSourcesCore::UserDataOperator::getPtrFE
ForcesAndSourcesCore * getPtrFE() const
Definition: ForcesAndSourcesCore.hpp:1250
MoFEM::SchurElemMats::maxIndexCounter
static size_t maxIndexCounter
Definition: Schur.cpp:175
MoFEM::DiagBlockIndex::Indexes::inv_shift
int inv_shift
Definition: Schur.cpp:221
MoFEM::SchurEvents::MOFEM_EVENT_opSchurAssembleEnd
static PetscLogEvent MOFEM_EVENT_opSchurAssembleEnd
Definition: Schur.hpp:28
MoFEM::CoreInterface::get_field_ents
virtual const FieldEntity_multiIndex * get_field_ents() const =0
Get the field ents object.
MoFEM::solve
static PetscErrorCode solve(Mat mat, Vec x, Vec y)
Definition: Schur.cpp:1257
MoFEM::OpSchurAssembleEndImpl::OpSchurAssembleEndImpl
OpSchurAssembleEndImpl(std::vector< std::string > fields_name, std::vector< boost::shared_ptr< Range >> field_ents, std::vector< SmartPetscObj< AO >> sequence_of_aos, std::vector< SmartPetscObj< Mat >> sequence_of_mats, std::vector< bool > sym_schur, bool symm_op=true, boost::shared_ptr< BlockStructure > diag_blocks=nullptr)
Construct a new Op Schur Assemble End object.
Definition: Schur.cpp:518
MoFEM::SchurElemMatsBlock
Definition: Schur.cpp:2554
MoFEM::DiagBlockIndex::Indexes::getLocCol
int getLocCol() const
Definition: Schur.cpp:201
MoFEM::OpSchurAssembleEndImpl::invMat
MatrixDouble invMat
Definition: Schur.cpp:86
MOFEM_OPERATION_UNSUCCESSFUL
@ MOFEM_OPERATION_UNSUCCESSFUL
Definition: definitions.h:34
MoFEM::type_from_handle
auto type_from_handle(const EntityHandle h)
get type from entity handle
Definition: Templates.hpp:1876
MoFEM::EntitiesFieldData::EntData::getFieldDofs
const VectorDofs & getFieldDofs() const
get dofs data stature FEDofEntity
Definition: EntitiesFieldData.hpp:1256
MoFEM::mult
static PetscErrorCode mult(Mat mat, Vec x, Vec y)
Definition: Schur.cpp:1251
MoFEM::BlockStructure::dataInvBlocksPtr
boost::shared_ptr< std::vector< double > > dataInvBlocksPtr
Definition: Schur.cpp:275
MoFEM::PetscGlobalIdx_mi_tag
Definition: TagMultiIndices.hpp:38
MoFEM::DiagBlockIndex::Indexes::getMatShift
int & getMatShift() const
Definition: Schur.cpp:202
MoFEM::createSchurNestedMatrix
std::pair< SmartPetscObj< Mat >, boost::shared_ptr< NestSchurData > > createSchurNestedMatrix(boost::shared_ptr< NestSchurData > schur_net_data_ptr)
Create a Mat Diag Blocks object.
Definition: Schur.cpp:2409
MoFEM::OpSchurAssembleEndImpl::transOffMatInvDiagOffMat
MatrixDouble transOffMatInvDiagOffMat
Definition: Schur.cpp:89
MoFEM::MatSetValues< SchurElemMatsPreconditionedBlock >
MoFEMErrorCode MatSetValues< SchurElemMatsPreconditionedBlock >(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:2611
MoFEM::SchurBackendMatSetValuesPtr::matSetValuesPtr
static MatSetValuesPtr matSetValuesPtr
backend assembly function
Definition: Schur.hpp:218
i
FTensor::Index< 'i', SPACE_DIM > i
Definition: hcurl_divergence_operator_2d.cpp:27
MoFEM::SchurElemMatsPreconditionedBlock
Definition: Schur.cpp:2587
MoFEM::SchurElemMatsPreconditionedBlock::MatSetValues
static MoFEMErrorCode MatSetValues(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:2599
field_name
constexpr auto field_name
Definition: poisson_2d_homogeneous.cpp:13
MoFEM::SchurElemMats::schurElemMats
static boost::ptr_vector< SchurElemMats > schurElemMats
Definition: Schur.cpp:174
MoFEM::SchurElemMats::MatSetValues
static MoFEMErrorCode MatSetValues(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:2525
MoFEM::SchurDSYSV::invertMat
static auto invertMat(const SchurElemMats *row_ptr, MatrixDouble &inv, double eps)
Definition: Schur.cpp:1002
MoFEM::OpSchurAssembleBase
Definition: Schur.hpp:40
convert.n
n
Definition: convert.py:82
MoFEM::SchurElemMats
Schur complement data storage.
Definition: Schur.cpp:122
N
const int N
Definition: speed_test.cpp:3
v
const double v
phase velocity of light in medium (cm/ns)
Definition: initial_diffusion.cpp:40
MoFEM::setSchurBlockMatOps
static MoFEMErrorCode setSchurBlockMatOps(Mat mat_raw)
Definition: Schur.cpp:1370
MoFEM::EssentialBcStorage
[Storage and set boundary conditions]
Definition: FormsIntegrators.hpp:14
MoFEM::OpSchurAssembleEndImpl
Assemble Schur complement (Implementation)
Definition: Schur.cpp:30
MOFEM_TAG_AND_LOG
#define MOFEM_TAG_AND_LOG(channel, severity, tag)
Tag and log in channel.
Definition: LogManager.hpp:362
MoFEM::getInterfacePtr
auto getInterfacePtr(DM dm)
Get the Interface Ptr object.
Definition: DMMoFEM.hpp:1014
MOFEM_LOG
#define MOFEM_LOG(channel, severity)
Log.
Definition: LogManager.hpp:308
MoFEM::SchurBackendMatSetValuesPtr::matSetValuesPreconditionedBlockPtr
static MatSetValuesPtr matSetValuesPreconditionedBlockPtr
Definition: Schur.hpp:222
MoFEM::vectorDuplicate
SmartPetscObj< Vec > vectorDuplicate(Vec vec)
Create duplicate vector of smart vector.
Definition: PetscSmartObj.hpp:221
MoFEM::DiagBlockIndex::Indexes::loc_row
int loc_row
Definition: Schur.cpp:218
MoFEM::shell_block_mat_asmb_wrap
MoFEMErrorCode shell_block_mat_asmb_wrap(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:1954
MoFEM::MatSetValues< SchurElemMats >
MoFEMErrorCode MatSetValues< SchurElemMats >(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:2504
MoFEM::Core
CoreTmp< 0 > Core
Definition: Core.hpp:1102
std
Definition: enable_if.hpp:5
HenckyOps::f
auto f
Definition: HenckyOps.hpp:15
MoFEM::Types::VectorInt
UBlasVector< int > VectorInt
Definition: Types.hpp:67
eps
static const double eps
Definition: check_base_functions_derivatives_on_tet.cpp:11
MoFEM::SchurElemMats::uidRow
UId uidRow
Definition: Schur.cpp:128
MoFEM::OpSchurAssembleBase::matSetValuesSchurRaw
static MatSetValuesRaw matSetValuesSchurRaw
Definition: Schur.hpp:47
MoFEM::OpSchurAssembleEndImpl::fieldsName
std::vector< std::string > fieldsName
Definition: Schur.cpp:79
MoFEM::OpSchurAssembleEnd
Assemble Schur complement.
Definition: Schur.cpp:102
UBlasVector< double >
MoFEM::Exceptions::ierr
static MoFEMErrorCodeGeneric< PetscErrorCode > ierr
Definition: Exceptions.hpp:76
MoFEM::getProblemPtr
auto getProblemPtr(DM dm)
get problem pointer from DM
Definition: DMMoFEM.hpp:1025
EigenMatrix::Vec
const FTensor::Tensor2< T, Dim, Dim > Vec
Definition: MatrixFunction.hpp:66
MOFEM_DATA_INCONSISTENCY
@ MOFEM_DATA_INCONSISTENCY
Definition: definitions.h:31
MoFEM::OpSchurAssembleEndImpl::doWorkImpl
MoFEMErrorCode doWorkImpl(int side, EntityType type, EntitiesFieldData::EntData &data)
Definition: Schur.cpp:531
sdf_hertz_2d_axisymm_plane.d
float d
Definition: sdf_hertz_2d_axisymm_plane.py:4
FTensor::transform
Tensor2_Expr< transform_Tensor2< A, B, T, Dim0, Dim1, i, j >, T, Dim0, Dim1, i, j > transform(const Tensor2_Expr< A, T, Dim0, Dim1, i, j > &a, B function)
Definition: Tensor2_transform.hpp:27
MoFEM::SchurShellMatData
std::pair< SmartPetscObj< Mat >, boost::shared_ptr< BlockStructure > > SchurShellMatData
Definition: Schur.hpp:117
MoFEM::OpSchurAssembleEndImpl::sequenceOfAOs
std::vector< SmartPetscObj< AO > > sequenceOfAOs
Definition: Schur.cpp:81
MoFEM::FieldEntity::getHiBitNumberUId
static UId getHiBitNumberUId(const FieldBitNumber bit_number)
Definition: FieldEntsMultiIndices.hpp:228
MoFEM::DiagBlockIndex::Indexes::getNbCols
int getNbCols() const
Definition: Schur.cpp:199
MoFEM::FieldEntity::getHiLocalEntityBitNumber
static UId getHiLocalEntityBitNumber(const char bit_number, const EntityHandle ent)
Definition: FieldEntsMultiIndices.hpp:258
MoFEM::SchurElemMatsBlock::MatSetValues
static MoFEMErrorCode MatSetValues(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:2567
MoFEM::OpSchurAssembleEndImpl::sequenceOfMats
std::vector< SmartPetscObj< Mat > > sequenceOfMats
Definition: Schur.cpp:82
m
FTensor::Index< 'm', 3 > m
Definition: shallow_wave.cpp:80
MoFEM::BlockStructure::multiplyByPreconditioner
bool multiplyByPreconditioner
Definition: Schur.cpp:279
MoFEM::SchurElemMats::locMats
static boost::ptr_vector< MatrixDouble > locMats
Definition: Schur.cpp:171
MoFEM::createBlockMatStructure
boost::shared_ptr< BlockStructure > createBlockMatStructure(DM dm, SchurFEOpsFEandFields schur_fe_op_vec)
Create a Mat Diag Blocks object.
Definition: Schur.cpp:1084
MoFEMFunctionBeginHot
#define MoFEMFunctionBeginHot
First executable line of each MoFEM function, used for error handling. Final line of MoFEM functions ...
Definition: definitions.h:440
MoFEM::OpSchurAssembleEndImpl::symSchur
std::vector< bool > symSchur
Definition: Schur.cpp:83
sdf_wavy_2d.ind
float ind
Definition: sdf_wavy_2d.py:8
MoFEM::SchurBackendMatSetValuesPtr::matSetValuesBlockPtr
static MatSetValuesPtr matSetValuesBlockPtr
backend assembly block mat function
Definition: Schur.hpp:220
MoFEM::SmartPetscObj< AO >
MoFEM::MatSetValues< SchurElemMatsBlock >
MoFEMErrorCode MatSetValues< SchurElemMatsBlock >(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:2580
MoFEM::ent_form_type_and_id
auto ent_form_type_and_id(const EntityType type, const EntityID id)
get entity handle from type and id
Definition: Templates.hpp:1884
MoFEM::BlockStructure
Definition: Schur.cpp:269
MoFEM::SchurDGESV
Definition: Schur.cpp:1035
MoFEM::SchurEvents::MOFEM_EVENT_zeroRowsAndCols
static PetscLogEvent MOFEM_EVENT_zeroRowsAndCols
Definition: Schur.hpp:32
MoFEM::schurSaveBlockMesh
MoFEMErrorCode schurSaveBlockMesh(boost::shared_ptr< BlockStructure > block_mat_data, std::string filename)
Save block matrix as a mesh.
Definition: Schur.cpp:2635
MoFEM::SchurElemMats::getRowInd
auto & getRowInd() const
Definition: Schur.cpp:132
MoFEM::DMoFEMLoopFiniteElements
PetscErrorCode DMoFEMLoopFiniteElements(DM dm, const char fe_name[], MoFEM::FEMethod *method, CacheTupleWeakPtr cache_ptr=CacheTupleSharedPtr())
Executes FEMethod for finite elements in DM.
Definition: DMMoFEM.cpp:586
MoFEM::FieldEntity::getFieldBitNumberFromUniqueId
static auto getFieldBitNumberFromUniqueId(const UId uid)
Get the Field Bit Number From Unique Id.
Definition: FieldEntsMultiIndices.hpp:205
convert.int
int
Definition: convert.py:64
MoFEMFunctionReturn
#define MoFEMFunctionReturn(a)
Last executable line of each PETSc function used for error handling. Replaces return()
Definition: definitions.h:416
MOFEM_NOT_IMPLEMENTED
@ MOFEM_NOT_IMPLEMENTED
Definition: definitions.h:32
MoFEM::OpSchurAssembleEndImpl::invDiagOffMat
MatrixDouble invDiagOffMat
Definition: Schur.cpp:87
MoFEM::DiagBlockIndex::Indexes::getInvShift
int & getInvShift() const
Definition: Schur.cpp:203
MoFEM::SchurEvents::MOFEM_EVENT_BlockStructureSetValues
static PetscLogEvent MOFEM_EVENT_BlockStructureSetValues
Definition: Schur.hpp:29
MoFEMFunctionBegin
#define MoFEMFunctionBegin
First executable line of each MoFEM function, used for error handling. Final line of MoFEM functions ...
Definition: definitions.h:346
MoFEM::SchurFEOpsFEandFields
std::vector< std::pair< std::string, std::vector< SchurFieldPair > > > SchurFEOpsFEandFields
Definition: Schur.hpp:102
MoFEM::isAllGather
auto isAllGather(IS is)
IS All gather.
Definition: PetscSmartObj.hpp:300
MoFEM::ForcesAndSourcesCore::mField
Interface & mField
Definition: ForcesAndSourcesCore.hpp:24
MoFEM::getNestSchurData
boost::shared_ptr< NestSchurData > getNestSchurData(std::pair< SmartPetscObj< DM >, SmartPetscObj< DM >> dms, boost::shared_ptr< BlockStructure > block_mat_data_ptr, std::vector< std::string > fields_names, std::vector< boost::shared_ptr< Range >> field_ents, bool add_preconditioner_block)
Get the Schur Nest Mat Array object.
Definition: Schur.cpp:1991
MoFEM::SchurElemMats::OpSchurAssembleEndImpl
friend OpSchurAssembleEndImpl
Definition: Schur.cpp:147