v0.14.0
Schur.cpp
Go to the documentation of this file.
1 /**
2  * @file Schur.cpp
3  * @brief Implementation of Schur Complement
4  * @date 2023-02-03
5  *
6  * @copyright Copyright (c) 2023
7  *
8  */
9 
10 namespace MoFEM {
11 
// Compile-time switch for verbose Schur-assembly diagnostics; when true the
// operators below emit noisy MOFEM_LOG output (guarded by `if constexpr`).
constexpr bool debug_schur = false;
13 
15  OpSchurAssembleBaseImpl() = delete;
16 
17 protected:
19 
20  /**
21  * @brief Assemble Schur complement
22  *
23  */
25 
26  Mat S,
27 
28  const UId &uid_row, VectorInt &row_ind,
29 
30  const UId &uid_col, VectorInt &col_ind,
31 
32  MatrixDouble &m, InsertMode iora
33 
34  );
35 };
36 
37 // Assemble specialisations
39 
40  Mat S,
41 
42  const UId &uid_row, VectorInt &row_ind,
43 
44  const UId &uid_col, VectorInt &col_ind,
45 
46  MatrixDouble &m, InsertMode iora
47 
48 ) {
50 
51  S, row_ind.size(), &*row_ind.begin(), col_ind.size(), &*col_ind.begin(),
52  &*m.data().begin(), iora
53 
54  );
55 }
56 
57 /**
58  * @brief Clear Schur complement internal data
59  *
60  */
62 
64 
65  MoFEMErrorCode doWork(int side, EntityType type,
67 };
68 
69 /**
70  * @brief Assemble Schur complement (Implementation)
71  *
72  */
73 template <typename OP_SCHUR_ASSEMBLE_BASE>
75 
77 
78  /**
79  * @brief Construct a new Op Schur Assemble End object
80  *
81  * @param fields_name list of fields
82  * @param field_ents list of entities on which schur complement is applied
83  * (can be empty)
84  * @param sequence_of_aos list of maps from base problem to Schur complement
85  * matrix
86  * @param sequence_of_mats list of Schur complement matrices
87  * @param sym_schur true if Schur complement is symmetric
88  * @param symm_op true if block diagonal is symmetric
89  */
91  std::vector<std::string> fields_name,
92  std::vector<boost::shared_ptr<Range>> field_ents,
93  std::vector<SmartPetscObj<AO>> sequence_of_aos,
94  std::vector<SmartPetscObj<Mat>> sequence_of_mats,
95  std::vector<bool> sym_schur, bool symm_op = true,
96  boost::shared_ptr<BlockStructure> diag_blocks = nullptr);
97 
98  /**
99  * @brief Construct a new Op Schur Assemble End object
100  *
101  * @param fields_name list of fields
102  * @param field_ents list of entities on which schur complement is applied
103  * (can be empty)
104  * @param sequence_of_aos list of maps from base problem to Schur complement
105  * matrix
106  * @param sequence_of_mats list of Schur complement matrices
107  * @param sym_schur true if Schur complement is symmetric
108  * @param diag_eps add epsilon on diagonal of inverted matrix
109  * @param symm_op true if block diagonal is symmetric
110  */
112  std::vector<std::string> fields_name,
113  std::vector<boost::shared_ptr<Range>> field_ents,
114  std::vector<SmartPetscObj<AO>> sequence_of_aos,
115  std::vector<SmartPetscObj<Mat>> sequence_of_mats,
116  std::vector<bool> sym_schur, std::vector<double> diag_eps,
117  bool symm_op = true,
118  boost::shared_ptr<BlockStructure> diag_blocks = nullptr);
119 
120 protected:
121  template <typename I>
123 
124  int side, EntityType type, EntitiesFieldData::EntData &data
125 
126  );
127 
128  std::vector<std::string> fieldsName;
129  std::vector<boost::shared_ptr<Range>> fieldEnts;
130  std::vector<SmartPetscObj<AO>> sequenceOfAOs;
131  std::vector<SmartPetscObj<Mat>> sequenceOfMats;
132  std::vector<bool> symSchur;
133  std::vector<double> diagEps;
134 
139 
140  boost::shared_ptr<BlockStructure> diagBlocks;
141 };
142 
// Inverter policies for the diagonal (A00) blocks; supplied as template
// parameter I and invoked via I::invertMat during Schur assembly.
struct SchurDSYSV; ///< SY symmetric
struct SchurDGESV; ///< GE general (i.e., nonsymmetric, in some cases
                   ///< rectangular)

/**
 * @brief Assemble Schur complement
 *
 * @tparam I diagonal-block inverter policy (SchurDSYSV or SchurDGESV);
 * explicit specialisations are provided for both.
 */
template <typename I> struct OpSchurAssembleEnd;
152 
153 template <>
155  : public OpSchurAssembleEndImpl<OpSchurAssembleBaseImpl> {
157  MoFEMErrorCode doWork(int side, EntityType type,
159 };
160 
161 template <>
163  : public OpSchurAssembleEndImpl<OpSchurAssembleBaseImpl> {
165  MoFEMErrorCode doWork(int side, EntityType type,
167 };
168 
169 /**
170  * @brief Schur complement data storage
171  *
172  */
/**
 * @brief Schur complement data storage
 *
 * One instance describes a single dense sub-matrix assembled for a
 * (row entity, column entity) pair. The matrix and index data live in the
 * shared static pools below (locMats, rowIndices, colIndices); iDX selects
 * this element's slot in each pool, so allocations are reused.
 */
struct SchurElemMats : public boost::enable_shared_from_this<SchurElemMats> {

  SchurElemMats(const size_t idx, const UId uid_row, const UId uid_col);
  virtual ~SchurElemMats() = default;

  const size_t iDX; ///< index of this element's slot in the static pools

  // Accessors return this element's entry in the shared static pools.
  inline auto &getMat() const { return locMats[iDX]; }
  inline auto &getRowInd() const { return rowIndices[iDX]; }
  inline auto &getColInd() const { return colIndices[iDX]; }

  /**
   * @brief Intercept assembly: accumulate values into local storage instead
   * of the PETSc matrix, so the Schur complement can be formed later.
   */
  static MoFEMErrorCode MatSetValues(Mat M,
                                     const EntitiesFieldData::EntData &row_data,
                                     const EntitiesFieldData::EntData &col_data,
                                     const MatrixDouble &mat, InsertMode iora);

protected:
  static MoFEMErrorCode
                        const EntitiesFieldData::EntData &col_data,
                        const MatrixDouble &mat, InsertMode iora);

  friend struct OpSchurAssembleBegin;
  template <typename OP_SCHUR_ASSEMBLE_BASE>
  friend struct OpSchurAssembleEndImpl;

  struct uid_mi_tag {};
  struct col_mi_tag {};

  // Multi-index view over stored blocks: a unique ordering by the
  // (uidRow, uidCol) pair, plus a non-unique ordering by column UId used
  // for column-wise scans of the storage.
  using SchurElemStorage = multi_index_container<
      const SchurElemMats *,
      indexed_by<

          ordered_unique<
              tag<uid_mi_tag>,
              composite_key<

                  member<SchurElemMats, const UId, &SchurElemMats::uidRow>,
                  member<SchurElemMats, const UId, &SchurElemMats::uidCol>

                  >>,

          ordered_non_unique<tag<col_mi_tag>, member<SchurElemMats, const UId,

                             >>;

  // Shared static pools; entries are addressed by SchurElemMats::iDX.
  static boost::ptr_vector<MatrixDouble> locMats;
  static boost::ptr_vector<VectorInt> rowIndices;
  static boost::ptr_vector<VectorInt> colIndices;
  static boost::ptr_vector<SchurElemMats> schurElemMats;
  static size_t maxIndexCounter; ///< number of pool slots currently in use

};
231 
233 
234  virtual ~DiagBlockIndex() = default;
235 
  /**
   * @brief block data indexes
   *
   * Position and extent of one dense block: global (row, col) and local
   * (loc_row, loc_col) coordinates, block size nb_rows x nb_cols, and
   * shifts (offsets) into the packed block-data arrays.
   */
  struct Indexes {

    Indexes(int row, int col, int nb_rows, int nb_cols, int loc_row,
            int loc_col, int mat_shift, int inv_shift)
          inv_shift(inv_shift) {}

    inline int getRow() const { return row; }
    inline int getCol() const { return col; }
    inline int getNbRows() const { return nb_rows; }
    inline int getNbCols() const { return nb_cols; }
    inline int getLocRow() const { return loc_row; }
    inline int getLocCol() const { return loc_col; }
    // Shifts are mutable so they can be (re)assigned through const access to
    // elements held in the multi-index container.
    inline int &getMatShift() const { return mat_shift; }
    inline int &getInvShift() const { return inv_shift; }

    inline int rowShift() const {
      return getRow() + getNbRows();
    } // shift such that lower bound is included

    inline int colShift() const {
      return getCol() + getNbCols();
    } // shift such that lower bound is included

  private:
    int row;
    int col;
    int nb_rows;
    int nb_cols;
    int loc_row;
    int loc_col;
    mutable int mat_shift;
    mutable int inv_shift;
  };
275 
276  using BlockIndex = multi_index_container<
277 
278  Indexes,
279 
280  indexed_by<
281 
282  ordered_non_unique<
283 
284  composite_key<Indexes,
285 
286  const_mem_fun<Indexes, int, &Indexes::getLocRow>,
287  const_mem_fun<Indexes, int, &Indexes::getLocCol>,
288  const_mem_fun<Indexes, int, &Indexes::getNbRows>,
289  const_mem_fun<Indexes, int, &Indexes::getNbCols>>
290 
291  >,
292 
293  ordered_unique<
294 
295  composite_key<Indexes,
296 
297  const_mem_fun<Indexes, int, &Indexes::getRow>,
298  const_mem_fun<Indexes, int, &Indexes::getCol>,
299  const_mem_fun<Indexes, int, &Indexes::getNbRows>,
300  const_mem_fun<Indexes, int, &Indexes::getNbCols>>
301 
302  >,
303 
304  ordered_non_unique<
305 
306  const_mem_fun<Indexes, int, &Indexes::rowShift>
307 
308  >,
309 
310  ordered_non_unique<
311 
312  const_mem_fun<Indexes, int, &Indexes::colShift>
313 
314  >
315 
316  >>;
317 
318  BlockIndex blockIndex; ///< blocks indexes storage
319 };
320 
322 
325 
326  boost::shared_ptr<std::vector<double>> dataBlocksPtr;
327  boost::shared_ptr<std::vector<double>> dataInvBlocksPtr;
328  boost::shared_ptr<std::vector<double>> preconditionerBlocksPtr;
329  boost::shared_ptr<std::vector<double>> parentBlockStructurePtr;
330 
332 };
333 
335 
336  // first value is value of off diagonal block index data, second is shift on
337  // the diagonal
338  struct A00SolverView {
339  std::vector<const Indexes *> lowView;
340  std::vector<int> diagLoRange;
341  std::vector<const Indexes *> upView;
342  std::vector<int> diagUpRange;
343  };
344 
346 };
347 
354 
  // Register PETSc profiling events for the Schur machinery; the matching
  // PetscLogEventBegin/End calls appear throughout this file.
  PetscLogEventRegister("schurMatSetVal", 0, &MOFEM_EVENT_schurMatSetValues);
  PetscLogEventRegister("opSchurAsmEnd", 0, &MOFEM_EVENT_opSchurAssembleEnd);
  PetscLogEventRegister("blockSetVal", 0, &MOFEM_EVENT_BlockStructureSetValues);
  PetscLogEventRegister("blockMult", 0, &MOFEM_EVENT_BlockStructureMult);
  PetscLogEventRegister("blockSolve", 0, &MOFEM_EVENT_BlockStructureSolve);
  PetscLogEventRegister("schurZeroRandC", 0, &MOFEM_EVENT_zeroRowsAndCols);
}
363 
// Definitions of the SchurElemMats static storage pools. These buffers are
// shared by all elements and reused between evaluations (growth is governed
// by maxIndexCounter); each SchurElemMats::iDX addresses one slot.
boost::ptr_vector<MatrixDouble> SchurElemMats::locMats;
boost::ptr_vector<VectorInt> SchurElemMats::rowIndices;
boost::ptr_vector<VectorInt> SchurElemMats::colIndices;
boost::ptr_vector<SchurElemMats> SchurElemMats::schurElemMats;

// Record the pool slot index and the row/column entity UIds identifying the
// block held in that slot.
SchurElemMats::SchurElemMats(const size_t idx, const UId uid_row,
                             const UId uid_col)
    : iDX(idx), uidRow(uid_row), uidCol(uid_col) {}
374 
377  const EntitiesFieldData::EntData &col_data,
378  const MatrixDouble &mat, InsertMode iora) {
380 
381 #ifndef NDEBUG
382  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_schurMatSetValues, 0, 0, 0, 0);
383 #endif // NDEBUG
384 
385  // get row indices, in case of store, get indices from storage
386  // storage keeps marked indices to manage boundary conditions
387  auto get_row_indices = [&]() -> const VectorInt & {
388  if (auto e_ptr = row_data.getFieldEntities()[0]) {
389  if (auto stored_data_ptr =
390  e_ptr->getSharedStoragePtr<EssentialBcStorage>()) {
391  return stored_data_ptr->entityIndices;
392  }
393  }
394  return row_data.getIndices();
395  };
396 
397  const auto &row_ind = get_row_indices();
398  const auto &col_ind = col_data.getIndices();
399 
400  const auto nb_rows = row_ind.size();
401  const auto nb_cols = col_ind.size();
402 
403 #ifndef NDEBUG
404  if (mat.size1() != nb_rows) {
405  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
406  "Wrong mat size %d != %d", mat.size1(), nb_rows);
407  }
408  if (mat.size2() != nb_cols) {
409  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
410  "Wrong mat size %d != %d", mat.size2(), nb_cols);
411  }
412 #endif // NDEBUG
413 
414  // get entity uid
415  auto get_uid = [](auto &data) {
416  if (data.getFieldEntities().size() == 1) {
417 
418  return data.getFieldEntities()[0]->getLocalUniqueId();
419 
420  } else {
421 
422  // Is assumed that sum of entities ids gives unique id, that is not true,
423  // but corner case is improbable.
424 
425  // @todo: debug version should have test
426 
427  auto &uid0 = data.getFieldEntities()[0]->getLocalUniqueId();
428  auto field_id0 = FieldEntity::getFieldBitNumberFromUniqueId(uid0);
429  auto ent0 = FieldEntity::getHandleFromUniqueId(uid0);
430  auto type0 = type_from_handle(ent0);
431  auto id = id_from_handle(ent0);
432 
433  for (auto i = 1; i < data.getFieldEntities().size(); ++i) {
434 
435  // get entity id from ent
436  id += id_from_handle(
437 
438  // get entity handle from unique uid
440  data.getFieldEntities()[i]->getLocalUniqueId())
441 
442  );
443  }
444 
446  field_id0,
447 
448  ent_form_type_and_id(type0, id)
449 
450  );
451  }
452  };
453 
454  auto uid_row = get_uid(row_data);
455  auto uid_col = get_uid(col_data);
456 
457  auto it =
458  SchurElemMats::schurL2Storage.template get<SchurElemMats::uid_mi_tag>()
459  .find(boost::make_tuple(uid_row, uid_col));
460 
461  if (it ==
462  SchurElemMats::schurL2Storage.template get<SchurElemMats::uid_mi_tag>()
463  .end()) {
464 
465  // get size of arrays of matrices
466  const auto size = SchurElemMats::locMats.size();
467 
468  // expand memory allocation
469  if (SchurElemMats::maxIndexCounter == size) {
470  SchurElemMats::locMats.push_back(new MatrixDouble());
471  SchurElemMats::rowIndices.push_back(new VectorInt());
472  SchurElemMats::colIndices.push_back(new VectorInt());
474  new SchurElemMats(SchurElemMats::maxIndexCounter, uid_row, uid_col));
475  } else {
477  uid_row;
479  uid_col;
480  }
481 
482  // add matrix to storage
483  auto p = SchurElemMats::schurL2Storage.emplace(
485 #ifndef NDEBUG
486  if (!p.second) {
487  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "Failed to insert");
488  }
489 #endif // NDEBUG
490 
491  auto asmb = [&](auto &sm) {
492  sm.resize(nb_rows, nb_cols, false);
493  noalias(sm) = mat;
494  };
495 
496  asmb((*p.first)->getMat());
497 
498  auto add_indices = [](auto &storage, auto &ind) {
499  storage.resize(ind.size(), false);
500  noalias(storage) = ind;
501  };
502 
503  add_indices((*p.first)->getRowInd(), row_ind);
504  add_indices((*p.first)->getColInd(), col_ind);
505 
506  } else {
507  // entry (submatrix) already exists
508 
509  auto asmb = [&](auto &sm) {
511 
512 #ifndef NDEBUG
513  if (sm.size1() != nb_rows) {
514  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
515  "Wrong mat or storage size %d != %d", sm.size1(), nb_rows);
516  }
517  if (sm.size2() != nb_cols) {
518  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
519  "Wrong mat or storage size %d != %d", sm.size2(), nb_cols);
520  }
521 #endif // NDEBUG
522 
523  switch (iora) {
524  case ADD_VALUES:
525  sm += mat;
526  break;
527  case INSERT_VALUES:
528  noalias(sm) = mat;
529  break;
530  default:
531  SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED,
532  "Assembly type not implemented");
533  }
535  };
536 
537  CHKERR asmb((*it)->getMat());
538  // no need to set indices
539  }
540 
541 #ifndef NDEBUG
542  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_schurMatSetValues, 0, 0, 0, 0);
543 #endif // NDEBUG
544 
546 }
547 
549  : OpSchurAssembleBaseImpl(NOSPACE, OPSPACE) {}
550 
554 #ifndef NDEBUG
555  if constexpr (debug_schur)
556  MOFEM_LOG("SELF", Sev::noisy) << "Schur assemble begin";
557 #endif
560 
562 }
563 
564 template <typename OP_SCHUR_ASSEMBLE_BASE>
566  std::vector<std::string> fields_name,
567  std::vector<boost::shared_ptr<Range>> field_ents,
568  std::vector<SmartPetscObj<AO>> sequence_of_aos,
569  std::vector<SmartPetscObj<Mat>> sequence_of_mats,
570  std::vector<bool> sym_schur, std::vector<double> diag_eps, bool symm_op,
571  boost::shared_ptr<BlockStructure> diag_blocks)
572  : OP(NOSPACE, OP::OPSPACE, symm_op), fieldsName(fields_name),
573  fieldEnts(field_ents), sequenceOfAOs(sequence_of_aos),
574  sequenceOfMats(sequence_of_mats), symSchur(sym_schur), diagEps(diag_eps),
575  diagBlocks(diag_blocks) {}
576 
577 template <typename OP_SCHUR_ASSEMBLE_BASE>
579  std::vector<std::string> fields_name,
580  std::vector<boost::shared_ptr<Range>> field_ents,
581  std::vector<SmartPetscObj<AO>> sequence_of_aos,
582  std::vector<SmartPetscObj<Mat>> sequence_of_mats,
583  std::vector<bool> sym_schur, bool symm_op,
584  boost::shared_ptr<BlockStructure> diag_blocks)
586  fields_name, field_ents, sequence_of_aos, sequence_of_mats, sym_schur,
587  std::vector<double>(fields_name.size(), 0), symm_op, diag_blocks) {}
588 
589 template <typename OP_SCHUR_ASSEMBLE_BASE>
590 template <typename I>
592  int side, EntityType type, EntitiesFieldData::EntData &data) {
593 
595 
596 #ifndef NDEBUG
597  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_opSchurAssembleEnd, 0, 0, 0, 0);
598 #endif
599 
600 #ifndef NDEBUG
601  if constexpr (debug_schur)
602  MOFEM_LOG("SELF", Sev::noisy) << "Schur assemble begin -> end";
603 #endif
604 
605 #ifndef NDEBUG
606  auto get_field_name = [&](auto uid) {
607  return OP::getPtrFE()->mField.get_field_name(field_bit_from_bit_number(
608  FieldEntity::getFieldBitNumberFromUniqueId(uid)));
609  };
610 #endif
611 
612  // Assemble Schur complement
613  auto assemble_mat = [&](SmartPetscObj<Mat> M, auto &storage) {
615  for (auto &s : storage) {
616  auto &m = s->getMat();
617  if (m.size1()) {
618  auto &row_ind = s->getRowInd();
619  auto &col_ind = s->getColInd();
620 
621  if (auto ierr = this->assembleSchurMat(
622 
623  M, s->uidRow, row_ind, s->uidCol, col_ind, m, ADD_VALUES
624 
625  )) {
626 #ifndef NDEBUG
627  auto field_ents = OP::getPtrFE()->mField.get_field_ents();
628  auto row_ent_it = field_ents->find(s->uidRow);
629  auto col_ent_it = field_ents->find(s->uidCol);
630  MOFEM_LOG_CHANNEL("SELF");
631  if (row_ent_it != field_ents->end())
632  MOFEM_LOG("SELF", Sev::error)
633  << "Assemble row entity: " << (*row_ent_it)->getName() << " "
634  << (*col_ent_it)->getEntTypeName() << " side "
635  << (*row_ent_it)->getSideNumber();
636  if (col_ent_it != field_ents->end())
637  MOFEM_LOG("SELF", Sev::error)
638  << "Assemble col entity: " << (*col_ent_it)->getName() << " "
639  << (*col_ent_it)->getEntTypeName() << " side "
640  << (*col_ent_it)->getSideNumber();
641 #endif // NDEBUG
642  CHK_THROW_MESSAGE(ierr, "MatSetValues");
643  }
644  }
645  }
646 
648  };
649 
650  auto apply_schur = [&](auto &storage,
651 
652  auto lo_uid, auto hi_uid,
653 
654  double eps, bool symm) {
656 
657  // add off diagonal matrix, i.e. schur complement
658  auto add_off_mat = [&](auto uid_row, auto uid_col, auto &row_ind,
659  auto &col_ind, auto &offMatInvDiagOffMat) {
661 
662  auto it = storage.template get<SchurElemMats::uid_mi_tag>().find(
663  boost::make_tuple(uid_row, uid_col));
664 
665  if (it == storage.template get<SchurElemMats::uid_mi_tag>().end()) {
666 
667  const auto size = SchurElemMats::locMats.size();
668 
669  if (SchurElemMats::maxIndexCounter == size) {
670  SchurElemMats::locMats.push_back(new MatrixDouble());
671  SchurElemMats::rowIndices.push_back(new VectorInt());
672  SchurElemMats::colIndices.push_back(new VectorInt());
673  SchurElemMats::schurElemMats.push_back(new SchurElemMats(
674  SchurElemMats::maxIndexCounter, uid_row, uid_col));
675  } else {
676  SchurElemMats::schurElemMats[SchurElemMats::maxIndexCounter].uidRow =
677  uid_row;
678  SchurElemMats::schurElemMats[SchurElemMats::maxIndexCounter].uidCol =
679  uid_col;
680  }
681 
682  auto p = SchurElemMats::schurL2Storage.emplace(
683  &SchurElemMats::schurElemMats[SchurElemMats::maxIndexCounter++]);
684 #ifndef NDEBUG
685  if (!p.second) {
686  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
687  "Failed to insert");
688  }
689 #endif // NDEBUG
690 
691  auto &mat = (*p.first)->getMat();
692  auto &set_row_ind = (*p.first)->getRowInd();
693  auto &set_col_ind = (*p.first)->getColInd();
694 
695  set_row_ind.resize(row_ind.size(), false);
696  noalias(set_row_ind) = row_ind;
697  set_col_ind.resize(col_ind.size(), false);
698  noalias(set_col_ind) = col_ind;
699 
700  mat.resize(offMatInvDiagOffMat.size1(), offMatInvDiagOffMat.size2(),
701  false);
702  noalias(mat) = offMatInvDiagOffMat;
703 
704 #ifndef NDEBUG
705  if constexpr (debug_schur) {
706  MOFEM_LOG("SELF", Sev::noisy) << "insert: " << get_field_name(uid_row)
707  << " " << get_field_name(uid_col) << " "
708  << mat.size1() << " " << mat.size2();
709  }
710 #endif // NDEBUG
711 
712  } else {
713 
714  auto &mat = (*it)->getMat();
715 #ifndef NDEBUG
716  if constexpr (debug_schur) {
717  MOFEM_LOG("SELF", Sev::noisy) << "add: " << get_field_name(uid_row)
718  << " " << get_field_name(uid_col) << " "
719  << mat.size1() << " " << mat.size2();
720  }
721 #endif // NDEBUG
722 
723 #ifndef NDEBUG
724  if (mat.size1() != offMatInvDiagOffMat.size1()) {
725  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
726  "Wrong size %d != %d", mat.size1(),
727  offMatInvDiagOffMat.size1());
728  }
729  if (mat.size2() != offMatInvDiagOffMat.size2()) {
730  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
731  "Wrong size %d != %d", mat.size2(),
732  offMatInvDiagOffMat.size2());
733  }
734 #endif // NDEBUG
735  mat += offMatInvDiagOffMat;
736  }
738  };
739 
740  auto get_row_view = [&]() {
741  auto row_it =
742  storage.template get<SchurElemMats::uid_mi_tag>().lower_bound(
743  boost::make_tuple(lo_uid, 0));
744  auto hi_row_it =
745  storage.template get<SchurElemMats::uid_mi_tag>().upper_bound(
746  boost::make_tuple(
747  hi_uid, FieldEntity::getHiBitNumberUId(BITFIELDID_SIZE - 1)));
748  std::vector<const SchurElemMats *> schur_row_ptr_view;
749  schur_row_ptr_view.reserve(std::distance(row_it, hi_row_it));
750  for (; row_it != hi_row_it; ++row_it) {
751  schur_row_ptr_view.push_back(*row_it);
752  }
753  return schur_row_ptr_view;
754  };
755 
756  auto get_col_view = [&]() {
757  auto col_it =
758  storage.template get<SchurElemMats::col_mi_tag>().lower_bound(lo_uid);
759  auto hi_col_it =
760  storage.template get<SchurElemMats::col_mi_tag>().upper_bound(hi_uid);
761  std::vector<const SchurElemMats *> schur_col_ptr_view;
762  schur_col_ptr_view.reserve(std::distance(col_it, hi_col_it));
763  for (; col_it != hi_col_it; ++col_it) {
764  schur_col_ptr_view.push_back(*col_it);
765  }
766  return schur_col_ptr_view;
767  };
768 
769  auto schur_row_ptr_view = get_row_view();
770  auto schur_col_ptr_view = get_col_view();
771 
772  // iterate row entities
773  for (auto row_it : schur_row_ptr_view) {
774  // only diagonals to get inverted diagonal
775  if (row_it->uidRow == row_it->uidCol) {
776 #ifndef NDEBUG
777  if constexpr (debug_schur) {
778  MOFEM_LOG("SELF", Sev::noisy)
779  << "invert: uid_row " << get_field_name(row_it->uidRow)
780  << " row uid " << get_field_name(row_it->uidCol) << " : "
781  << (*row_it)->getMat().size1() << " "
782  << (*row_it)->getMat().size2();
783  }
784 #endif // NDEBUG
785 
786  // note invMat is multiplied by -1
787  CHKERR I::invertMat(row_it, invMat, eps);
788 
789  // iterate column entities
790  for (auto c_lo : schur_col_ptr_view) {
791 
792  auto &uid_row = c_lo->uidRow;
793  if (uid_row == row_it->uidRow) {
794  continue;
795  }
796 
797  auto &cc_off_mat = c_lo->getMat();
798  invDiagOffMat.resize(cc_off_mat.size1(), invMat.size2(), false);
799 #ifndef NDEBUG
800  if (invMat.size1() != cc_off_mat.size2()) {
801  MOFEM_LOG("SELF", Sev::error)
802  << "row_uid " << get_field_name(row_it->uidRow) << " row uid "
803  << get_field_name(uid_row);
804  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
805  "Wrong size %d != %d", invMat.size1(), cc_off_mat.size2());
806  }
807 #endif // NDEBUG
808 
809  // noalias(invDiagOffMat) = prod(cc_off_mat, invMat);
810  // A10*inv(A00)
811  cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
812  cc_off_mat.size1(), invMat.size2(), cc_off_mat.size2(),
813  1., &*cc_off_mat.data().begin(), cc_off_mat.size2(),
814  &*invMat.data().begin(), invMat.size2(), 0.,
815  &*invDiagOffMat.data().begin(), invDiagOffMat.size2());
816 
817  // iterate row entities
818  for (auto r_lo : schur_row_ptr_view) {
819 
820  auto &uid_col = r_lo->uidCol;
821 
822  // Skip diagonal
823  if (uid_col == row_it->uidRow) {
824  continue;
825  }
826 
827  // If symmetry only run upper off diagonal terms
828  if (symm && uid_row > uid_col) {
829  continue;
830  }
831 
832  auto &rr_off_mat = r_lo->getMat();
833  offMatInvDiagOffMat.resize(invDiagOffMat.size1(),
834  rr_off_mat.size2(), false);
835 #ifndef NDEBUG
836  if (rr_off_mat.size1() != invDiagOffMat.size2()) {
837  MOFEM_LOG("SELF", Sev::error)
838  << "uid_row " << get_field_name(row_it->uidRow)
839  << ": col uid " << get_field_name(uid_col) << " row uid "
840  << get_field_name(uid_row);
841  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
842  "Wrong size %d != %d", rr_off_mat.size1(),
843  invDiagOffMat.size2());
844  }
845 #endif // NDEBUG
846 
847  // noalias(offMatInvDiagOffMat) = prod(invDiagOffMat, rr_off_mat);
848  // A10*inv(A00)*A01
849  cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
850  invDiagOffMat.size1(), rr_off_mat.size2(),
851  invDiagOffMat.size2(), 1.,
852  &*invDiagOffMat.data().begin(), invDiagOffMat.size2(),
853  &*rr_off_mat.data().begin(), rr_off_mat.size2(), 0.,
854  &*offMatInvDiagOffMat.data().begin(),
855  offMatInvDiagOffMat.size2());
856 
857  // Make on diagonal A11 have Schur complement S
858  CHKERR add_off_mat(uid_row, uid_col, c_lo->getRowInd(),
859  r_lo->getColInd(), offMatInvDiagOffMat);
860 
861  if (symm && uid_row != uid_col) {
862  transOffMatInvDiagOffMat.resize(offMatInvDiagOffMat.size2(),
863  offMatInvDiagOffMat.size1(),
864  false);
865  noalias(transOffMatInvDiagOffMat) = trans(offMatInvDiagOffMat);
866  CHKERR add_off_mat(uid_col, uid_row, r_lo->getColInd(),
867  c_lo->getRowInd(), transOffMatInvDiagOffMat);
868  }
869  }
870  }
871  }
872  }
873 
875  };
876 
877  auto erase_factored = [&](auto &storage, auto lo_uid, auto hi_uid) {
879 
880  auto r_lo = storage.template get<SchurElemMats::uid_mi_tag>().lower_bound(
881  boost::make_tuple(lo_uid, 0));
882  auto r_hi = storage.template get<SchurElemMats::uid_mi_tag>().upper_bound(
883  boost::make_tuple(hi_uid,
884  FieldEntity::getHiBitNumberUId(BITFIELDID_SIZE - 1)));
885  storage.template get<SchurElemMats::uid_mi_tag>().erase(r_lo, r_hi);
886 
887  auto c_lo =
888  storage.template get<SchurElemMats::col_mi_tag>().lower_bound(lo_uid);
889  auto c_hi =
890  storage.template get<SchurElemMats::col_mi_tag>().upper_bound(hi_uid);
891  storage.template get<SchurElemMats::col_mi_tag>().erase(c_lo, c_hi);
892 
894  };
895 
896  auto assemble_S = [&](auto &storage, auto ao, auto mat) {
898 
899  // apply AO
900  if (ao) {
901  for (auto &m : storage) {
902  auto &ind_row = m->getRowInd();
903  CHKERR AOApplicationToPetsc(ao, ind_row.size(), &*ind_row.begin());
904  auto &ind_col = m->getColInd();
905  CHKERR AOApplicationToPetsc(ao, ind_col.size(), &*ind_col.begin());
906  }
907  }
908 
909  // assemble matrix
910  if (mat) {
911  CHKERR assemble_mat(mat, storage);
912  }
913 
915  };
916 
917  auto assemble_mat_a00_solver = [&](auto &&list) {
919 #ifndef NDEBUG
920  if (!diagBlocks->dataInvBlocksPtr)
921  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "No dataInvBlocksPtr");
922 #endif // NDEBUG
923 
924  for (auto lo : list) {
925  auto &m = lo->getMat();
926  if (m.size1() && m.size2()) {
927  auto row_ind = lo->getRowInd()[0];
928  auto col_ind = lo->getColInd()[0];
929  auto nb_rows = m.size1();
930  auto nb_cols = m.size2();
931  auto it = diagBlocks->blockIndex.get<1>().find(
932  boost::make_tuple(row_ind, col_ind, nb_rows, nb_cols));
933  if (it != diagBlocks->blockIndex.get<1>().end()) {
934  auto inv_shift = it->getInvShift();
935  if (inv_shift != -1) {
936  auto *ptr = &((*diagBlocks->dataInvBlocksPtr)[inv_shift]);
937  // assemble of diag terms, witch might be changed by Schur
938  // complement
939  std::copy(m.data().begin(), m.data().end(), ptr);
940  }
941  }
942  }
943  }
945  };
946 
947  auto assemble_inv_diag_a00_solver = [&](auto &&list) {
949 #ifndef NDEBUG
950  if (!diagBlocks->dataInvBlocksPtr)
951  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "No dataInvBlocksPtr");
952 #endif // NDEBUG
953 
954  for (auto lo : list) {
955  if (invMat.size1() && invMat.size2()) {
956  auto row_ind = lo->getRowInd()[0];
957  auto col_ind = lo->getColInd()[0];
958  auto nb_rows = invMat.size1();
959  auto nb_cols = invMat.size2();
960  auto it = diagBlocks->blockIndex.get<1>().find(
961  boost::make_tuple(row_ind, col_ind, nb_rows, nb_cols));
962  if (it != diagBlocks->blockIndex.get<1>().end()) {
963  auto inv_shift = it->getInvShift();
964 #ifndef NDEBUG
965  if (inv_shift == -1)
966  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "No inv_shift");
967 #endif // NDEBUG
968  auto *ptr = &((*diagBlocks->dataInvBlocksPtr)[inv_shift]);
969  // assemble inverted diag
970  std::copy(invMat.data().begin(), invMat.data().end(), ptr);
971  }
972  }
973  }
975  };
976 
977  auto gey_a00_diag_list = [&](auto &storage, auto lo_uid, auto hi_uid) {
978  std::vector<const SchurElemMats *> list;
979  auto lo = storage.template get<SchurElemMats::uid_mi_tag>().lower_bound(
980  boost::make_tuple(lo_uid, 0));
981  auto hi = storage.template get<SchurElemMats::uid_mi_tag>().upper_bound(
982  boost::make_tuple(hi_uid,
983  FieldEntity::getHiBitNumberUId(BITFIELDID_SIZE - 1)));
984  list.reserve(std::distance(lo, hi));
985  for (auto it = lo; it != hi; ++it) {
986  if ((*it)->getRowInd()[0] == (*it)->getColInd()[0]) {
987  list.push_back(*it);
988  }
989  }
990  return list;
991  };
992 
993  auto get_a00_row_list = [&](auto &storage, auto lo_uid, auto hi_uid) {
994  std::vector<const SchurElemMats *> list;
995  auto lo = storage.template get<SchurElemMats::uid_mi_tag>().lower_bound(
996  boost::make_tuple(lo_uid, 0));
997  auto hi = storage.template get<SchurElemMats::uid_mi_tag>().upper_bound(
998  boost::make_tuple(hi_uid,
999  FieldEntity::getHiBitNumberUId(BITFIELDID_SIZE - 1)));
1000  list.reserve(std::distance(lo, hi));
1001  for (auto it = lo; it != hi; ++it) {
1002  if ((*it)->getRowInd()[0] != (*it)->getColInd()[0]) {
1003  list.push_back(*it);
1004  }
1005  }
1006  return list;
1007  };
1008 
1009  auto get_a00_col_list = [&](auto &storage, auto lo_uid, auto hi_uid) {
1010  std::vector<const SchurElemMats *> list;
1011  auto lo =
1012  storage.template get<SchurElemMats::col_mi_tag>().lower_bound(lo_uid);
1013  auto hi =
1014  storage.template get<SchurElemMats::col_mi_tag>().upper_bound(hi_uid);
1015  list.reserve(std::distance(lo, hi));
1016  for (auto it = lo; it != hi; ++it) {
1017  if ((*it)->getRowInd()[0] != (*it)->getColInd()[0]) {
1018  list.push_back(*it);
1019  }
1020  }
1021  return list;
1022  };
1023 
1024  auto get_a00_uids = [&]() {
1025  auto get_field_bit = [&](auto &name) {
1026  return OP::getPtrFE()->mField.get_field_bit_number(name);
1027  };
1028 
1029  std::vector<std::pair<UId, UId>> a00_uids;
1030  a00_uids.reserve(fieldsName.size());
1031  for (auto ss = 0; ss != fieldsName.size(); ++ss) {
1032  auto field_bit = get_field_bit(fieldsName[ss]);
1033  auto row_ents = fieldEnts[ss];
1034  if (row_ents) {
1035  for (auto p = row_ents->pair_begin(); p != row_ents->pair_end(); ++p) {
1036  auto lo_uid =
1037  FieldEntity::getLoLocalEntityBitNumber(field_bit, p->first);
1038  auto hi_uid =
1039  FieldEntity::getHiLocalEntityBitNumber(field_bit, p->second);
1040  a00_uids.push_back(std::make_pair(lo_uid, hi_uid));
1041  }
1042  } else {
1043  auto lo_uid = FieldEntity::getLoLocalEntityBitNumber(
1044  field_bit, get_id_for_min_type<MBVERTEX>());
1045  auto hi_uid = FieldEntity::getHiLocalEntityBitNumber(
1046  field_bit, get_id_for_max_type<MBENTITYSET>());
1047  a00_uids.push_back(std::make_pair(lo_uid, hi_uid));
1048  }
1049  }
1050  return a00_uids;
1051  };
1052 
1053 #ifndef NDEBUG
1054  auto list_storage = [&](auto &storage) {
1056  int i = 0;
1057  for (auto &p : storage) {
1058  MOFEM_LOG("SELF", Sev::noisy)
1059  << "List schur storage: " << i << " " << p->iDX << ": "
1060  << get_field_name(p->uidRow) << " " << get_field_name(p->uidCol)
1061  << " : " << p->getMat().size1() << " " << p->getMat().size2();
1062  ++i;
1063  }
1065  };
1066 #endif // NDEBUG
1067 
1068  auto assemble = [&](auto &&a00_uids) {
1070  auto &storage = SchurElemMats::schurL2Storage;
1071  int ss = 0;
1072  for (auto &p : a00_uids) {
1073  auto [lo_uid, hi_uid] = p;
1074 #ifndef NDEBUG
1075  if constexpr (debug_schur) {
1076  list_storage(storage);
1077  MOFEM_LOG("SELF", Sev::noisy)
1078  << "Schur assemble: " << get_field_name(lo_uid) << " - "
1079  << get_field_name(hi_uid);
1080  }
1081 #endif
1082  CHKERR apply_schur(storage, lo_uid, hi_uid, diagEps[ss], symSchur[ss]);
1083  if (diagBlocks) {
1084  CHKERR assemble_mat_a00_solver(
1085  get_a00_row_list(storage, lo_uid, hi_uid));
1086  CHKERR assemble_mat_a00_solver(
1087  get_a00_col_list(storage, lo_uid, hi_uid));
1088  CHKERR assemble_inv_diag_a00_solver(
1089  gey_a00_diag_list(storage, lo_uid, hi_uid));
1090  }
1091  CHKERR erase_factored(storage, lo_uid, hi_uid);
1092  CHKERR assemble_S(storage, sequenceOfAOs[ss], sequenceOfMats[ss]);
1093  ++ss;
1094  }
1096  };
1097 
1098  // Assemble Schur complements
1099  CHKERR assemble(get_a00_uids());
1100 
1101 #ifndef NDEBUG
1102  if constexpr (debug_schur)
1103  MOFEM_LOG("SELF", Sev::noisy) << "Schur assemble done";
1104 #endif
1105 
1106 #ifndef NDEBUG
1107  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_opSchurAssembleEnd, 0, 0, 0, 0);
1108 #endif
1109 
1111 }
1112 
/**
 * @brief Invert a diagonal block using LAPACK dsysv (symmetric indefinite,
 * Bunch-Kaufman factorisation, lower triangle).
 *
 * Solves M * X = -I, so on return @p inv holds the *negated* inverse of the
 * block; the Schur-complement assembly expects this sign convention.
 *
 * @note M is overwritten by the factorisation, and when @p eps != 0 the
 *       diagonal of M is regularised in place first.
 */
struct SchurDSYSV {
  static auto invertMat(const SchurElemMats *row_ptr, MatrixDouble &inv,
                        double eps) {

    auto &m = row_ptr->getMat();

    VectorInt ipiv;
    VectorDouble lapack_work;
    const int nb = m.size1();

    // Optional diagonal shift m(c,c) += eps (stabilises near-singular
    // blocks). NOTE(review): unlike SchurDGESV there is no debug check that
    // m is square — assumed guaranteed by the caller; confirm.
    if (eps) {
      for (int cc = 0; cc != nb; ++cc) {
        m(cc, cc) += eps;
      }
    }

    // Right-hand side is -I; dsysv overwrites it with the solution -M^{-1}.
    inv.resize(nb, nb, false);
    inv.clear();
    auto ptr = &*inv.data().begin();
    for (int c = 0; c != nb; ++c, ptr += nb + 1)
      *ptr = -1;
    ipiv.resize(nb, false);
    lapack_work.resize(nb * nb, false);
    const auto info =
        lapack_dsysv('L', nb, nb, &*m.data().begin(), nb, &*ipiv.begin(),
                     &*inv.data().begin(), nb, &*lapack_work.begin(), nb * nb);
    if (info != 0)
      SETERRQ1(PETSC_COMM_SELF, MOFEM_OPERATION_UNSUCCESSFUL,
               "Can not invert matrix info = %d", info);
  };
};
1146 
/**
 * @brief Invert a diagonal block using LAPACK dgesv (general square matrix,
 * LU factorisation with partial pivoting).
 *
 * Solves M * X = -I, so on return @p inv holds the *negated* inverse of the
 * block (same sign convention as SchurDSYSV).
 *
 * @note M is overwritten by the factorisation, and when @p eps != 0 the
 *       diagonal of M is regularised in place first.
 */
struct SchurDGESV {
  static auto invertMat(const SchurElemMats *row_ptr, MatrixDouble &inv,
                        double eps) {

    auto &m = row_ptr->getMat();

    VectorInt ipiv;
    const auto nb = m.size1();
#ifndef NDEBUG
    // Debug-only sanity check: LU inversion requires a square block.
    if (nb != m.size2()) {
      SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
               "It should be square matrix %d != %d", nb, m.size2());
    }
#endif

    // Optional diagonal shift m(c,c) += eps (stabilises near-singular
    // blocks).
    if (eps) {
      for (int cc = 0; cc != nb; ++cc) {
        m(cc, cc) += eps;
      }
    }

    // Right-hand side is -I; dgesv overwrites it with the solution -M^{-1}.
    inv.resize(nb, nb, false);
    inv.clear();
    auto ptr = &*inv.data().begin();
    for (int c = 0; c != nb; ++c, ptr += nb + 1)
      *ptr = -1;
    ipiv.resize(nb, false);
    const auto info = lapack_dgesv(nb, nb, &*m.data().begin(), nb,
                                   &*ipiv.begin(), &*inv.data().begin(), nb);
    if (info != 0)
      SETERRQ1(PETSC_COMM_SELF, MOFEM_OPERATION_UNSUCCESSFUL,
               "Can not invert matrix info = %d", info);
  };
};
1183 
  // NOTE(review): this function's signature lines were lost in extraction.
  // Dispatch to the shared implementation, inverting A00 diagonal blocks
  // with LAPACK dsysv (symmetric indefinite) via SchurDSYSV.
  return doWorkImpl<SchurDSYSV>(side, type, data);
}
1189 
  // NOTE(review): this function's signature lines were lost in extraction.
  // Dispatch to the shared implementation, inverting A00 diagonal blocks
  // with LAPACK dgesv (general LU) via SchurDGESV.
  return doWorkImpl<SchurDGESV>(side, type, data);
}
1195 
/**
 * @brief Build the block-matrix structure backing the Schur MATSHELL.
 *
 * Loops over the finite elements named in @p schur_fe_op_vec and, for each
 * requested (row field, col field) pair, records one dense block per
 * (row entity, col entity) — global/local offsets and sizes — in
 * BlockStructure::blockIndex. All block data is then laid out in a single
 * contiguous array, and ghosted work vectors for mult/solve are attached.
 *
 * @param dm               problem DM
 * @param schur_fe_op_vec  element name -> list of (row field, col field)
 * @return populated BlockStructure
 */
boost::shared_ptr<BlockStructure> createBlockMatStructure(

    DM dm, //< dm
    SchurFEOpsFEandFields schur_fe_op_vec //< block elements

) {

  // Comparators for binary search over field entities ordered by local UId.
  // Expired weak pointers sort as "smaller than any UId".
  auto cmp_uid_lo = [](const boost::weak_ptr<FieldEntity> &a, const UId &b) {
    if (auto a_ptr = a.lock()) {
      if (a_ptr->getLocalUniqueId() < b)
        return true;
      else
        return false;
    } else {
      return false;
    }
  };

  auto cmp_uid_hi = [](const UId &b, const boost::weak_ptr<FieldEntity> &a) {
    if (auto a_ptr = a.lock()) {
      if (b < a_ptr->getLocalUniqueId())
        return true;
      else
        return false;
    } else {
      return true;
    }
  };

  // UId range covering every entity type of a given field (vertex to set).
  auto get_uid_pair = [](const auto &field_id) {
    auto lo_uid = FieldEntity::getLocalUniqueIdCalculate(
        field_id, get_id_for_min_type<MBVERTEX>());
    auto hi_uid = FieldEntity::getLocalUniqueIdCalculate(
        field_id, get_id_for_max_type<MBENTITYSET>());
    return std::make_pair(lo_uid, hi_uid);
  };

  // Iterator range of field entities intersecting the UId range.
  auto get_it_pair = [cmp_uid_lo, cmp_uid_hi](auto &&field_ents, auto &&p_uid) {
    auto lo = std::lower_bound(field_ents.begin(), field_ents.end(),
                               p_uid.first, cmp_uid_lo);
    auto hi = std::upper_bound(field_ents.begin(), field_ents.end(),
                               p_uid.second, cmp_uid_hi);
    return std::make_pair(lo, hi);
  };

  // extract DOFs for rows/columns. DOFs are associated with fields entities
  // for given problem.
  auto row_extractor = [](auto &e) { return e->entityCacheRowDofs; };
  auto col_extractor = [](auto &e) { return e->entityCacheColDofs; };

  // For each entity in range, collect (global idx, nb dofs, local idx) of
  // the first DOF — i.e. one tuple per dense block row/column segment.
  auto extract_data = [](auto &&its, auto extractor) {
    std::vector<std::tuple<int, int, int>> data;
    data.reserve(std::distance(its.first, its.second));
    // iterate field dofs
    for (; its.first != its.second; ++its.first) {
      if (auto e = its.first->lock()) {
        if (auto cache = extractor(e).lock()) {
          auto nb_dofs = std::distance(cache->loHi[0], cache->loHi[1]);
          if (nb_dofs) {
            auto glob = (*cache->loHi[0])->getPetscGlobalDofIdx();
            auto loc = (*cache->loHi[0])->getPetscLocalDofIdx();
            data.emplace_back(glob, nb_dofs, loc);

#ifndef NDEBUG

            // Debug-only: every DOF on the entity must have a valid global
            // index. NOTE(review): the SETERRQ line here was lost in
            // extraction; only its message argument remains.
            for (auto lo = cache->loHi[0]; lo != cache->loHi[1]; ++lo) {
              auto glob = (*lo)->getPetscGlobalDofIdx();
              if (glob == -1) {
                       "Wrong global index");
              }
            }

#endif
          }
        }
      }
    }
    return data;
  };

  auto data_ptr = boost::make_shared<BlockStructure>();
  auto m_field_ptr = getInterfacePtr(dm);

  // create element to extract data
  auto fe_method = boost::shared_ptr<MoFEM::FEMethod>(new MoFEM::FEMethod());

  for (auto &d : schur_fe_op_vec) {

    // extract bit numbers for row and column
    auto get_bit_numbers = [&d](auto op) {
      std::vector<FieldBitNumber> bit_numbers(d.second.size());
      std::transform(d.second.begin(), d.second.end(), bit_numbers.begin(), op);
      return bit_numbers;
    };

    // extract bit numbers for row
    auto row_bit_numbers = get_bit_numbers(
        [&](auto &p) { return m_field_ptr->get_field_bit_number(p.first); });
    // extract bit numbers for column
    auto col_bit_numbers = get_bit_numbers(
        [&](auto &p) { return m_field_ptr->get_field_bit_number(p.second); });

    fe_method->preProcessHook = []() { return 0; };
    fe_method->postProcessHook = []() { return 0; };
    // NOTE(review): [&] captures loop-locals (row/col_bit_numbers); safe only
    // because the hook is invoked synchronously by the element loop below,
    // within the same iteration.
    fe_method->operatorHook = [&]() {

      for (auto f = 0; f != row_bit_numbers.size(); ++f) {

        auto row_data =
            extract_data(get_it_pair(fe_method->getRowFieldEnts(),
                                     get_uid_pair(row_bit_numbers[f])),
                         row_extractor);
        auto col_data =
            extract_data(get_it_pair(fe_method->getColFieldEnts(),
                                     get_uid_pair(col_bit_numbers[f])),
                         col_extractor);

        // One Indexes record per (row entity, col entity) pair; shifts are
        // assigned later (-1 placeholders here).
        for (auto &r : row_data) {
          auto [r_glob, r_nb_dofs, r_loc] = r;
          for (auto &c : col_data) {
            auto [c_glob, c_nb_dofs, c_loc] = c;
            if (r_glob != -1 && c_glob != -1) {
              data_ptr->blockIndex.insert(BlockStructure::Indexes{
                  r_glob, c_glob, r_nb_dofs, c_nb_dofs, r_loc, c_loc, -1, -1});
            }
          }
        }
      }

    };

    CHKERR DMoFEMLoopFiniteElementsUpAndLowRank(dm, d.first, fe_method, 0,
                                                m_field_ptr->get_comm_size());
  };

  // order by column (that is for matrix multiplication)
  // NOTE(review): mem_size is an int accumulator; could overflow for very
  // large local problems — consider a wider type. Confirm upstream.
  auto mem_size = 0;
  for (auto &v : data_ptr->blockIndex.get<0>()) {
    v.getMatShift() = mem_size;
    mem_size += v.getNbCols() * v.getNbRows();
  }

  // NOTE(review): the statement guarded by this if (an error/throw when the
  // required storage exceeds vector capacity) was lost in extraction.
  std::vector<double> tmp;
  if (mem_size > tmp.max_size())

  data_ptr->dataBlocksPtr =
      boost::make_shared<std::vector<double>>(mem_size, 0);

  // Ghosted work vectors used by mult/solve; detach DM so their lifetime is
  // owned by the BlockStructure.
  auto ghost_x = createDMVector(dm);
  auto ghost_y = createDMVector(dm);
  CHKERR VecSetDM(ghost_x, PETSC_NULL);
  CHKERR VecSetDM(ghost_y, PETSC_NULL);

  data_ptr->ghostX = ghost_x;
  data_ptr->ghostY = ghost_y;

  return data_ptr;
}
1360 
1361 static MoFEMErrorCode mult_schur_block_shell(Mat mat, Vec x, Vec y,
1362  InsertMode iora);
1363 
1364 static MoFEMErrorCode solve_schur_block_shell(Mat mat, Vec x, Vec y,
1365  InsertMode iora);
1366 
1367 static PetscErrorCode mult(Mat mat, Vec x, Vec y) {
1368  return mult_schur_block_shell(mat, x, y, INSERT_VALUES);
1369 }
1370 static PetscErrorCode mult_add(Mat mat, Vec x, Vec y) {
1371  return mult_schur_block_shell(mat, x, y, ADD_VALUES);
1372 }
1373 static PetscErrorCode solve(Mat mat, Vec x, Vec y) {
1374  return solve_schur_block_shell(mat, x, y, INSERT_VALUES);
1375 }
1376 static PetscErrorCode solve_add(Mat mat, Vec x, Vec y) {
1377  return solve_schur_block_shell(mat, x, y, ADD_VALUES);
1378 }
1379 
/**
 * @brief MATSHELL callback (MATOP_ZERO_ROWS_COLUMNS).
 *
 * Zeroes the listed rows and columns in every stored dense block and puts
 * @p diag on the (locally owned) diagonal where a matching diagonal block
 * exists. In parallel, the row list is all-gathered so every rank zeroes
 * columns of rows owned elsewhere.
 *
 * @note The Vec arguments @p x and @p b are accepted to match the PETSc
 *       interface but are not used here — no right-hand-side fix-up is done.
 */
static PetscErrorCode zero_rows_columns(Mat A, PetscInt N,
                                        const PetscInt rows[], PetscScalar diag,
                                        Vec x, Vec b) {

  BlockStructure *ctx;
  CHKERR MatShellGetContext(A, (void **)&ctx);

  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_zeroRowsAndCols, 0, 0, 0, 0);

  const int *ptr = &rows[0];
  int is_nb_rows = N;
  SmartPetscObj<IS> is_local;

  // In parallel, gather the global row list on every rank.
  MPI_Comm comm;
  CHKERR PetscObjectGetComm((PetscObject)A, &comm);
  int size;
  MPI_Comm_size(comm, &size);
  if (size > 1) {
    auto is = createISGeneral(comm, N, rows, PETSC_USE_POINTER);
    is_local = isAllGather(is);
  }
  if (is_local) {
    CHKERR ISGetSize(is_local, &is_nb_rows);
#ifndef NDEBUG
    if constexpr (debug_schur) {
      CHKERR ISView(is_local, PETSC_VIEWER_STDOUT_WORLD);
    }
#endif
    CHKERR ISGetIndices(is_local, &ptr);
  }

  int loc_m, loc_n;
  CHKERR MatGetLocalSize(A, &loc_m, &loc_n);

  // Zero the row `row` in every block that contains it.
  // NOTE(review): lower_bound on the block's starting row means blocks whose
  // range begins *before* `row` are never visited — this assumes requested
  // rows always align with block starts; confirm with the index layout.
  for (auto n = 0; n != is_nb_rows; ++n) {
    auto row = ptr[n];
    auto rlo = ctx->blockIndex.get<2>().lower_bound(row);
    auto rhi = ctx->blockIndex.get<2>().end();
    for (; rlo != rhi; ++rlo) {
      auto r_shift = row - rlo->getRow();
      if (r_shift >= 0 && r_shift < rlo->getNbRows()) {
        auto *ptr = &(*ctx->dataBlocksPtr)[rlo->getMatShift()];
        for (auto i = 0; i != rlo->getNbCols(); ++i) {
          ptr[i + r_shift * rlo->getNbCols()] = 0;
        }
      } else if (rlo->getRow() + rlo->getNbRows() > row) {
        break;
      }
    }
  }

  // Zero the column `col` in every block that contains it, and set the
  // diagonal entry for locally-owned diagonal blocks.
  for (auto n = 0; n != is_nb_rows; ++n) {
    auto col = ptr[n];
    auto clo = ctx->blockIndex.get<3>().lower_bound(col);
    auto chi = ctx->blockIndex.get<3>().end();
    for (; clo != chi; ++clo) {
      auto c_shift = col - clo->getCol();
      if (c_shift >= 0 && c_shift < clo->getNbCols()) {

        auto *ptr = &(*ctx->dataBlocksPtr)[clo->getMatShift()];
        for (auto i = 0; i != clo->getNbRows(); ++i) {
          ptr[c_shift + i * clo->getNbCols()] = 0;
        }

        // diagonal
        if (

            clo->getRow() == clo->getCol() && clo->getLocRow() < loc_m &&
            clo->getLocCol() < loc_n

        ) {
          auto r_shift = col - clo->getCol();
          if (r_shift >= 0 && r_shift < clo->getNbRows()) {
            ptr[c_shift + r_shift * clo->getNbCols()] = diag;
          }
        }
      } else if (clo->getCol() + clo->getNbCols() > col) {
        break;
      }
    }
  }

  if (is_local) {
    CHKERR ISRestoreIndices(is_local, &ptr);
  }

  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_zeroRowsAndCols, 0, 0, 0, 0);

}
1471 
/**
 * @brief MATSHELL callback (MATOP_ZERO_ENTRIES).
 *
 * Zeroes all stored dense block data — main blocks, inverse-diagonal blocks,
 * and preconditioner blocks — whichever arrays are allocated.
 */
static PetscErrorCode mat_zero(Mat m) {
  BlockStructure *ctx;
  CHKERR MatShellGetContext(m, (void **)&ctx);
  if (ctx->dataBlocksPtr)
    std::fill(ctx->dataBlocksPtr->begin(), ctx->dataBlocksPtr->end(), 0.);
  if (ctx->dataInvBlocksPtr)
    std::fill(ctx->dataInvBlocksPtr->begin(), ctx->dataInvBlocksPtr->end(), 0.);
  if (ctx->preconditionerBlocksPtr)
    std::fill(ctx->preconditionerBlocksPtr->begin(),
              ctx->preconditionerBlocksPtr->end(), 0.);
}
1485 
  CHKERR MatShellSetManageScalingShifts(mat_raw);
  // NOTE(review): this function's signature line was lost in extraction.
  // Wire the MATSHELL operation table to the block-structure kernels:
  // mult/mult_add -> mult_schur_block_shell, solve/solve_add ->
  // solve_schur_block_shell, plus the zeroing callbacks defined above.
  CHKERR MatShellSetOperation(mat_raw, MATOP_MULT, (void (*)(void))mult);
  CHKERR MatShellSetOperation(mat_raw, MATOP_MULT_ADD,
                              (void (*)(void))mult_add);
  CHKERR MatShellSetOperation(mat_raw, MATOP_SOLVE, (void (*)(void))solve);
  CHKERR MatShellSetOperation(mat_raw, MATOP_SOLVE_ADD,
                              (void (*)(void))solve_add);
  CHKERR MatShellSetOperation(mat_raw, MATOP_ZERO_ENTRIES,
                              (void (*)(void))mat_zero);
  CHKERR MatShellSetOperation(mat_raw, MATOP_ZERO_ROWS_COLUMNS,
                              (void (*)(void))zero_rows_columns);

};
1502 
    boost::shared_ptr<BlockStructure> data) {

  // NOTE(review): the first line of this signature was lost in extraction.
  // Factory: creates a MATSHELL sized to the DM's problem, attaches the
  // BlockStructure as its context, installs the block operations, and
  // returns (matrix, data) as a pair.
  auto problem_ptr = getProblemPtr(dm);
  auto nb_local = problem_ptr->nbLocDofsRow;
  auto nb_global = problem_ptr->nbDofsRow;

  // Shell matrix must be square: nb. rows equals nb. columns, locally and
  // globally. NOTE(review): the SETERRQ lines in both checks were lost in
  // extraction; only their message arguments remain.
  if (nb_local != problem_ptr->nbLocDofsCol) {
    MOFEM_LOG("SELF", Sev::error)
        << "Wrong size " << nb_local << " != " << problem_ptr->nbLocDofsCol;
         "nb. cols is inconsistent with nb. rows");
  }
  if (nb_global != problem_ptr->nbDofsCol) {
    MOFEM_LOG("SELF", Sev::error)
        << "Wrong size " << nb_global << " != " << problem_ptr->nbDofsCol;
         "nb. cols is inconsistent with nb. rows");
  }

  // get comm from DM
  MPI_Comm comm;
  CHKERR PetscObjectGetComm((PetscObject)dm, &comm);

  Mat mat_raw;
  CHKERR MatCreateShell(comm, nb_local, nb_local, nb_global, nb_global,
                        (void *)data.get(), &mat_raw);
  CHKERR setSchurBlockMatOps(mat_raw);
  // CHKERR PetscObjectSetName((PetscObject)mat_raw, MoFEM_BLOCK_MAT);

  // SmartPetscObj takes ownership of the raw Mat reference.
  return std::make_pair(SmartPetscObj<Mat>(mat_raw), data);
}
1536 
// Threshold for switching to BLAS: blocks with min(rows, cols) above this
// use cblas_dgemv; smaller blocks use hand-rolled loops, where the BLAS call
// overhead would dominate.
constexpr int max_gemv_size = 2;
1538 
                                             InsertMode iora) {
  // NOTE(review): the first signature line (static MoFEMErrorCode
  // mult_schur_block_shell(Mat mat, Vec x, Vec y, ...)) was lost in
  // extraction.
  //
  // Computes y = A*x (INSERT_VALUES) or y += A*x (ADD_VALUES) by looping
  // over the stored dense blocks. Work is split into two sweeps so the
  // owned part of x is multiplied while the ghost update is in flight.
  BlockStructure *ctx;
  CHKERR MatShellGetContext(mat, (void **)&ctx);

  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_BlockStructureMult, 0, 0, 0, 0);

  int x_loc_size;
  CHKERR VecGetLocalSize(x, &x_loc_size);
  int y_loc_size;
  CHKERR VecGetLocalSize(y, &y_loc_size);

  Vec ghost_x = ctx->ghostX;
  Vec ghost_y = ctx->ghostY;

  CHKERR VecCopy(x, ghost_x);

  // Zero the full local form of the accumulator (owned + ghost entries).
  double *y_array;
  Vec loc_ghost_y;
  CHKERR VecGhostGetLocalForm(ghost_y, &loc_ghost_y);
  int nb_y;
  CHKERR VecGetLocalSize(loc_ghost_y, &nb_y);
  CHKERR VecGetArray(loc_ghost_y, &y_array);
  for (auto i = 0; i != nb_y; ++i)
    y_array[i] = 0.;
  CHKERR VecRestoreArray(loc_ghost_y, &y_array);
  CHKERR VecGhostRestoreLocalForm(ghost_y, &loc_ghost_y);

  // Multiply all blocks whose local column index lies in [low_x, hi_x) and
  // local row index in [low_y, hi_y).
  auto mult = [&](int low_x, int hi_x, int low_y, int hi_y) {

    double *x_array;
    Vec loc_ghost_x;
    CHKERR VecGhostGetLocalForm(ghost_x, &loc_ghost_x);
    CHKERR VecGetArray(loc_ghost_x, &x_array);

    double *y_array;
    Vec loc_ghost_y;
    CHKERR VecGhostGetLocalForm(ghost_y, &loc_ghost_y);
    int nb_y;
    CHKERR VecGetLocalSize(loc_ghost_y, &nb_y);
    CHKERR VecGetArray(loc_ghost_y, &y_array);

    double *block_ptr = &*ctx->dataBlocksPtr->begin();
    auto it = ctx->blockIndex.get<0>().lower_bound(0);
    auto hi = ctx->blockIndex.get<0>().end();

    while (it != hi) {
      // Skip blocks outside the requested local row/column window.
      if (it->getLocRow() < low_y || it->getLocRow() >= hi_y ||
          it->getLocCol() < low_x || it->getLocCol() >= hi_x) {
        ++it;
        continue;
      }

      auto nb_rows = it->getNbRows();
      auto nb_cols = it->getNbCols();
      auto x_ptr = &x_array[it->getLocCol()];
      auto y_ptr = &y_array[it->getLocRow()];
      auto ptr = &block_ptr[it->getMatShift()];

      // BLAS for large blocks, manual loops for tiny ones (see
      // max_gemv_size).
      if (std::min(nb_rows, nb_cols) > max_gemv_size) {
        cblas_dgemv(CblasRowMajor, CblasNoTrans, nb_rows, nb_cols, 1.0, ptr,
                    nb_cols, x_ptr, 1, 1.0, y_ptr, 1);
      } else {
        for (auto r = 0; r != nb_rows; ++r) {
          for (auto c = 0; c != nb_cols; ++c) {
            y_ptr[r] += ptr[r * nb_cols + c] * x_ptr[c];
          }
        }
      }
      ++it;
    }

    // Optionally add the preconditioner blocks' contribution as well.
    if (ctx->multiplyByPreconditioner) {

      if (!ctx->preconditionerBlocksPtr)
        SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
                "No parentBlockStructurePtr");

      auto preconditioner_ptr = &*ctx->preconditionerBlocksPtr->begin();

      auto it = ctx->blockIndex.get<0>().lower_bound(0);
      auto hi = ctx->blockIndex.get<0>().end();

      while (it != hi) {
        if (it->getLocRow() < low_y || it->getLocRow() >= hi_y ||
            it->getLocCol() < low_x || it->getLocCol() >= hi_x) {
          ++it;
          continue;
        }

        // Only blocks with an assigned preconditioner shift participate.
        if (it->getInvShift() != -1) {
          auto nb_rows = it->getNbRows();
          auto nb_cols = it->getNbCols();
          auto x_ptr = &x_array[it->getLocCol()];
          auto y_ptr = &y_array[it->getLocRow()];
          auto ptr = &preconditioner_ptr[it->getInvShift()];
          if (std::min(nb_rows, nb_cols) > max_gemv_size) {
            cblas_dgemv(CblasRowMajor, CblasNoTrans, nb_rows, nb_cols, 1.0, ptr,
                        nb_cols, x_ptr, 1, 1.0, y_ptr, 1);
          } else {
            for (auto r = 0; r != nb_rows; ++r) {
              for (auto c = 0; c != nb_cols; ++c) {
                y_ptr[r] += ptr[r * nb_cols + c] * x_ptr[c];
              }
            }
          }
        }

        ++it;
      }
    }

    CHKERR VecRestoreArray(loc_ghost_x, &x_array);
    CHKERR VecRestoreArray(loc_ghost_y, &y_array);
    CHKERR VecGhostRestoreLocalForm(ghost_x, &loc_ghost_x);
    CHKERR VecGhostRestoreLocalForm(ghost_y, &loc_ghost_y);
  };

  // Overlap communication and computation: multiply the owned columns while
  // ghost values of x are in transit, then multiply the ghost columns.
  constexpr auto max_int = std::numeric_limits<int>::max();
  CHKERR VecGhostUpdateBegin(ghost_x, INSERT_VALUES, SCATTER_FORWARD);
  CHKERR mult(0, x_loc_size, 0, max_int);
  CHKERR VecGhostUpdateEnd(ghost_x, INSERT_VALUES, SCATTER_FORWARD);
  CHKERR mult(x_loc_size, max_int, 0, max_int);

  // Accumulate ghost contributions back to their owners.
  CHKERR VecGhostUpdateBegin(ghost_y, ADD_VALUES, SCATTER_REVERSE);
  CHKERR VecGhostUpdateEnd(ghost_y, ADD_VALUES, SCATTER_REVERSE);

  switch (iora) {
  case INSERT_VALUES:
    CHKERR VecCopy(ghost_y, y);
    break;
  case ADD_VALUES:
    CHKERR VecAXPY(y, 1., ghost_y);
    break;
  default:
    CHK_MOAB_THROW(MOFEM_NOT_IMPLEMENTED, "Wrong InsertMode");
  }

#ifndef NDEBUG

  // Debug-only diagnostics: log sizes and 2-norms of x and y.
  auto print_norm = [&](auto msg, auto y) {
    PetscReal norm;
    CHKERR VecNorm(y, NORM_2, &norm);
    int nb_loc_y;
    CHKERR VecGetLocalSize(y, &nb_loc_y);
    int nb_y;
    CHKERR VecGetSize(y, &nb_y);
    MOFEM_LOG("WORLD", Sev::noisy)
        << msg << " " << nb_y << " " << nb_loc_y << " norm " << norm;
  };

  switch (iora) {
  case INSERT_VALUES:
    print_norm("mult_schur_block_shell insert x", x);
    print_norm("mult_schur_block_shell insert y", y);
    break;
  case ADD_VALUES:
    print_norm("mult_schur_block_shell add x", x);
    print_norm("mult_schur_block_shell add y", y);
    break;
  default:
    CHK_MOAB_THROW(MOFEM_NOT_IMPLEMENTED, "Wrong InsertMode");
  }

#endif // NDEBUG

  // PetscLogFlops(xxx)
  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_BlockStructureMult, 0, 0, 0, 0);

}
1715 
                                              InsertMode iora) {
  // NOTE(review): the first signature line (static MoFEMErrorCode
  // solve_schur_block_shell(Mat mat, Vec x, Vec y, ...)) was lost in
  // extraction.
  //
  // Applies the block (approximate) inverse: x = A^{-1} y (INSERT_VALUES)
  // or x += A^{-1} y (ADD_VALUES), using the precomputed inverted diagonal
  // blocks in dataInvBlocksPtr via a forward ("up") sweep followed by a
  // backward ("low") sweep over DiagBlockInvStruture::indexView.
  BlockStructure *ctx;
  CHKERR MatShellGetContext(mat, (void **)&ctx);

  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_BlockStructureSolve, 0, 0, 0, 0);

  // Note that for solver those two are swapped
  Vec ghost_x = ctx->ghostY;
  Vec ghost_y = ctx->ghostX;

  CHKERR VecCopy(y, ghost_y);
  CHKERR VecZeroEntries(ghost_x);

  // CHKERR VecGhostUpdateBegin(ghost_y, INSERT_VALUES, SCATTER_FORWARD);
  // CHKERR VecGhostUpdateEnd(ghost_y, INSERT_VALUES, SCATTER_FORWARD);

  double *x_array;
  Vec loc_ghost_x;
  CHKERR VecGhostGetLocalForm(ghost_x, &loc_ghost_x);
  CHKERR VecGetArray(loc_ghost_x, &x_array);

  // Work vectors: inv_y holds partially inverted values, add_y holds the
  // right-hand side with off-diagonal corrections accumulated.
  Vec loc_ghost_y;
  CHKERR VecGhostGetLocalForm(ghost_y, &loc_ghost_y);
  auto inv_y = vectorDuplicate(loc_ghost_y);
  auto add_y = vectorDuplicate(loc_ghost_y);
  CHKERR VecZeroEntries(inv_y);
  CHKERR VecCopy(loc_ghost_y, add_y);
  double *y_inv_array;
  CHKERR VecGetArray(inv_y, &y_inv_array);
  double *y_add_array;
  CHKERR VecGetArray(add_y, &y_add_array);
  // NOTE(review): y_array is fetched and restored but not otherwise read in
  // the visible body.
  double *y_array;
  CHKERR VecGetArray(loc_ghost_y, &y_array);

  auto data_inv_blocks = ctx->dataInvBlocksPtr;
  auto inv_block_ptr = &*data_inv_blocks->begin();
  // NOTE(review): data_blocks is unused in the visible body.
  auto data_blocks = ctx->dataBlocksPtr;

  // The solve needs the extended index view; ctx must actually be a
  // DiagBlockInvStruture (dynamic_cast result is not null-checked here).
  auto *data_inv = dynamic_cast<DiagBlockInvStruture *>(ctx);
  auto index_view = &data_inv->indexView;

  std::vector<double> f;

  // Forward pass: for each diagonal group, subtract off-diagonal couplings
  // (using already-inverted values), then apply the inverted diagonal. The
  // inverted blocks store -M^{-1}, hence the -1.0 factors.
  for (auto s1 = 0; s1 != index_view->diagUpRange.size() - 1; ++s1) {
    auto lo = index_view->diagUpRange[s1];
    auto hi = index_view->diagUpRange[s1 + 1];

    // first index is off diag
    auto diag_index_ptr =
        index_view->upView[lo]; // First index is inverted diagonal
    ++lo;

    auto row = diag_index_ptr->getLocRow();
    auto col = diag_index_ptr->getLocCol();
    auto nb_rows = diag_index_ptr->getNbRows();
    auto nb_cols = diag_index_ptr->getNbCols();
    auto inv_shift = diag_index_ptr->getInvShift();

    for (; lo != hi; ++lo) {
      auto off_index_ptr = index_view->upView[lo];
      auto off_col = off_index_ptr->getLocCol();
      auto off_nb_cols = off_index_ptr->getNbCols();
      auto off_shift = off_index_ptr->getInvShift();
#ifndef NDEBUG
      if (off_shift == -1)
        SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "off_shift == -1");
#endif // NDEBUG
      auto ptr = &inv_block_ptr[off_shift];
      if (std::min(nb_rows, off_nb_cols) > max_gemv_size) {
        cblas_dgemv(CblasRowMajor, CblasNoTrans, nb_rows, off_nb_cols, -1.0,
                    ptr, off_nb_cols, &y_inv_array[off_col], 1, 1.0,
                    &y_add_array[row], 1);
      } else {
        for (auto r = 0; r != nb_rows; ++r) {
          for (auto c = 0; c != off_nb_cols; ++c) {
            y_add_array[row + r] -=
                ptr[r * off_nb_cols + c] * y_inv_array[off_col + c];
          }
        }
      }
    }

#ifndef NDEBUG
    if (inv_shift == -1)
      SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "inv_shift == -1");
    if (inv_shift + nb_rows * nb_cols > data_inv_blocks->size())
      SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
               "inv_shift out of range %d > %d", inv_shift + nb_rows * nb_cols,
               data_inv_blocks->size());
#endif // NDEBUG

    // Apply the (negated) inverted diagonal block: inv_y -= (-M^{-1}) add_y.
    auto ptr = &inv_block_ptr[inv_shift];
    if (std::min(nb_rows, nb_cols) > max_gemv_size) {
      cblas_dgemv(CblasRowMajor, CblasNoTrans, nb_rows, nb_cols, -1.0, ptr,
                  nb_cols, &y_add_array[col], 1, 1.0, &y_inv_array[row], 1);
    } else {
      for (auto r = 0; r != nb_rows; ++r) {
        for (auto c = 0; c != nb_cols; ++c) {
          y_inv_array[row + r] -= ptr[r * nb_cols + c] * y_add_array[col + c];
        }
      }
    }
  }

  // Backward pass: same structure over the "low" view, producing the final
  // solution in x_array.
  for (auto s1 = 0; s1 != index_view->diagLoRange.size() - 1; ++s1) {
    auto lo = index_view->diagLoRange[s1];
    auto hi = index_view->diagLoRange[s1 + 1];

    // first index is off diag
    auto diag_index_ptr =
        index_view->lowView[lo]; // First index is inverted diagonal
    ++lo;

    auto row = diag_index_ptr->getLocRow();
    auto col = diag_index_ptr->getLocCol();
    auto nb_rows = diag_index_ptr->getNbRows();
    auto nb_cols = diag_index_ptr->getNbCols();
    auto inv_shift = diag_index_ptr->getInvShift();

    f.resize(nb_cols);
    std::copy(&y_add_array[col], &y_add_array[col + nb_cols], f.begin());

    for (; lo != hi; ++lo) {
      auto off_index_ptr = index_view->lowView[lo];
      auto off_col = off_index_ptr->getLocCol();
      auto off_nb_cols = off_index_ptr->getNbCols();
      auto off_shift = off_index_ptr->getInvShift();
#ifndef NDEBUG
      if (off_shift == -1)
        SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "off_shift == -1");
#endif // NDEBUG
      auto x_ptr = &x_array[off_col];
      auto ptr = &inv_block_ptr[off_shift];
      // NOTE(review): the guard uses min(nb_rows, nb_cols) while the product
      // dimensions are (nb_rows x off_nb_cols) — the forward pass uses
      // off_nb_cols in its guard. Inconsistent; confirm intended.
      if (std::min(nb_rows, nb_cols) > max_gemv_size) {
        cblas_dgemv(CblasRowMajor, CblasNoTrans, nb_rows, off_nb_cols, -1.0,
                    ptr, off_nb_cols, x_ptr, 1, 1.0, &f[0], 1);
      } else {
        for (auto r = 0; r != nb_rows; ++r) {
          for (auto c = 0; c != off_nb_cols; ++c) {
            f[r] -= ptr[r * off_nb_cols + c] * x_ptr[c];
          }
        }
      }
    }

#ifndef NDEBUG
    if (inv_shift == -1)
      SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "inv_shift == -1");
    if (inv_shift + nb_rows * nb_cols > data_inv_blocks->size())
      SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
               "inv_shift out of range %d > %d", inv_shift + nb_rows * nb_cols,
               data_inv_blocks->size());
#endif // NDEBUG

    auto ptr = &inv_block_ptr[inv_shift];
    if (std::min(nb_rows, nb_cols) > max_gemv_size) {
      cblas_dgemv(CblasRowMajor, CblasNoTrans, nb_rows, nb_cols, -1.0, ptr,
                  nb_cols, &f[0], 1, 1.0, &x_array[row], 1);
    } else {
      for (auto r = 0; r != nb_rows; ++r) {
        for (auto c = 0; c != nb_cols; ++c) {
          x_array[row + r] -= ptr[r * nb_cols + c] * f[c];
        }
      }
    }
  }

  CHKERR VecRestoreArray(loc_ghost_x, &x_array);
  CHKERR VecRestoreArray(inv_y, &y_inv_array);
  CHKERR VecRestoreArray(add_y, &y_add_array);
  CHKERR VecRestoreArray(loc_ghost_y, &y_array);
  CHKERR VecGhostRestoreLocalForm(ghost_x, &loc_ghost_x);
  CHKERR VecGhostRestoreLocalForm(ghost_y, &loc_ghost_y);

  // CHKERR VecGhostUpdateBegin(ghost_y, ADD_VALUES, SCATTER_REVERSE);
  // CHKERR VecGhostUpdateEnd(ghost_y, ADD_VALUES, SCATTER_REVERSE);

  switch (iora) {
  case INSERT_VALUES:
    CHKERR VecCopy(ghost_x, x);
    break;
  case ADD_VALUES:
    CHKERR VecAXPY(x, 1., ghost_x);
    break;
  default:
    CHK_MOAB_THROW(MOFEM_NOT_IMPLEMENTED, "Wrong InsertMode");
  }

#ifndef NDEBUG

  // Debug-only diagnostics: log sizes and 2-norms of x and y.
  auto print_norm = [&](auto msg, auto y) {
    PetscReal norm;
    CHKERR VecNorm(y, NORM_2, &norm);
    int nb_loc_y;
    CHKERR VecGetLocalSize(y, &nb_loc_y);
    int nb_y;
    CHKERR VecGetSize(y, &nb_y);
    MOFEM_LOG("WORLD", Sev::noisy)
        << msg << " " << nb_y << " " << nb_loc_y << " norm " << norm;
  };

  switch (iora) {
  case INSERT_VALUES:
    print_norm("solve_schur_block_shell insert x", x);
    print_norm("solve_schur_block_shell insert y", y);
    break;
  case ADD_VALUES:
    print_norm("solve_schur_block_shell add x", x);
    print_norm("solve_schur_block_shell add y", y);
    break;
  default:
    CHK_MOAB_THROW(MOFEM_NOT_IMPLEMENTED, "Wrong InsertMode");
  }

#endif // NDEBUG

  // PetscLogFlops(xxx)
  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_BlockStructureSolve, 0, 0, 0, 0);

}
1943 
    BlockStructure *ctx, const EntitiesFieldData::EntData &row_data,
    const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat,
    InsertMode iora,
    boost::function<int(const DiagBlockIndex::Indexes *)> shift_extractor,
    boost::shared_ptr<std::vector<double>> data_blocks_ptr) {

  // NOTE(review): the first line of this signature was lost in extraction.
  // Copies/accumulates the local element matrix `mat` into the preallocated
  // dense blocks of `ctx`, locating each block via blockIndex.get<1>() and
  // resolving its storage offset with `shift_extractor`.

  // NOTE(review): the early-return statements of these two guards were lost
  // in extraction.
  if (row_data.getIndices().empty())
  if (col_data.getIndices().empty())

#ifndef NDEBUG

  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_BlockStructureSetValues, 0, 0, 0,
                     0);

#endif // NDEBUG

  // Row indices may be overridden by essential-BC storage attached to the
  // first row entity; .first flags that override.
  auto get_row_data = [&]() -> std::pair<bool, VectorInt> {
    if (auto e_ptr = row_data.getFieldEntities()[0]) {
      if (auto stored_data_ptr =
              e_ptr->getSharedStoragePtr<EssentialBcStorage>()) {
        return std::make_pair(true, stored_data_ptr->entityIndices);
      }
    }
    return std::make_pair(false, row_data.getIndices());
  };

  auto row_indices = get_row_data();
  // True when no index is -1 (i.e. no skipped/constrained DOFs).
  auto it_r = std::find(row_indices.second.begin(), row_indices.second.end(),
                        -1) == row_indices.second.end();
  auto it_c =
      std::find(col_data.getIndices().begin(), col_data.getIndices().end(),
                -1) == col_data.getIndices().end();

  // Fast path: no BC override, a single row/col entity covering all DOFs,
  // and no negative indices — one contiguous block copy.
  if (

      !row_indices.first

      && row_data.getFieldEntities()[0]->getNbDofsOnEnt() ==
             row_indices.second.size()

      && col_data.getFieldEntities()[0]->getNbDofsOnEnt() ==
             col_data.getIndices().size()

      && it_r && it_c

  ) {

    auto it = ctx->blockIndex.get<1>().find(

        boost::make_tuple(row_indices.second[0], col_data.getIndices()[0],
                          row_indices.second.size(),
                          col_data.getIndices().size())

    );

#ifndef NDEBUG

    if (it == ctx->blockIndex.get<1>().end()) {
      MOFEM_LOG_CHANNEL("SELF");
      MOFEM_TAG_AND_LOG("SELF", Sev::error, "BlockMat")
          << "missing block: row " << row_data.getFieldDofs()[0]->getName()
          << " col " << col_data.getFieldDofs()[0]->getName();
      SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "Block not allocated");
    }

#endif

    auto shift = shift_extractor(&*it);
    if (shift != -1) {

      auto nbr = row_indices.second.size();
      auto nbc = col_data.getIndices().size();
      auto mat_row_ptr = &mat(0, 0);
      auto s_mat = &(*data_blocks_ptr)[shift];
      if (iora == ADD_VALUES) {
        for (int r = 0; r != nbr; ++r) {
          for (int c = 0; c != nbc; ++c) {
            *s_mat += *mat_row_ptr;
            ++mat_row_ptr;
            ++s_mat;
          }
        }
      } else {
        for (int r = 0; r != nbr; ++r) {
          for (int c = 0; c != nbc; ++c) {
            *s_mat = *mat_row_ptr;
            ++mat_row_ptr;
            ++s_mat;
          }
        }
      }
    }

  } else {

    // This is to manage all complexities with negative indices, etc., nodes
    // etc. It is much slower than the above, but it is more general

    std::vector<int> row_ent_idx;
    std::vector<int> col_ent_idx;

    // Collect entity-local DOF indices for a cache range.
    auto get_ent_idx = [&](auto lo, auto hi, auto &idx) {
      idx.clear();
      idx.reserve(std::distance(lo, hi));
      for (; lo != hi; ++lo) {
        idx.emplace_back((*lo)->getEntDofIdx());
      }
    };

    // Iterate (row entity, col entity) pairs; `row`/`col` track the offsets
    // into the element matrix `mat`.
    auto row = 0;
    for (auto &rent : row_data.getFieldEntities()) {
      if (auto r_cache = rent->entityCacheRowDofs.lock()) {

        auto rlo = r_cache->loHi[0];
        auto rhi = r_cache->loHi[1];
        if (rlo == rhi)
          continue;

        get_ent_idx(rlo, rhi, row_ent_idx);
        auto gr = (*rlo)->getPetscGlobalDofIdx();
        auto nbr = std::distance(rlo, rhi);
        auto col = 0;

        for (auto &cent : col_data.getFieldEntities()) {
          if (auto c_cache = cent->entityCacheColDofs.lock()) {

            auto clo = c_cache->loHi[0];
            auto chi = c_cache->loHi[1];
            if (clo == chi)
              continue;

            auto nbc = std::distance(clo, chi);
            auto it = ctx->blockIndex.get<1>().find(

                boost::make_tuple(gr, (*clo)->getPetscGlobalDofIdx(), nbr, nbc)

            );

#ifndef NDEBUG

            if (it == ctx->blockIndex.get<1>().end()) {
              MOFEM_LOG_CHANNEL("SELF");
              MOFEM_TAG_AND_LOG("SELF", Sev::error, "BlockMat")
                  << "missing block: " << row_data.getFieldDofs()[0]->getName()
                  << " : " << col_data.getFieldDofs()[0]->getName();
              SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
                      "Block not allocated");
            }

#endif

            auto shift = shift_extractor(&*it);
            if (shift != -1) {

              auto mat_row_ptr0 = &mat(row, col);
              auto s_mat = &(*data_blocks_ptr)[shift];

              // Case 1: both entities are complete and indices are clean —
              // straight sub-matrix copy.
              if (

                  nbr == rent->getNbDofsOnEnt() && nbc == cent->getNbDofsOnEnt()

                  && it_r && it_c

              ) {

                if (iora == ADD_VALUES) {
                  for (int r = 0; r != nbr; ++r) {
                    auto mat_row_ptr = mat_row_ptr0 + r * mat.size2();
                    for (int c = 0; c != nbc; ++c) {
                      *s_mat += *mat_row_ptr;
                      ++mat_row_ptr;
                      ++s_mat;
                    }
                  }
                } else {
                  for (int r = 0; r != nbr; ++r) {
                    auto mat_row_ptr = mat_row_ptr0 + r * mat.size2();
                    for (int c = 0; c != nbc; ++c) {
                      *s_mat = *mat_row_ptr;
                      ++mat_row_ptr;
                      ++s_mat;
                    }
                  }
                }

              // Case 2: only columns are complete — map rows through the
              // entity-local row indices.
              } else if (

                  nbc == cent->getNbDofsOnEnt()

                  && it_r && it_c

              ) {

                if (iora == ADD_VALUES) {
                  for (auto r : row_ent_idx) {
                    auto mat_row_ptr = mat_row_ptr0 + r * mat.size2();
                    for (int c = 0; c != nbc; ++c) {
                      *s_mat += *mat_row_ptr;
                      ++mat_row_ptr;
                      ++s_mat;
                    }
                  }
                } else {
                  for (auto r : row_ent_idx) {
                    auto mat_row_ptr = mat_row_ptr0 + r * mat.size2();
                    for (int c = 0; c != nbc; ++c) {
                      *s_mat = *mat_row_ptr;
                      ++mat_row_ptr;
                      ++s_mat;
                    }
                  }
                }

              // Case 3: fully general — map rows and columns and skip
              // entries whose global index is -1 (constrained DOFs).
              } else {

                get_ent_idx(clo, chi, col_ent_idx);
                auto row_idx = &row_indices.second[row];
                auto col_idx = &col_data.getIndices()[col];
                if (iora == ADD_VALUES) {
                  for (auto r : row_ent_idx) {
                    auto mat_row_ptr = mat_row_ptr0 + r * mat.size2();
                    for (auto c : col_ent_idx) {
                      if (row_idx[r] != -1 && col_idx[c] != -1)
                        *s_mat += mat_row_ptr[c];
                      ++s_mat;
                    }
                  }
                } else {
                  for (auto r : row_ent_idx) {
                    auto mat_row_ptr = mat_row_ptr0 + r * mat.size2();
                    for (auto c : col_ent_idx) {
                      if (row_idx[r] != -1 && col_idx[c] != -1)
                        *s_mat = mat_row_ptr[c];
                      ++s_mat;
                    }
                  }
                }
              }
            }
          }
          col += cent->getNbDofsOnEnt();
        }
      }
      row += rent->getNbDofsOnEnt();
    }
  }

#ifndef NDEBUG
  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_BlockStructureSetValues, 0, 0, 0,
                   0);
#endif // NDEBUG

}
2203 
2206  const EntitiesFieldData::EntData &col_data,
2207  const MatrixDouble &mat, InsertMode iora) {
2209  PetscBool is_mat_shell = PETSC_FALSE;
2210  PetscObjectTypeCompare((PetscObject)M, MATSHELL, &is_mat_shell);
2211  if (is_mat_shell) {
2212  BlockStructure *ctx;
2213  CHKERR MatShellGetContext(M, (void **)&ctx);
2215  ctx, row_data, col_data, mat, iora,
2216  [](const DiagBlockIndex::Indexes *idx) { return idx->getMatShift(); },
2217  ctx->dataBlocksPtr);
2218  } else {
2219  CHKERR MatSetValues<AssemblyTypeSelector<PETSC>>(M, row_data, col_data, mat,
2220  iora);
2221  }
2223 }
2224 
2226  Mat M, const EntitiesFieldData::EntData &row_data,
2227  const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat,
2228  InsertMode iora) {
2230  PetscBool is_mat_shell = PETSC_FALSE;
2231  PetscObjectTypeCompare((PetscObject)M, MATSHELL, &is_mat_shell);
2232  if (is_mat_shell) {
2233  BlockStructure *ctx;
2234  CHKERR MatShellGetContext(M, (void **)&ctx);
2235  if (!ctx->preconditionerBlocksPtr)
2236  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
2237  "No preconditionerBlocksPtr");
2239  ctx, row_data, col_data, mat, iora,
2240  [](const DiagBlockIndex::Indexes *idx) { return idx->getInvShift(); },
2241  ctx->preconditionerBlocksPtr);
2242  } else {
2243  CHKERR MatSetValues<AssemblyTypeSelector<PETSC>>(M, row_data, col_data, mat,
2244  iora);
2245  }
2247 }
2248 
2249 boost::shared_ptr<NestSchurData> getNestSchurData(
2250 
2251  std::pair<SmartPetscObj<DM>, SmartPetscObj<DM>> dms,
2252  boost::shared_ptr<BlockStructure> block_mat_data_ptr,
2253 
2254  std::vector<std::string> fields_names, //< a00 fields
2255  std::vector<boost::shared_ptr<Range>>
2256  field_ents, //< a00 ranges (can be null),
2257  bool add_preconditioner_block) {
2258 
2259  if (!block_mat_data_ptr) {
2260  CHK_THROW_MESSAGE(MOFEM_DATA_INCONSISTENCY, "Block data not set");
2261  }
2262 
2263  if(fields_names.size() != field_ents.size())
2265  "fields_names.size() != field_ents.size()");
2266 
2267  auto [schur_dm, block_dm] = dms;
2268  auto schur_prb = getProblemPtr(schur_dm);
2269  auto block_prb = getProblemPtr(block_dm);
2270  auto m_field_ptr = getInterfacePtr(block_dm);
2271 
2272  auto schur_dofs_row = schur_prb->getNumeredRowDofsPtr();
2273  auto schur_dofs_col = schur_prb->getNumeredColDofsPtr();
2274  auto block_dofs_row = block_prb->getNumeredRowDofsPtr();
2275  auto block_dofs_col = block_prb->getNumeredColDofsPtr();
2276 
2277  auto ao_schur_row = schur_prb->getSubData()->getSmartRowMap();
2278  auto ao_schur_col = schur_prb->getSubData()->getSmartColMap();
2279  auto ao_block_row = block_prb->getSubData()->getSmartRowMap();
2280  auto ao_block_col = block_prb->getSubData()->getSmartColMap();
2281 
2282  auto schur_vec_x = createDMVector(schur_dm);
2283  auto block_vec_x = createDMVector(block_dm);
2284  auto schur_vec_y = vectorDuplicate(schur_vec_x);
2285  auto block_vec_y = vectorDuplicate(block_vec_x);
2286  CHKERR VecSetDM(schur_vec_x, PETSC_NULL);
2287  CHKERR VecSetDM(block_vec_x, PETSC_NULL);
2288  CHKERR VecSetDM(schur_vec_y, PETSC_NULL);
2289  CHKERR VecSetDM(block_vec_y, PETSC_NULL);
2290 
2291  auto get_vec = [&](auto schur_data) {
2292  std::vector<int> vec_r, vec_c;
2293  vec_r.reserve(schur_data->blockIndex.size());
2294  vec_c.reserve(schur_data->blockIndex.size());
2295  for (auto &d : schur_data->blockIndex.template get<1>()) {
2296  vec_r.push_back(d.getRow());
2297  vec_c.push_back(d.getCol());
2298  }
2299  return std::make_pair(vec_r, vec_c);
2300  };
2301 
2302  auto [vec_r_schur, vec_c_schur] = get_vec(block_mat_data_ptr);
2303  CHKERR AOApplicationToPetsc(ao_schur_row, vec_r_schur.size(),
2304  &*vec_r_schur.begin());
2305  CHKERR AOApplicationToPetsc(ao_schur_col, vec_c_schur.size(),
2306  &*vec_c_schur.begin());
2307  auto [vec_r_block, vec_c_block] = get_vec(block_mat_data_ptr);
2308  CHKERR AOApplicationToPetsc(ao_block_row, vec_r_block.size(),
2309  &*vec_r_block.begin());
2310  CHKERR AOApplicationToPetsc(ao_block_col, vec_c_block.size(),
2311  &*vec_c_block.begin());
2312 
2313  std::array<boost::shared_ptr<BlockStructure>, 4> data_ptrs;
2314 
2315  for (auto r = 0; r != 3; ++r) {
2316  data_ptrs[r] = boost::make_shared<BlockStructure>();
2317  data_ptrs[r]->dataBlocksPtr = block_mat_data_ptr->dataBlocksPtr;
2318  }
2319  data_ptrs[3] = boost::make_shared<DiagBlockInvStruture>();
2320  data_ptrs[3]->dataBlocksPtr = block_mat_data_ptr->dataBlocksPtr;
2321 
2322  data_ptrs[0]->ghostX = schur_vec_x;
2323  data_ptrs[0]->ghostY = schur_vec_y;
2324  data_ptrs[1]->ghostX = block_vec_x;
2325  data_ptrs[1]->ghostY = schur_vec_y;
2326  data_ptrs[2]->ghostX = schur_vec_x;
2327  data_ptrs[2]->ghostY = block_vec_y;
2328  data_ptrs[3]->ghostX = block_vec_x;
2329  data_ptrs[3]->ghostY = block_vec_y;
2330 
2331  int idx = 0;
2332  for (auto &d : block_mat_data_ptr->blockIndex.get<1>()) {
2333 
2334  auto insert = [&](auto &m, auto &dof_r, auto &dof_c, auto &d) {
2335  m.insert(
2336 
2338  dof_r->getPetscGlobalDofIdx(), dof_c->getPetscGlobalDofIdx(),
2339 
2340  d.getNbRows(), d.getNbCols(),
2341 
2342  dof_r->getPetscLocalDofIdx(), dof_c->getPetscLocalDofIdx(),
2343 
2344  d.getMatShift(), d.getInvShift()}
2345 
2346  );
2347  };
2348 
2349  if (vec_r_schur[idx] != -1 && vec_c_schur[idx] != -1) {
2350  auto schur_dof_r =
2351  schur_dofs_row->get<PetscGlobalIdx_mi_tag>().find(vec_r_schur[idx]);
2352  auto schur_dof_c =
2353  schur_dofs_col->get<PetscGlobalIdx_mi_tag>().find(vec_c_schur[idx]);
2354 #ifndef NDEBUG
2355  if (schur_dof_r == schur_dofs_row->get<PetscGlobalIdx_mi_tag>().end()) {
2356  CHK_THROW_MESSAGE(MOFEM_DATA_INCONSISTENCY, "Block <Schur> not found");
2357  }
2358  if (schur_dof_c == schur_dofs_col->get<PetscGlobalIdx_mi_tag>().end()) {
2359  CHK_THROW_MESSAGE(MOFEM_DATA_INCONSISTENCY, "Block <Schur> not found");
2360  }
2361 #endif // NDEBUG
2362  insert(data_ptrs[0]->blockIndex, *schur_dof_r, *schur_dof_c, d);
2363  }
2364 
2365  if (vec_r_schur[idx] != -1 && vec_c_block[idx] != -1) {
2366  auto schur_dof_r =
2367  schur_dofs_row->get<PetscGlobalIdx_mi_tag>().find(vec_r_schur[idx]);
2368  auto block_dof_c =
2369  block_dofs_col->get<PetscGlobalIdx_mi_tag>().find(vec_c_block[idx]);
2370 #ifndef NDEBUG
2371  if (schur_dof_r == schur_dofs_row->get<PetscGlobalIdx_mi_tag>().end()) {
2372  CHK_THROW_MESSAGE(MOFEM_DATA_INCONSISTENCY, "Block <Schur> not found");
2373  }
2374  if (block_dof_c == block_dofs_col->get<PetscGlobalIdx_mi_tag>().end()) {
2375  CHK_THROW_MESSAGE(MOFEM_DATA_INCONSISTENCY, "Block <Block> not found");
2376  }
2377 #endif
2378  insert(data_ptrs[1]->blockIndex, *schur_dof_r, *block_dof_c, d);
2379  }
2380 
2381  if (vec_r_block[idx] != -1 && vec_c_schur[idx] != -1) {
2382  auto block_dof_r =
2383  block_dofs_row->get<PetscGlobalIdx_mi_tag>().find(vec_r_block[idx]);
2384  auto schur_dof_c =
2385  schur_dofs_col->get<PetscGlobalIdx_mi_tag>().find(vec_c_schur[idx]);
2386 #ifndef NDEBUG
2387  if (block_dof_r == block_dofs_row->get<PetscGlobalIdx_mi_tag>().end()) {
2388  CHK_THROW_MESSAGE(MOFEM_DATA_INCONSISTENCY, "Block <Block> not found");
2389  }
2390  if (schur_dof_c == schur_dofs_col->get<PetscGlobalIdx_mi_tag>().end()) {
2391  CHK_THROW_MESSAGE(MOFEM_DATA_INCONSISTENCY, "Block <Schur> not found");
2392  }
2393 #endif // NDEBUG
2394  insert(data_ptrs[2]->blockIndex, *block_dof_r, *schur_dof_c, d);
2395  }
2396 
2397  if (vec_r_block[idx] != -1 && vec_c_block[idx] != -1) {
2398  auto block_dof_r =
2399  block_dofs_row->get<PetscGlobalIdx_mi_tag>().find(vec_r_block[idx]);
2400  auto block_dof_c =
2401  block_dofs_col->get<PetscGlobalIdx_mi_tag>().find(vec_c_block[idx]);
2402  insert(data_ptrs[3]->blockIndex, *block_dof_r, *block_dof_c, d);
2403  }
2404 
2405  ++idx;
2406  }
2407 
2408  // set data for a00 solve (inverse blocks)
2409  auto set_up_a00_data = [&](auto inv_block_data) {
2411 
2412  auto get_forward_list = [&]() {
2413  std::vector<int> list;
2414  list.reserve(fields_names.size());
2415  for (auto s = 0; s != fields_names.size(); ++s) {
2416  list.push_back(s);
2417  }
2418  return list;
2419  };
2420 
2421  auto get_reverse_list = [&]() {
2422  std::vector<int> list;
2423  list.reserve(fields_names.size());
2424  for (auto s = 0; s != fields_names.size(); ++s) {
2425  list.push_back(fields_names.size() - s - 1);
2426  }
2427  return list;
2428  };
2429 
2430  // get uids on the diagonal of a00 block
2431  auto get_a00_uids = [&](auto &&list) {
2432  auto get_field_bit = [&](auto field_name) {
2433  return m_field_ptr->get_field_bit_number(field_name);
2434  };
2435 
2436  std::vector<std::pair<UId, UId>> a00_uids;
2437  a00_uids.reserve(fields_names.size());
2438  for (auto ss : list) {
2439  auto field_bit = get_field_bit(fields_names[ss]);
2440  auto row_ents = field_ents[ss];
2441  if (row_ents) {
2442  for (auto p = row_ents->pair_begin(); p != row_ents->pair_end();
2443  ++p) {
2444  auto lo_uid =
2445  FieldEntity::getLoLocalEntityBitNumber(field_bit, p->first);
2446  auto hi_uid =
2447  FieldEntity::getHiLocalEntityBitNumber(field_bit, p->second);
2448  a00_uids.push_back(std::make_pair(lo_uid, hi_uid));
2449  }
2450  } else {
2451  auto lo_uid = FieldEntity::getLoLocalEntityBitNumber(
2452  field_bit, get_id_for_min_type<MBVERTEX>());
2453  auto hi_uid = FieldEntity::getHiLocalEntityBitNumber(
2454  field_bit, get_id_for_max_type<MBENTITYSET>());
2455  a00_uids.push_back(std::make_pair(lo_uid, hi_uid));
2456  }
2457  }
2458  return a00_uids;
2459  };
2460 
2461  // get global indexes for a00 block
2462  auto get_glob_idex_pairs = [&](auto &&uid_pairs) {
2463  std::vector<std::pair<int, int>> glob_idex_pairs;
2464  glob_idex_pairs.reserve(uid_pairs.size());
2465  auto dofs = block_prb->getNumeredRowDofsPtr();
2466 
2467  auto it = uid_pairs.rbegin();
2468  auto hi = uid_pairs.rend();
2469 
2470  for (; it != hi; ++it) {
2471  auto [lo_uid, hi_uid] = *it;
2472  auto lo_it = dofs->lower_bound(lo_uid);
2473  auto hi_it = dofs->upper_bound(hi_uid);
2474  if (lo_it != hi_it) {
2475  auto lo_idx = (*lo_it)->getPetscGlobalDofIdx();
2476  glob_idex_pairs.emplace_back(lo_idx, std::distance(lo_it, hi_it));
2477  }
2478  }
2479  // pair of local index and number of dofs
2480  return glob_idex_pairs;
2481  };
2482 
2483  auto &index_view =
2484  boost::dynamic_pointer_cast<DiagBlockInvStruture>(inv_block_data)
2485  ->indexView;
2486 
2487  // set struture to keep indices to mat solve of a00
2488  index_view.lowView.resize(0);
2489  index_view.lowView.reserve(inv_block_data->blockIndex.size());
2490  index_view.diagLoRange.resize(0);
2491  index_view.diagLoRange.reserve(inv_block_data->blockIndex.size() + 1);
2492  index_view.diagLoRange.push_back(0);
2493 
2494  index_view.upView.resize(0);
2495  index_view.upView.reserve(inv_block_data->blockIndex.size());
2496  index_view.diagUpRange.resize(0);
2497  index_view.diagUpRange.reserve(inv_block_data->blockIndex.size() + 1);
2498  index_view.diagUpRange.push_back(0);
2499 
2500  // this enable search by varying ranges
2501  using BlockIndexView = multi_index_container<
2502 
2503  const DiagBlockIndex::Indexes *,
2504 
2505  indexed_by<
2506 
2507  ordered_non_unique<const_mem_fun<DiagBlockIndex::Indexes, int,
2508  &DiagBlockIndex::Indexes::getRow>>
2509 
2510  >>;
2511 
2512  BlockIndexView block_index_view;
2513  for (auto it = inv_block_data->blockIndex.template get<1>().begin();
2514  it != inv_block_data->blockIndex.template get<1>().end(); ++it) {
2515  block_index_view.insert(&*it);
2516  }
2517 
2518  auto set_index_view_data = [&](
2519 
2520  int start,
2521 
2522  auto &&glob_idx_pairs,
2523 
2524  auto &view, auto &range
2525 
2526  ) {
2527  // iterate field & entities pairs
2528  for (auto s1 = start; s1 < glob_idx_pairs.size(); ++s1) {
2529 
2530  // iterate matrix indexes
2531  // index, and numer of dofs
2532  auto [lo_idx, nb] = glob_idx_pairs[s1];
2533  auto it = block_index_view.lower_bound(lo_idx);
2534  auto hi = block_index_view.end();
2535 
2536  // iterate rows
2537  while (it != hi &&
2538  ((*it)->getRow() + (*it)->getNbRows()) <= lo_idx + nb) {
2539 
2540  auto row = (*it)->getRow();
2541 
2542  auto get_diag_index =
2543  [&](auto it) -> const MoFEM::DiagBlockIndex::Indexes * {
2544  while (it != hi && (*it)->getRow() == row) {
2545  if ((*it)->getCol() == row &&
2546  (*it)->getNbCols() == (*it)->getNbRows()) {
2547  auto ptr = *it;
2548  return ptr;
2549  }
2550  ++it;
2551  }
2552  CHK_THROW_MESSAGE(MOFEM_DATA_INCONSISTENCY, "Diagonal not found");
2553  return nullptr;
2554  };
2555 
2556  auto push_off_diag = [&](auto it, auto s1) {
2557  while (it != hi && (*it)->getRow() == row) {
2558  for (int s2 = 0; s2 < s1; ++s2) {
2559  auto [col_lo_idx, col_nb] = glob_idx_pairs[s2];
2560  if ((*it)->getCol() >= col_lo_idx &&
2561  (*it)->getCol() + (*it)->getNbCols() <=
2562  col_lo_idx + col_nb) {
2563  view.push_back(*it);
2564  }
2565  }
2566  ++it;
2567  }
2568  };
2569 
2570  view.push_back(get_diag_index(it));
2571  push_off_diag(it, s1);
2572  range.push_back(view.size());
2573 
2574  while (it != hi && (*it)->getRow() == row) {
2575  ++it;
2576  }
2577  }
2578  }
2579  };
2580 
2581  set_index_view_data(
2582 
2583  0,
2584 
2585  get_glob_idex_pairs(get_a00_uids(get_forward_list())),
2586 
2587  index_view.lowView, index_view.diagLoRange
2588 
2589  );
2590 
2591  set_index_view_data(
2592 
2593  0,
2594 
2595  get_glob_idex_pairs(get_a00_uids(get_reverse_list())),
2596 
2597  index_view.upView, index_view.diagUpRange
2598 
2599  );
2600 
2601  // calculate size of invert matrix vector, and set internal data
2602  auto set_inv_mat = [&](auto inv_mem_size, auto &view, auto &range) {
2603  auto get_vec = [&](auto &view) {
2604  std::vector<int> vec_r, vec_c;
2605  vec_r.reserve(view.size());
2606  vec_c.reserve(view.size());
2607  for (auto &i : view) {
2608  vec_r.push_back(i->getRow());
2609  vec_c.push_back(i->getCol());
2610  }
2611  return std::make_pair(vec_r, vec_c);
2612  };
2613 
2614  auto [vec_row, vec_col] = get_vec(view);
2616  AOPetscToApplication(ao_block_row, vec_row.size(), &*vec_row.begin()),
2617  "apply ao");
2619  AOPetscToApplication(ao_block_col, vec_col.size(), &*vec_col.begin()),
2620  "apply ao");
2621 
2622  for (auto s1 = 0; s1 != range.size() - 1; ++s1) {
2623  auto lo = range[s1];
2624  auto hi = range[s1 + 1];
2625  for (; lo != hi; ++lo) {
2626  auto diag_index_ptr = view[lo];
2627  auto row = vec_row[lo];
2628  auto col = vec_col[lo];
2629  auto nb_rows = diag_index_ptr->getNbRows();
2630  auto nb_cols = diag_index_ptr->getNbCols();
2631  auto it = block_mat_data_ptr->blockIndex.get<1>().find(
2632  boost::make_tuple(row, col, nb_rows, nb_cols));
2633  if (it == block_mat_data_ptr->blockIndex.get<1>().end()) {
2634  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
2635  "Block not found");
2636  }
2637  if (it->getInvShift() == -1) {
2638  it->getInvShift() = inv_mem_size;
2639  inv_mem_size += nb_rows * nb_cols;
2640  }
2641  diag_index_ptr->getInvShift() = it->getInvShift();
2642  }
2643  }
2644 
2645  return inv_mem_size;
2646  };
2647 
2648  // allocate memory for invert matrix solver
2649  auto allocate_inv_block = [&](auto inv_mem_size) {
2650  auto inv_data_ptr =
2651  boost::make_shared<std::vector<double>>(inv_mem_size, 0);
2652  data_ptrs[3]->dataInvBlocksPtr = inv_data_ptr;
2653  block_mat_data_ptr->dataInvBlocksPtr = data_ptrs[3]->dataInvBlocksPtr;
2654  };
2655 
2656  int inv_mem_size = 0;
2657  inv_mem_size =
2658  set_inv_mat(inv_mem_size, index_view.lowView, index_view.diagLoRange);
2659  inv_mem_size =
2660  set_inv_mat(inv_mem_size, index_view.upView, index_view.diagUpRange);
2661  allocate_inv_block(inv_mem_size);
2662 
2663  if (add_preconditioner_block) {
2664  auto preconditioned_block = boost::make_shared<std::vector<double>>(
2665  data_ptrs[3]->dataInvBlocksPtr->size(), 0);
2666  data_ptrs[3]->preconditionerBlocksPtr = preconditioned_block;
2667  data_ptrs[3]->multiplyByPreconditioner = true;
2668  block_mat_data_ptr->preconditionerBlocksPtr =
2669  data_ptrs[3]->preconditionerBlocksPtr;
2670  block_mat_data_ptr->multiplyByPreconditioner = false;
2671  }
2672 
2674  };
2675 
2676  CHKERR set_up_a00_data(data_ptrs[3]);
2677 
2678  MPI_Comm comm;
2679  CHKERR PetscObjectGetComm((PetscObject)schur_dm, &comm);
2680 
2681  auto create_shell_mat = [&](auto nb_r_loc, auto nb_c_loc, auto nb_r_glob,
2682  auto nb_c_glob, auto data_ptr) {
2683  Mat mat_raw;
2684  CHKERR MatCreateShell(comm, nb_r_loc, nb_c_loc, nb_r_glob, nb_c_glob,
2685  (void *)data_ptr.get(), &mat_raw);
2686  CHKERR setSchurBlockMatOps(mat_raw);
2687  return SmartPetscObj<Mat>(mat_raw);
2688  };
2689 
2690  auto schur_nb_global = schur_prb->getNbDofsRow();
2691  auto block_nb_global = block_prb->getNbDofsRow();
2692  auto schur_nb_local = schur_prb->getNbLocalDofsRow();
2693  auto block_nb_local = block_prb->getNbLocalDofsRow();
2694 
2695  std::array<SmartPetscObj<Mat>, 4> mats_array;
2696  mats_array[0] =
2697  create_shell_mat(schur_nb_local, schur_nb_local, schur_nb_global,
2698  schur_nb_global, data_ptrs[0]);
2699  mats_array[1] =
2700  create_shell_mat(schur_nb_local, block_nb_local, schur_nb_global,
2701  block_nb_global, data_ptrs[1]);
2702  mats_array[2] =
2703  create_shell_mat(block_nb_local, schur_nb_local, block_nb_global,
2704  schur_nb_global, data_ptrs[2]);
2705  mats_array[3] =
2706  create_shell_mat(block_nb_local, block_nb_local, block_nb_global,
2707  block_nb_global, data_ptrs[3]);
2708 
2709  MOFEM_TAG_AND_LOG("SYNC", Sev::verbose, "NestedSchur")
2710  << "(0, 0) " << schur_nb_local << " " << schur_nb_global << " "
2711  << data_ptrs[0]->blockIndex.size();
2712  MOFEM_TAG_AND_LOG("SYNC", Sev::verbose, "NestedSchur")
2713  << "(0, 1) " << schur_nb_local << " " << block_nb_local << " "
2714  << schur_nb_global << " " << block_nb_global << " "
2715  << data_ptrs[1]->blockIndex.size();
2716  MOFEM_TAG_AND_LOG("SYNC", Sev::verbose, "NestedSchur")
2717  << "(1, 0) " << block_nb_local << " " << schur_nb_local << " "
2718  << block_nb_global << " " << schur_nb_global << " "
2719  << data_ptrs[2]->blockIndex.size();
2720  MOFEM_TAG_AND_LOG("SYNC", Sev::verbose, "NestedSchur")
2721  << "(1, 1) " << block_nb_local << " " << block_nb_global << " "
2722  << data_ptrs[3]->blockIndex.size();
2723 
2724  MOFEM_LOG_SEVERITY_SYNC(comm, Sev::verbose);
2725 
2726  auto schur_is = schur_prb->getSubData()->getSmartRowIs();
2727  auto block_is = block_prb->getSubData()->getSmartRowIs();
2728 
2729  return boost::make_shared<NestSchurData>(
2730 
2731  mats_array, data_ptrs, block_mat_data_ptr,
2732  std::make_pair(schur_is, block_is)
2733 
2734  );
2735 }
2736 
2737 std::pair<SmartPetscObj<Mat>, boost::shared_ptr<NestSchurData>>
2738 createSchurNestedMatrix(boost::shared_ptr<NestSchurData> schur_net_data_ptr) {
2739 
2740  if (!schur_net_data_ptr)
2742 
2743  auto [mat_arrays, data_ptrs, block_mat_data_ptr, iss] = *schur_net_data_ptr;
2744  auto [schur_is, block_is] = iss;
2745 
2746  std::array<IS, 2> is_a = {schur_is, block_is};
2747  std::array<Mat, 4> mats_a = {
2748 
2749  mat_arrays[0], mat_arrays[1], mat_arrays[2], mat_arrays[3]
2750 
2751  };
2752 
2753  MPI_Comm comm;
2754  CHKERR PetscObjectGetComm((PetscObject)mat_arrays[0], &comm);
2755 
2756  Mat mat_raw;
2757  CHKERR MatCreateNest(
2758 
2759  comm, 2, is_a.data(), 2, is_a.data(), mats_a.data(), &mat_raw
2760 
2761  );
2762 
2763  return std::make_pair(SmartPetscObj<Mat>(mat_raw), schur_net_data_ptr);
2764 }
2765 
2767  return new OpSchurAssembleBegin();
2768 }
2769 
2770 OpSchurAssembleBase *
2771 createOpSchurAssembleEnd(std::vector<std::string> fields_name,
2772  std::vector<boost::shared_ptr<Range>> field_ents,
2773  std::vector<SmartPetscObj<AO>> sequence_of_aos,
2774  std::vector<SmartPetscObj<Mat>> sequence_of_mats,
2775  std::vector<bool> sym_schur, bool symm_op,
2776  boost::shared_ptr<BlockStructure> diag_blocks) {
2777  if (symm_op)
2778  return new OpSchurAssembleEnd<SchurDSYSV>(fields_name, field_ents,
2779  sequence_of_aos, sequence_of_mats,
2780  sym_schur, symm_op, diag_blocks);
2781  else
2782  return new OpSchurAssembleEnd<SchurDGESV>(fields_name, field_ents,
2783  sequence_of_aos, sequence_of_mats,
2784  sym_schur, symm_op, diag_blocks);
2785 }
2786 
2787 OpSchurAssembleBase *
2788 createOpSchurAssembleEnd(std::vector<std::string> fields_name,
2789  std::vector<boost::shared_ptr<Range>> field_ents,
2790  std::vector<SmartPetscObj<AO>> sequence_of_aos,
2791  std::vector<SmartPetscObj<Mat>> sequence_of_mats,
2792  std::vector<bool> sym_schur,
2793  std::vector<double> diag_eps, bool symm_op,
2794  boost::shared_ptr<BlockStructure> diag_blocks) {
2795  if (symm_op)
2796  return new OpSchurAssembleEnd<SchurDSYSV>(
2797  fields_name, field_ents, sequence_of_aos, sequence_of_mats, sym_schur,
2798  diag_eps, symm_op, diag_blocks);
2799  else
2800  return new OpSchurAssembleEnd<SchurDGESV>(
2801  fields_name, field_ents, sequence_of_aos, sequence_of_mats, sym_schur,
2802  diag_eps, symm_op, diag_blocks);
2803 }
2804 
2807 
2808  auto apply = [](PC pc, Vec f, Vec x) {
2810  Mat A, P;
2811  CHKERR PCGetOperators(pc, &A, &P);
2812  CHKERR MatSolve(P, f, x);
2814  };
2815 
2816  CHKERR PCSetType(pc, PCSHELL);
2817  CHKERR PCShellSetApply(pc, apply);
2818  CHKERR PCShellSetName(pc, "MoFEMSchurBlockPC");
2819 
2821 }
2822 
2824  return setSchurA00MatSolvePC(pc);
2825 }
2826 
2827 // This is used to assemble local element matrices for Schur complement
2828 // and matrix for KSP
2829 template <>
2832  const EntitiesFieldData::EntData &col_data,
2833  const MatrixDouble &mat, InsertMode iora) {
2834  return SchurElemMats::MatSetValues(M, row_data, col_data, mat, iora);
2835 }
2836 
2839  const EntitiesFieldData::EntData &col_data,
2840  const MatrixDouble &mat, InsertMode iora) {
2841  return MatSetValues<AssemblyTypeSelector<PETSC>>(M, row_data, col_data, mat,
2842  iora);
2843 }
2844 
2845 // Is assumed that standard PETSc assembly works for matrices used by KSP
2846 SchurBackendMatSetValuesPtr::MatSetValuesPtr
2847  SchurBackendMatSetValuesPtr::matSetValuesPtr = schur_mat_set_values_wrap;
2848 
2849 // We assemble matrix for KSP and store local matrices for Schur complement.
2850 // Schur complement is calculated and assembled in OpSchurAssembleEnd.
2853  const EntitiesFieldData::EntData &col_data,
2854  const MatrixDouble &mat, InsertMode iora) {
2856  CHKERR SchurBackendMatSetValuesPtr::matSetValuesPtr(M, row_data, col_data,
2857  mat, iora);
2858  CHKERR assembleStorage(row_data, col_data, mat, iora);
2860 }
2861 
2862 // All is now wrapped in specialisation of
2863 // MatSetValues<AssemblyTypeSelector<SCHUR>>
2864 template <>
2865 MoFEMErrorCode MatSetValues<AssemblyTypeSelector<SCHUR>>(
2866  Mat M, const EntitiesFieldData::EntData &row_data,
2867  const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat,
2868  InsertMode iora) {
2869  return MatSetValues<SchurElemMats>(M, row_data, col_data, mat, iora);
2870 }
2871 
2872 // Standard (PETSc) assembly block matrices do noe work
2873 template <>
2876  const EntitiesFieldData::EntData &col_data,
2877  const MatrixDouble &mat, InsertMode iora) {
2878  return shell_block_mat_asmb_wrap(M, row_data, col_data, mat, iora);
2879 }
2880 
2882 
2883  static MoFEMErrorCode MatSetValues(Mat M,
2884  const EntitiesFieldData::EntData &row_data,
2885  const EntitiesFieldData::EntData &col_data,
2886  const MatrixDouble &mat, InsertMode iora);
2887 };
2888 
2890  SchurBackendMatSetValuesPtr::matSetValuesBlockPtr =
2892 
2895  const EntitiesFieldData::EntData &row_data,
2896  const EntitiesFieldData::EntData &col_data,
2897  const MatrixDouble &mat, InsertMode iora) {
2899  CHKERR SchurBackendMatSetValuesPtr::matSetValuesBlockPtr(M, row_data,
2900  col_data, mat, iora);
2901  CHKERR assembleStorage(row_data, col_data, mat, iora);
2903 }
2904 
2905 template <>
2908  const EntitiesFieldData::EntData &row_data,
2909  const EntitiesFieldData::EntData &col_data,
2910  const MatrixDouble &mat, InsertMode iora) {
2911  return SchurElemMatsBlock::MatSetValues(M, row_data, col_data, mat, iora);
2912 }
2913 
2915 
2916  static MoFEMErrorCode MatSetValues(Mat M,
2917  const EntitiesFieldData::EntData &row_data,
2918  const EntitiesFieldData::EntData &col_data,
2919  const MatrixDouble &mat, InsertMode iora);
2920 };
2921 
2923  SchurBackendMatSetValuesPtr::matSetValuesPreconditionedBlockPtr =
2925 
2927  Mat M, const EntitiesFieldData::EntData &row_data,
2928  const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat,
2929  InsertMode iora) {
2931  CHKERR SchurBackendMatSetValuesPtr::matSetValuesPreconditionedBlockPtr(
2932  M, row_data, col_data, mat, iora);
2933  CHKERR assembleStorage(row_data, col_data, mat, iora);
2935 }
2936 
2937 template <>
2939  Mat M, const EntitiesFieldData::EntData &row_data,
2940  const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat,
2941  InsertMode iora) {
2942  return SchurElemMatsPreconditionedBlock::MatSetValues(M, row_data, col_data,
2943  mat, iora);
2944 }
2945 
2947 schurSwitchPreconditioner(boost::shared_ptr<BlockStructure> block_mat_data) {
2949  if (block_mat_data->multiplyByPreconditioner) {
2950  block_mat_data->multiplyByPreconditioner = false;
2951  } else {
2952  if (!block_mat_data->preconditionerBlocksPtr) {
2953  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
2954  "preconditionerBlocksPtr not set");
2955  }
2956  block_mat_data->multiplyByPreconditioner = true;
2957  }
2959 }
2960 
2962 schurSaveBlockMesh(boost::shared_ptr<BlockStructure> block_mat_data,
2963  std::string filename) {
2965 
2966  moab::Core core;
2967  moab::Interface &moab = core;
2968 
2969  ReadUtilIface *iface;
2970  CHKERR moab.query_interface(iface);
2971 
2972  auto size = block_mat_data->blockIndex.size();
2973 
2974  EntityHandle starting_vertex;
2975  vector<double *> arrays_coord;
2976  CHKERR iface->get_node_coords(3, 4 * size, 0, starting_vertex, arrays_coord);
2977  EntityHandle starting_handle;
2978  EntityHandle *array;
2979  CHKERR iface->get_element_connect(size, 4, MBQUAD, 0, starting_handle, array);
2980 
2981  double def_val_nrm2 = 0;
2982  Tag th_nrm2;
2983  CHKERR moab.tag_get_handle("nrm2", 1, MB_TYPE_DOUBLE, th_nrm2,
2984  MB_TAG_CREAT | MB_TAG_DENSE, &def_val_nrm2);
2985 
2986  int def_val_mat_shift = 0;
2987  Tag th_mat_shift;
2988  CHKERR moab.tag_get_handle("mat_shift", 1, MB_TYPE_INTEGER, th_mat_shift,
2989  MB_TAG_CREAT | MB_TAG_DENSE, &def_val_mat_shift);
2990 
2991  int i = 0;
2992  for (auto &d : block_mat_data->blockIndex) {
2993  auto row = d.getRow();
2994  auto col = d.getCol();
2995  auto nb_rows = d.getNbRows();
2996  auto nb_cols = d.getNbCols();
2997  auto mat_shift = d.getMatShift();
2998 
2999  // q0
3000  arrays_coord[0][4 * i + 0] = row;
3001  arrays_coord[1][4 * i + 0] = col;
3002  arrays_coord[2][4 * i + 0] = 0;
3003 
3004  // q1
3005  arrays_coord[0][4 * i + 1] = row + nb_rows;
3006  arrays_coord[1][4 * i + 1] = col;
3007  arrays_coord[2][4 * i + 1] = 0;
3008 
3009  // q2
3010  arrays_coord[0][4 * i + 2] = row + nb_rows;
3011  arrays_coord[1][4 * i + 2] = col + nb_cols;
3012  arrays_coord[2][4 * i + 2] = 0;
3013 
3014  // q3
3015  arrays_coord[0][4 * i + 3] = row;
3016  arrays_coord[1][4 * i + 3] = col + nb_cols;
3017  arrays_coord[2][4 * i + 3] = 0;
3018 
3019  // ele conn
3020  array[4 * i + 0] = starting_vertex + 4 * i + 0;
3021  array[4 * i + 1] = starting_vertex + 4 * i + 1;
3022  array[4 * i + 2] = starting_vertex + 4 * i + 2;
3023  array[4 * i + 3] = starting_vertex + 4 * i + 3;
3024 
3025  auto prt = &(*block_mat_data->dataBlocksPtr)[d.getMatShift()];
3026  auto nrm2 = cblas_dnrm2(nb_rows * nb_cols, prt, 1);
3027  EntityHandle ele = starting_handle + i;
3028  CHKERR moab.tag_set_data(th_nrm2, &ele, 1, &nrm2);
3029  CHKERR moab.tag_set_data(th_mat_shift, &ele, 1, &mat_shift);
3030 
3031  ++i;
3032  }
3033 
3034  CHKERR iface->update_adjacencies(starting_handle, size, 4, array);
3035  CHKERR moab.write_file(filename.c_str(), "VTK", "");
3036 
3038 }
3039 
3040 } // namespace MoFEM
UBlasMatrix< double >
NOSPACE
@ NOSPACE
Definition: definitions.h:83
EigenMatrix::getMat
FTensor::Tensor2_symmetric< double, 3 > getMat(Val< double, 3 > &t_val, Vec< double, 3 > &t_vec, Fun< double > f)
Get the Mat object.
Definition: MatrixFunction.cpp:53
MoFEMFunctionReturnHot
#define MoFEMFunctionReturnHot(a)
Last executable line of each PETSc function used for error handling. Replaces return()
Definition: definitions.h:460
CHK_MOAB_THROW
#define CHK_MOAB_THROW(err, msg)
Check error code of MoAB function and throw MoFEM exception.
Definition: definitions.h:589
MoFEM::solve_schur_block_shell
static MoFEMErrorCode solve_schur_block_shell(Mat mat, Vec x, Vec y, InsertMode iora)
Definition: Schur.cpp:1716
MoFEM::id_from_handle
auto id_from_handle(const EntityHandle h)
Definition: Templates.hpp:1868
MoFEM::SchurElemMats::rowIndices
static boost::ptr_vector< VectorInt > rowIndices
Definition: Schur.cpp:224
MoFEM::EntitiesFieldData::EntData
Data on single entity (This is passed as argument to DataOperator::doWork)
Definition: EntitiesFieldData.hpp:128
MoFEM::DiagBlockIndex
Definition: Schur.cpp:232
MoFEM::EntitiesFieldData::EntData::getFieldEntities
const VectorFieldEntities & getFieldEntities() const
get field entities
Definition: EntitiesFieldData.hpp:1267
MoFEM::OpSchurAssembleEndImpl::invMat
MatrixDouble invMat
Definition: Schur.cpp:135
MoFEM::BlockStructure::dataBlocksPtr
boost::shared_ptr< std::vector< double > > dataBlocksPtr
Definition: Schur.cpp:326
MoFEM::DiagBlockIndex::Indexes::row
int row
Definition: Schur.cpp:266
MoFEM::zero_rows_columns
static PetscErrorCode zero_rows_columns(Mat A, PetscInt N, const PetscInt rows[], PetscScalar diag, Vec x, Vec b)
Definition: Schur.cpp:1380
MoFEM::MatSetValues
MoFEMErrorCode MatSetValues(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const double *ptr, InsertMode iora)
Assemble PETSc matrix.
Definition: EntitiesFieldData.hpp:1634
MoFEM::DiagBlockIndex::blockIndex
BlockIndex blockIndex
blocks indexes storage
Definition: Schur.cpp:318
MoFEM::field_bit_from_bit_number
auto field_bit_from_bit_number(const int bit_number)
get field bit id from bit number
Definition: Templates.hpp:1908
MoFEM::DiagBlockIndex::Indexes::nb_cols
int nb_cols
Definition: Schur.cpp:269
MOFEM_LOG_SEVERITY_SYNC
#define MOFEM_LOG_SEVERITY_SYNC(comm, severity)
Synchronise "SYNC" on curtain severity level.
Definition: LogManager.hpp:352
MoFEM::createISGeneral
auto createISGeneral(MPI_Comm comm, PetscInt n, const PetscInt idx[], PetscCopyMode mode)
Creates a data structure for an index set containing a list of integers.
Definition: PetscSmartObj.hpp:287
MOFEM_LOG_CHANNEL
#define MOFEM_LOG_CHANNEL(channel)
Set and reset channel.
Definition: LogManager.hpp:284
EntityHandle
MoFEM::BlockStructure::preconditionerBlocksPtr
boost::shared_ptr< std::vector< double > > preconditionerBlocksPtr
Definition: Schur.cpp:328
OP_SCHUR_ASSEMBLE_BASE
MoFEM::FEMethod
structure for User Loop Methods on finite elements
Definition: LoopMethods.hpp:369
MoFEM::MatSetValues< BlockStructure >
MoFEMErrorCode MatSetValues< BlockStructure >(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:2875
MoFEM::cmp_uid_hi
static auto cmp_uid_hi(const UId &b, const boost::weak_ptr< FieldEntity > &a)
Definition: ForcesAndSourcesCore.cpp:29
CHK_THROW_MESSAGE
#define CHK_THROW_MESSAGE(err, msg)
Check and throw MoFEM exception.
Definition: definitions.h:609
MoFEM::SchurElemMats::SchurElemStorage
multi_index_container< const SchurElemMats *, indexed_by< ordered_unique< tag< uid_mi_tag >, composite_key< SchurElemMats, member< SchurElemMats, const UId, &SchurElemMats::uidRow >, member< SchurElemMats, const UId, &SchurElemMats::uidCol > > >, ordered_non_unique< tag< col_mi_tag >, member< SchurElemMats, const UId, &SchurElemMats::uidCol > > > > SchurElemStorage
Definition: Schur.cpp:221
MoFEM::OpSchurAssembleBegin::OpSchurAssembleBegin
OpSchurAssembleBegin()
Definition: Schur.cpp:548
MoFEM::shell_block_mat_asmb_wrap_impl
MoFEMErrorCode shell_block_mat_asmb_wrap_impl(BlockStructure *ctx, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora, boost::function< int(const DiagBlockIndex::Indexes *)> shift_extractor, boost::shared_ptr< std::vector< double >> data_blocks_ptr)
Definition: Schur.cpp:1944
MoFEM::solve_add
static PetscErrorCode solve_add(Mat mat, Vec x, Vec y)
Definition: Schur.cpp:1376
MoFEM::OpSchurAssembleBase::OpSchurAssembleBase
OpSchurAssembleBase()=delete
lapack_dgesv
static __CLPK_integer lapack_dgesv(__CLPK_integer n, __CLPK_integer nrhs, __CLPK_doublereal *a, __CLPK_integer lda, __CLPK_integer *ipiv, __CLPK_doublereal *b, __CLPK_integer ldb)
Definition: lapack_wrap.h:176
MoFEM::Exceptions::MoFEMErrorCode
PetscErrorCode MoFEMErrorCode
MoFEM/PETSc error code.
Definition: Exceptions.hpp:56
MoFEM::Types::MatrixDouble
UBlasMatrix< double > MatrixDouble
Definition: Types.hpp:77
MoFEM::OpSchurAssembleEnd< SchurDSYSV >
Definition: Schur.cpp:154
MoFEM::BlockStructure::ghostX
SmartPetscObj< Vec > ghostX
Definition: Schur.cpp:323
A
constexpr AssemblyType A
Definition: operators_tests.cpp:30
MoFEM::SchurElemMats::getMat
auto & getMat() const
Definition: Schur.cpp:182
MoFEM::SchurDSYSV
Definition: Schur.cpp:1113
MoFEM::OpSchurAssembleEndImpl::fieldEnts
std::vector< boost::shared_ptr< Range > > fieldEnts
Definition: Schur.cpp:129
MoFEM::SchurBackendMatSetValuesPtr::MatSetValuesPtr
boost::function< MoFEMErrorCode(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)> MatSetValuesPtr
Definition: Schur.hpp:214
MoFEM::DiagBlockIndex::Indexes::getNbRows
int getNbRows() const
Definition: Schur.cpp:250
MoFEM::SchurEvents::SchurEvents
SchurEvents()
Definition: Schur.cpp:355
MoFEM::SchurEvents::MOFEM_EVENT_BlockStructureMult
static PetscLogEvent MOFEM_EVENT_BlockStructureMult
Definition: Schur.hpp:30
MoFEM::DiagBlockIndex::Indexes::col
int col
Definition: Schur.cpp:267
MoFEM::shell_block_preconditioner_mat_asmb_wrap
MoFEMErrorCode shell_block_preconditioner_mat_asmb_wrap(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:2225
MoFEM::SchurElemMats::colIndices
static boost::ptr_vector< VectorInt > colIndices
Definition: Schur.cpp:225
MoFEM::SchurEvents::MOFEM_EVENT_schurMatSetValues
static PetscLogEvent MOFEM_EVENT_schurMatSetValues
Definition: Schur.hpp:27
MoFEM::BlockStructure::ghostY
SmartPetscObj< Vec > ghostY
Definition: Schur.cpp:324
sdf.r
int r
Definition: sdf.py:8
MoFEM::OpSchurAssembleBaseImpl::OpSchurAssembleBaseImpl
OpSchurAssembleBaseImpl()=delete
MoFEM::DiagBlockIndex::Indexes::getCol
int getCol() const
Definition: Schur.cpp:249
MoFEM::DiagBlockInvStruture::A00SolverView::upView
std::vector< const Indexes * > upView
Definition: Schur.cpp:341
MoFEM::DiagBlockIndex::Indexes
block data indexes
Definition: Schur.cpp:240
MoFEM::debug_schur
constexpr bool debug_schur
Definition: Schur.cpp:12
MoFEM::Interface
DeprecatedCoreInterface Interface
Definition: Interface.hpp:2002
MoFEM::DiagBlockIndex::Indexes::nb_rows
int nb_rows
Definition: Schur.cpp:268
MoFEM::schur_mat_set_values_wrap
MoFEMErrorCode schur_mat_set_values_wrap(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:2838
c
const double c
speed of light (cm/ns)
Definition: initial_diffusion.cpp:39
MoFEM::SchurElemMats::col_mi_tag
Definition: Schur.cpp:202
MoFEM::DiagBlockIndex::Indexes::colShift
int colShift() const
Definition: Schur.cpp:261
MoFEM::SchurElemMats::iDX
const size_t iDX
Definition: Schur.cpp:178
MoFEM::SchurElemMats::uidCol
UId uidCol
Definition: Schur.cpp:180
PlasticOps::M
FTensor::Index< 'M', 3 > M
Definition: PlasticOps.hpp:117
MoFEM::OpSchurAssembleEnd< SchurDGESV >
Definition: Schur.cpp:162
CHKERR
#define CHKERR
Inline error check.
Definition: definitions.h:548
MoFEM::DiagBlockIndex::Indexes::getRow
int getRow() const
Definition: Schur.cpp:248
MoFEM::createDMVector
auto createDMVector(DM dm)
Get smart vector from DM.
Definition: DMMoFEM.hpp:1099
MoFEM::mult_add
static PetscErrorCode mult_add(Mat mat, Vec x, Vec y)
Definition: Schur.cpp:1370
MoFEM::createBlockMat
SchurShellMatData createBlockMat(DM dm, boost::shared_ptr< BlockStructure > data)
Create a Schur Mat object.
Definition: Schur.cpp:1503
MoFEM::DiagBlockInvStruture::A00SolverView
Definition: Schur.cpp:338
MoFEM::DiagBlockIndex::Indexes::Indexes
Indexes(int row, int col, int nb_rows, int nb_cols, int loc_row, int loc_col, int mat_shift, int inv_shift)
Definition: Schur.cpp:242
MoFEM::DMoFEMLoopFiniteElementsUpAndLowRank
PetscErrorCode DMoFEMLoopFiniteElementsUpAndLowRank(DM dm, const char fe_name[], MoFEM::FEMethod *method, int low_rank, int up_rank, CacheTupleWeakPtr cache_ptr=CacheTupleSharedPtr())
Executes FEMethod for finite elements in DM.
Definition: DMMoFEM.cpp:567
MoFEM::mat_zero
static PetscErrorCode mat_zero(Mat m)
Definition: Schur.cpp:1472
MoFEM::OpSchurAssembleBegin
Clear Schur complement internal data.
Definition: Schur.cpp:61
MoFEM::SchurElemMats::SchurElemMats
SchurElemMats(const size_t idx, const UId uid_row, const UId uid_col)
Definition: Schur.cpp:371
MoFEM
implementation of Data Operators for Forces and Sources
Definition: Common.hpp:10
MoFEM::OpSchurAssembleEndImpl::offMatInvDiagOffMat
MatrixDouble offMatInvDiagOffMat
Definition: Schur.cpp:137
MoFEM::SchurEvents::MOFEM_EVENT_BlockStructureSolve
static PetscLogEvent MOFEM_EVENT_BlockStructureSolve
Definition: Schur.hpp:31
MoFEM::OpSchurAssembleEndImpl::sequenceOfMats
std::vector< SmartPetscObj< Mat > > sequenceOfMats
Definition: Schur.cpp:131
a
constexpr double a
Definition: approx_sphere.cpp:30
MoFEM::mult_schur_block_shell
static MoFEMErrorCode mult_schur_block_shell(Mat mat, Vec x, Vec y, InsertMode iora)
Definition: Schur.cpp:1539
MoFEM::createOpSchurAssembleEnd
OpSchurAssembleBase * createOpSchurAssembleEnd(std::vector< std::string > fields_name, std::vector< boost::shared_ptr< Range >> field_ents, std::vector< SmartPetscObj< AO >> sequence_of_aos, std::vector< SmartPetscObj< Mat >> sequence_of_mats, std::vector< bool > sym_schur, std::vector< double > diag_eps, bool symm_op, boost::shared_ptr< BlockStructure > diag_blocks)
Construct a new Op Schur Assemble End object.
Definition: Schur.cpp:2788
MoFEM::schurSwitchPreconditioner
MoFEMErrorCode schurSwitchPreconditioner(boost::shared_ptr< BlockStructure > block_mat_data)
Switch preconditioner.
Definition: Schur.cpp:2947
BITFIELDID_SIZE
#define BITFIELDID_SIZE
max number of fields
Definition: definitions.h:233
MoFEM::DiagBlockInvStruture::A00SolverView::diagLoRange
std::vector< int > diagLoRange
Definition: Schur.cpp:340
MoFEM::SchurElemMats::assembleStorage
static MoFEMErrorCode assembleStorage(const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:376
MoFEM::SchurDGESV::invertMat
static auto invertMat(const SchurElemMats *row_ptr, MatrixDouble &inv, double eps)
Definition: Schur.cpp:1148
MoFEM::DiagBlockIndex::Indexes::mat_shift
int mat_shift
Definition: Schur.cpp:272
MoFEM::FieldEntity::getHandleFromUniqueId
static auto getHandleFromUniqueId(const UId uid)
Get the Handle From Unique Id.
Definition: FieldEntsMultiIndices.hpp:192
MoFEM::Types::UId
uint128_t UId
Unique Id.
Definition: Types.hpp:31
MoFEM::DiagBlockIndex::BlockIndex
multi_index_container< Indexes, indexed_by< ordered_non_unique< composite_key< Indexes, const_mem_fun< Indexes, int, &Indexes::getLocRow >, const_mem_fun< Indexes, int, &Indexes::getLocCol >, const_mem_fun< Indexes, int, &Indexes::getNbRows >, const_mem_fun< Indexes, int, &Indexes::getNbCols > > >, ordered_unique< composite_key< Indexes, const_mem_fun< Indexes, int, &Indexes::getRow >, const_mem_fun< Indexes, int, &Indexes::getCol >, const_mem_fun< Indexes, int, &Indexes::getNbRows >, const_mem_fun< Indexes, int, &Indexes::getNbCols > > >, ordered_non_unique< const_mem_fun< Indexes, int, &Indexes::rowShift > >, ordered_non_unique< const_mem_fun< Indexes, int, &Indexes::colShift > > > > BlockIndex
Definition: Schur.cpp:316
lapack_dsysv
static __CLPK_integer lapack_dsysv(char uplo, __CLPK_integer n, __CLPK_integer nrhs, __CLPK_doublereal *a, __CLPK_integer lda, __CLPK_integer *ipiv, __CLPK_doublereal *b, __CLPK_integer ldb, __CLPK_doublereal *work, __CLPK_integer lwork)
Definition: lapack_wrap.h:220
MoFEM::DiagBlockIndex::~DiagBlockIndex
virtual ~DiagBlockIndex()=default
MoFEM::SchurElemMats::uid_mi_tag
Definition: Schur.cpp:201
MoFEM::SchurElemMats::getColInd
auto & getColInd() const
Definition: Schur.cpp:184
double
MoFEM::DiagBlockInvStruture
Definition: Schur.cpp:334
MoFEM::BlockStructure::parentBlockStructurePtr
boost::shared_ptr< std::vector< double > > parentBlockStructurePtr
Definition: Schur.cpp:329
convert.type
type
Definition: convert.py:64
MoFEM::OpSchurAssembleEndImpl::diagEps
std::vector< double > diagEps
Definition: Schur.cpp:133
MoFEM::FieldEntity::getLocalUniqueIdCalculate
UId getLocalUniqueIdCalculate()
Get the Local Unique Id Calculate object.
Definition: FieldEntsMultiIndices.hpp:136
MoFEM::DiagBlockIndex::Indexes::rowShift
int rowShift() const
Definition: Schur.cpp:257
MoFEM::SchurElemMats::~SchurElemMats
virtual ~SchurElemMats()=default
EshelbianPlasticity::P
@ P
Definition: EshelbianContact.cpp:193
MoFEM::DiagBlockIndex::Indexes::getLocRow
int getLocRow() const
Definition: Schur.cpp:252
MoFEM::cmp_uid_lo
static auto cmp_uid_lo(const boost::weak_ptr< FieldEntity > &a, const UId &b)
Definition: ForcesAndSourcesCore.cpp:18
MoFEM::DiagBlockIndex::Indexes::loc_col
int loc_col
Definition: Schur.cpp:271
MoFEM::max_gemv_size
constexpr int max_gemv_size
Definition: Schur.cpp:1537
MoFEM::OpSchurAssembleBegin::doWork
MoFEMErrorCode doWork(int side, EntityType type, EntitiesFieldData::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
Definition: Schur.cpp:551
MoFEM::SchurElemMats::schurL2Storage
static SchurElemStorage schurL2Storage
Definition: Schur.cpp:229
MoFEM::EntitiesFieldData::EntData::getIndices
const VectorInt & getIndices() const
Get global indices of dofs on entity.
Definition: EntitiesFieldData.hpp:1204
MoFEM::OpSchurAssembleBaseImpl::assembleSchurMat
MoFEMErrorCode assembleSchurMat(Mat S, const UId &uid_row, VectorInt &row_ind, const UId &uid_col, VectorInt &col_ind, MatrixDouble &m, InsertMode iora)
Assemble Schur complement.
Definition: Schur.cpp:38
MoFEM::createOpSchurAssembleBegin
OpSchurAssembleBase * createOpSchurAssembleBegin()
Definition: Schur.cpp:2766
MoFEM::SchurElemMats::maxIndexCounter
static size_t maxIndexCounter
Definition: Schur.cpp:227
MoFEM::DiagBlockIndex::Indexes::inv_shift
int inv_shift
Definition: Schur.cpp:273
MoFEM::SchurEvents::MOFEM_EVENT_opSchurAssembleEnd
static PetscLogEvent MOFEM_EVENT_opSchurAssembleEnd
Definition: Schur.hpp:28
MoFEM::solve
static PetscErrorCode solve(Mat mat, Vec x, Vec y)
Definition: Schur.cpp:1373
MoFEM::SchurElemMatsBlock
Definition: Schur.cpp:2881
MoFEM::DiagBlockIndex::Indexes::getLocCol
int getLocCol() const
Definition: Schur.cpp:253
MOFEM_OPERATION_UNSUCCESSFUL
@ MOFEM_OPERATION_UNSUCCESSFUL
Definition: definitions.h:34
MoFEM::type_from_handle
auto type_from_handle(const EntityHandle h)
get type from entity handle
Definition: Templates.hpp:1876
MoFEM::EntitiesFieldData::EntData::getFieldDofs
const VectorDofs & getFieldDofs() const
get dofs data stature FEDofEntity
Definition: EntitiesFieldData.hpp:1259
MoFEM::mult
static PetscErrorCode mult(Mat mat, Vec x, Vec y)
Definition: Schur.cpp:1367
MoFEM::BlockStructure::dataInvBlocksPtr
boost::shared_ptr< std::vector< double > > dataInvBlocksPtr
Definition: Schur.cpp:327
MoFEM::PetscGlobalIdx_mi_tag
Definition: TagMultiIndices.hpp:38
MoFEM::DiagBlockIndex::Indexes::getMatShift
int & getMatShift() const
Definition: Schur.cpp:254
MoFEM::createSchurNestedMatrix
std::pair< SmartPetscObj< Mat >, boost::shared_ptr< NestSchurData > > createSchurNestedMatrix(boost::shared_ptr< NestSchurData > schur_net_data_ptr)
Create a Mat Diag Blocks object.
Definition: Schur.cpp:2738
MoFEM::MatSetValues< SchurElemMatsPreconditionedBlock >
MoFEMErrorCode MatSetValues< SchurElemMatsPreconditionedBlock >(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:2938
i
FTensor::Index< 'i', SPACE_DIM > i
Definition: hcurl_divergence_operator_2d.cpp:27
MoFEM::SchurElemMatsPreconditionedBlock
Definition: Schur.cpp:2914
MoFEM::OpSchurAssembleEndImpl::symSchur
std::vector< bool > symSchur
Definition: Schur.cpp:132
field_name
constexpr auto field_name
Definition: poisson_2d_homogeneous.cpp:13
MoFEM::SchurElemMats::schurElemMats
static boost::ptr_vector< SchurElemMats > schurElemMats
Definition: Schur.cpp:226
MoFEM::SchurElemMats::MatSetValues
static MoFEMErrorCode MatSetValues(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:2852
MoFEM::SchurDSYSV::invertMat
static auto invertMat(const SchurElemMats *row_ptr, MatrixDouble &inv, double eps)
Definition: Schur.cpp:1114
MoFEM::OpSchurAssembleBase
Definition: Schur.hpp:40
MoFEM::setSchurMatSolvePC
MoFEMErrorCode setSchurMatSolvePC(SmartPetscObj< PC > pc)
Definition: Schur.cpp:2823
convert.n
n
Definition: convert.py:82
MoFEM::SchurElemMats
Schur complement data storage.
Definition: Schur.cpp:173
MoFEM::OpSchurAssembleEndImpl::fieldsName
std::vector< std::string > fieldsName
Definition: Schur.cpp:128
MoFEM::setSchurA00MatSolvePC
MoFEMErrorCode setSchurA00MatSolvePC(SmartPetscObj< PC > pc)
Set PC for A00 block.
Definition: Schur.cpp:2805
N
const int N
Definition: speed_test.cpp:3
v
const double v
phase velocity of light in medium (cm/ns)
Definition: initial_diffusion.cpp:40
MoFEM::setSchurBlockMatOps
static MoFEMErrorCode setSchurBlockMatOps(Mat mat_raw)
Definition: Schur.cpp:1486
MoFEM::OpSchurAssembleEndImpl::OpSchurAssembleEndImpl
OpSchurAssembleEndImpl(std::vector< std::string > fields_name, std::vector< boost::shared_ptr< Range >> field_ents, std::vector< SmartPetscObj< AO >> sequence_of_aos, std::vector< SmartPetscObj< Mat >> sequence_of_mats, std::vector< bool > sym_schur, bool symm_op=true, boost::shared_ptr< BlockStructure > diag_blocks=nullptr)
Construct a new Op Schur Assemble End object.
Definition: Schur.cpp:578
MoFEM::EssentialBcStorage
[Storage and set boundary conditions]
Definition: FormsIntegrators.hpp:14
MoFEM::OpSchurAssembleEndImpl
Assemble Schur complement (Implementation)
Definition: Schur.cpp:74
MOFEM_TAG_AND_LOG
#define MOFEM_TAG_AND_LOG(channel, severity, tag)
Tag and log in channel.
Definition: LogManager.hpp:362
MoFEM::getInterfacePtr
auto getInterfacePtr(DM dm)
Get the Interface Ptr object.
Definition: DMMoFEM.hpp:1033
MOFEM_LOG
#define MOFEM_LOG(channel, severity)
Log.
Definition: LogManager.hpp:308
MoFEM::vectorDuplicate
SmartPetscObj< Vec > vectorDuplicate(Vec vec)
Create duplicate vector of smart vector.
Definition: PetscSmartObj.hpp:221
MoFEM::DiagBlockIndex::Indexes::loc_row
int loc_row
Definition: Schur.cpp:270
MoFEM::shell_block_mat_asmb_wrap
MoFEMErrorCode shell_block_mat_asmb_wrap(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:2205
MoFEM::DiagBlockInvStruture::indexView
A00SolverView indexView
Definition: Schur.cpp:345
MoFEM::MatSetValues< SchurElemMats >
MoFEMErrorCode MatSetValues< SchurElemMats >(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:2831
MoFEM::OpSchurAssembleEndImpl::invDiagOffMat
MatrixDouble invDiagOffMat
Definition: Schur.cpp:136
MoFEM::Core
CoreTmp< 0 > Core
Definition: Core.hpp:1140
std
Definition: enable_if.hpp:5
HenckyOps::f
auto f
Definition: HenckyOps.hpp:15
MoFEM::Types::VectorInt
UBlasVector< int > VectorInt
Definition: Types.hpp:67
eps
static const double eps
Definition: check_base_functions_derivatives_on_tet.cpp:11
MoFEM::SchurElemMats::uidRow
UId uidRow
Definition: Schur.cpp:179
MoFEM::OpSchurAssembleEndImpl::diagBlocks
boost::shared_ptr< BlockStructure > diagBlocks
Definition: Schur.cpp:140
MoFEM::OpSchurAssembleEnd
Assemble Schur complement.
Definition: Schur.cpp:151
UBlasVector< double >
MoFEM::Exceptions::ierr
static MoFEMErrorCodeGeneric< PetscErrorCode > ierr
Definition: Exceptions.hpp:76
MoFEM::getProblemPtr
auto getProblemPtr(DM dm)
get problem pointer from DM
Definition: DMMoFEM.hpp:1044
EigenMatrix::Vec
const FTensor::Tensor2< T, Dim, Dim > Vec
Definition: MatrixFunction.hpp:66
MOFEM_DATA_INCONSISTENCY
@ MOFEM_DATA_INCONSISTENCY
Definition: definitions.h:31
sdf_hertz_2d_axisymm_plane.d
float d
Definition: sdf_hertz_2d_axisymm_plane.py:4
FTensor::transform
Tensor2_Expr< transform_Tensor2< A, B, T, Dim0, Dim1, i, j >, T, Dim0, Dim1, i, j > transform(const Tensor2_Expr< A, T, Dim0, Dim1, i, j > &a, B function)
Definition: Tensor2_transform.hpp:27
MoFEM::SchurShellMatData
std::pair< SmartPetscObj< Mat >, boost::shared_ptr< BlockStructure > > SchurShellMatData
Definition: Schur.hpp:112
MoFEM::OpSchurAssembleEndImpl::transOffMatInvDiagOffMat
MatrixDouble transOffMatInvDiagOffMat
Definition: Schur.cpp:138
MoFEM::DiagBlockIndex::Indexes::getNbCols
int getNbCols() const
Definition: Schur.cpp:251
m
FTensor::Index< 'm', 3 > m
Definition: shallow_wave.cpp:80
MoFEM::BlockStructure::multiplyByPreconditioner
bool multiplyByPreconditioner
Definition: Schur.cpp:331
MoFEM::OpSchurAssembleBaseImpl
Definition: Schur.cpp:14
MoFEM::SchurElemMats::locMats
static boost::ptr_vector< MatrixDouble > locMats
Definition: Schur.cpp:223
MoFEM::createBlockMatStructure
boost::shared_ptr< BlockStructure > createBlockMatStructure(DM dm, SchurFEOpsFEandFields schur_fe_op_vec)
Create a Mat Diag Blocks object.
Definition: Schur.cpp:1196
MoFEMFunctionBeginHot
#define MoFEMFunctionBeginHot
First executable line of each MoFEM function, used for error handling. Final line of MoFEM functions ...
Definition: definitions.h:453
MoFEM::DiagBlockInvStruture::A00SolverView::diagUpRange
std::vector< int > diagUpRange
Definition: Schur.cpp:342
sdf_wavy_2d.ind
float ind
Definition: sdf_wavy_2d.py:8
MoFEM::SmartPetscObj< AO >
MoFEM::MatSetValues< SchurElemMatsBlock >
MoFEMErrorCode MatSetValues< SchurElemMatsBlock >(Mat M, const EntitiesFieldData::EntData &row_data, const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat, InsertMode iora)
Definition: Schur.cpp:2907
MoFEM::ent_form_type_and_id
auto ent_form_type_and_id(const EntityType type, const EntityID id)
get entity handle from type and id
Definition: Templates.hpp:1884
MoFEM::BlockStructure
Definition: Schur.cpp:321
MoFEM::SchurDGESV
Definition: Schur.cpp:1147
MoFEM::SchurEvents::MOFEM_EVENT_zeroRowsAndCols
static PetscLogEvent MOFEM_EVENT_zeroRowsAndCols
Definition: Schur.hpp:32
MoFEM::schurSaveBlockMesh
MoFEMErrorCode schurSaveBlockMesh(boost::shared_ptr< BlockStructure > block_mat_data, std::string filename)
Save block matrix as a mesh.
Definition: Schur.cpp:2962
MoFEM::SchurElemMats::getRowInd
auto & getRowInd() const
Definition: Schur.cpp:183
MoFEM::FieldEntity::getFieldBitNumberFromUniqueId
static auto getFieldBitNumberFromUniqueId(const UId uid)
Get the Field Bit Number From Unique Id.
Definition: FieldEntsMultiIndices.hpp:205
convert.int
int
Definition: convert.py:64
MoFEMFunctionReturn
#define MoFEMFunctionReturn(a)
Last executable line of each PETSc function used for error handling. Replaces return()
Definition: definitions.h:429
MoFEM::OpSchurAssembleEndImpl::sequenceOfAOs
std::vector< SmartPetscObj< AO > > sequenceOfAOs
Definition: Schur.cpp:130
MOFEM_NOT_IMPLEMENTED
@ MOFEM_NOT_IMPLEMENTED
Definition: definitions.h:32
MoFEM::DiagBlockIndex::Indexes::getInvShift
int & getInvShift() const
Definition: Schur.cpp:255
MoFEM::DiagBlockInvStruture::A00SolverView::lowView
std::vector< const Indexes * > lowView
Definition: Schur.cpp:339
MoFEM::SchurEvents::MOFEM_EVENT_BlockStructureSetValues
static PetscLogEvent MOFEM_EVENT_BlockStructureSetValues
Definition: Schur.hpp:29
MoFEMFunctionBegin
#define MoFEMFunctionBegin
First executable line of each MoFEM function, used for error handling. Final line of MoFEM functions ...
Definition: definitions.h:359
MoFEM::SchurFEOpsFEandFields
std::vector< std::pair< std::string, std::vector< SchurFieldPair > > > SchurFEOpsFEandFields
Definition: Schur.hpp:97
MoFEM::OpSchurAssembleEndImpl::doWorkImpl
MoFEMErrorCode doWorkImpl(int side, EntityType type, EntitiesFieldData::EntData &data)
Definition: Schur.cpp:591
MoFEM::isAllGather
auto isAllGather(IS is)
IS All gather.
Definition: PetscSmartObj.hpp:300
MoFEM::getNestSchurData
boost::shared_ptr< NestSchurData > getNestSchurData(std::pair< SmartPetscObj< DM >, SmartPetscObj< DM >> dms, boost::shared_ptr< BlockStructure > block_mat_data_ptr, std::vector< std::string > fields_names, std::vector< boost::shared_ptr< Range >> field_ents, bool add_preconditioner_block)
Get the Schur Nest Mat Array object.
Definition: Schur.cpp:2249