Schur.cpp
1 /**
2  * @file Schur.cpp
3  * @brief Implementation of Schur Complement
4  * @date 2023-02-03
5  *
6  * @copyright Copyright (c) 2023
7  *
8  */
9 
10 namespace MoFEM {
11 
12 constexpr bool debug_schur = false;
13 
15  OpSchurAssembleBaseImpl() = delete;
16 
17 protected:
19 
20  /**
21  * @brief Assemble Schur complement
22  *
23  */
25 
26  Mat S,
27 
28  const UId &uid_row, VectorInt &row_ind,
29 
30  const UId &uid_col, VectorInt &col_ind,
31 
32  MatrixDouble &m, InsertMode iora
33 
34  );
35 };
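// Note: for a block system
//
//   [ A00  A01 ] [ x0 ]   [ f0 ]
//   [ A10  A11 ] [ x1 ] = [ f1 ]
//
// the Schur complement assembled by the operators in this file is,
// schematically,
//
//   S = A11 - A10 * inv(A00) * A01,
//
// where the A00 block is spanned by the fields/entities passed to
// OpSchurAssembleEndImpl (the "a00" blocks below) and S is added to schurMat.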
36 
37 // Assemble specialisations
39 
40  Mat S,
41 
42  const UId &uid_row, VectorInt &row_ind,
43 
44  const UId &uid_col, VectorInt &col_ind,
45 
46  MatrixDouble &m, InsertMode iora
47 
48 ) {
50 
51  S, row_ind.size(), &*row_ind.begin(), col_ind.size(), &*col_ind.begin(),
52  &*m.data().begin(), iora
53 
54  );
55 }
56 
57 /**
58  * @brief Clear Schur complement internal data
59  *
60  */
62 
64 
65  MoFEMErrorCode doWork(int side, EntityType type,
67 };
68 
70 
72  boost::shared_ptr<std::vector<unsigned char>> marker_ptr,
73  double diag_val);
74 
75  MoFEMErrorCode doWork(int side, EntityType type,
77 
78 private:
79  boost::shared_ptr<std::vector<unsigned char>> markerPtr;
80  double diagVal;
81 };
82 
83 /**
84  * @brief Assemble Schur complement (Implementation)
85  *
86  */
87 template <typename OP_SCHUR_ASSEMBLE_BASE>
89 
91 
92  /**
93  * @brief Construct a new Op Schur Assemble End object
94  *
95  * @param fields_name list of fields
96  * @param field_ents list of entities on which schur complement is applied
97  * (can be empty)
98  * @param schur_ao AO mapping problem indices to Schur matrix indices
99  * @param schur_mat Schur complement matrix
100  * @param sym_schur true if the Schur complement is symmetric
101  * @param symm_op true if the block diagonal is symmetric
102  */
104 
105  std::vector<std::string> fields_name,
106  std::vector<boost::shared_ptr<Range>> field_ents,
107 
108  SmartPetscObj<AO> schur_ao, SmartPetscObj<Mat> schur_mat,
109 
110  bool sym_schur, bool symm_op);
111 
112 protected:
113  template <typename I>
115 
116  int side, EntityType type, EntitiesFieldData::EntData &data
117 
118  );
119 
120  std::vector<std::string> fieldsName;
121  std::vector<boost::shared_ptr<Range>> fieldEnts;
124  bool symSchur;
125 
129 };
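// A minimal usage sketch, assuming the OpSchurAssembleEnd<I> specialisations
// below inherit this constructor; "FLUX", pip, ao_up and S are illustrative
// names only, not taken from this file:
//
//   pip.push_back(new OpSchurAssembleBegin());
//   // ... operators assembling element matrices ...
//   pip.push_back(new OpSchurAssembleEnd<SchurDSYSV>(
//       {"FLUX"}, {nullptr}, ao_up, S, /*sym_schur*/ true, /*symm_op*/ true));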
130 
131 struct SchurDSYSV; ///< SY symmetric
132 struct SchurDGESV; ///< GE general (i.e., nonsymmetric, in some cases
133  ///< rectangular)
134 
135 /**
136  * @brief Assemble Schur complement
137  *
138  */
139 template <typename I> struct OpSchurAssembleEnd;
140 
141 template <>
143  : public OpSchurAssembleEndImpl<OpSchurAssembleBaseImpl> {
145  MoFEMErrorCode doWork(int side, EntityType type,
147 };
148 
149 template <>
151  : public OpSchurAssembleEndImpl<OpSchurAssembleBaseImpl> {
153  MoFEMErrorCode doWork(int side, EntityType type,
155 };
156 
157 /**
158  * @brief Schur complement data storage
159  *
160  */
161 struct SchurElemMats : public boost::enable_shared_from_this<SchurElemMats> {
162 
163  SchurElemMats(const size_t idx, const UId uid_row, const UId uid_col);
164  virtual ~SchurElemMats() = default;
165 
166  const size_t iDX;
169 
170  inline auto &getMat() const { return locMats[iDX]; }
171  inline auto &getRowInd() const { return rowIndices[iDX]; }
172  inline auto &getColInd() const { return colIndices[iDX]; }
173 
174  static MoFEMErrorCode MatSetValues(Mat M,
175  const EntitiesFieldData::EntData &row_data,
176  const EntitiesFieldData::EntData &col_data,
177  const MatrixDouble &mat, InsertMode iora);
178 
179 protected:
180  static MoFEMErrorCode
182  const EntitiesFieldData::EntData &col_data,
183  const MatrixDouble &mat, InsertMode iora);
184 
185  friend struct OpSchurAssembleBegin;
186  friend struct OpSchurZeroRowsAndCols;
187  template <typename OP_SCHUR_ASSEMBLE_BASE>
188  friend struct OpSchurAssembleEndImpl;
189 
190  struct uid_mi_tag {};
191  struct col_mi_tag {};
192 
193  using SchurElemStorage = multi_index_container<
194  const SchurElemMats *,
195  indexed_by<
196 
197  ordered_unique<
198  tag<uid_mi_tag>,
199  composite_key<
201 
202  member<SchurElemMats, const UId, &SchurElemMats::uidRow>,
203  member<SchurElemMats, const UId, &SchurElemMats::uidCol>
204 
205  >>,
206 
207  ordered_non_unique<tag<col_mi_tag>, member<SchurElemMats, const UId,
209 
210  >>;
211 
212  static boost::ptr_vector<MatrixDouble> locMats;
213  static boost::ptr_vector<VectorInt> rowIndices;
214  static boost::ptr_vector<VectorInt> colIndices;
215  static boost::ptr_vector<SchurElemMats> schurElemMats;
216  static size_t maxIndexCounter;
217 
219 };
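// Cached element blocks are looked up by a composite key of row and column
// entity unique ids, e.g. (the pattern used later in this file):
//
//   auto it = SchurElemMats::schurL2Storage.get<SchurElemMats::uid_mi_tag>()
//                 .find(boost::make_tuple(uid_row, uid_col));
//
// so repeated MatSetValues calls for the same entity pair accumulate in a
// single dense block instead of being assembled into a PETSc matrix at once.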
220 
222 
223  virtual ~DiagBlockIndex() = default;
224 
225  /**
226  * @brief block data indexes
227  *
228  */
229  struct Indexes {
230 
232  int nb_cols, int loc_row, int loc_col, int mat_shift)
236 
237  inline UId getRowUId() const { return uid_row; }
238  inline UId getColUId() const { return uid_col; }
239  inline UId getFEUId() const { return uid_fe; }
240  inline int getRow() const { return row; }
241  inline int getCol() const { return col; }
242  inline int getNbRows() const { return nb_rows; }
243  inline int getNbCols() const { return nb_cols; }
244  inline int getLocRow() const { return loc_row; }
245  inline int getLocCol() const { return loc_col; }
246  inline int &getMatShift() const { return mat_shift; }
247 
248  inline int rowShift() const {
249  return getRow() + getNbRows();
250  } // shift such that lower bound is included
251 
252  inline int colShift() const {
253  return getCol() + getNbCols();
254  } // shift such that lower bound is included
255 
256  private:
260  int row;
261  int col;
262  int nb_rows;
263  int nb_cols;
264  int loc_row;
265  int loc_col;
266  mutable int mat_shift;
267  };
268 
269  using BlockIndex = multi_index_container<
270 
271  Indexes,
272 
273  indexed_by<
274 
275  ordered_unique<
276 
277  composite_key<Indexes,
278 
279  const_mem_fun<Indexes, UId, &Indexes::getRowUId>,
280  const_mem_fun<Indexes, UId, &Indexes::getColUId>>
281 
282  >,
283 
284  ordered_non_unique<
285 
286  const_mem_fun<Indexes, UId, &Indexes::getFEUId>
287 
288  >,
289 
290  ordered_non_unique<
291 
292  const_mem_fun<Indexes, UId, &Indexes::getRowUId>
293 
294  >,
295 
296  ordered_non_unique<
297 
298  const_mem_fun<Indexes, UId, &Indexes::getColUId>
299 
300  >>>;
301 
302  BlockIndex blockIndex; ///< blocks indexes storage
303 };
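// Note on the block layout: each Indexes entry describes a dense
// (nb_rows x nb_cols) block stored row-major in a flat array of values
// starting at getMatShift(); the (i, j) entry of a block is
//
//   (*dataBlocksPtr)[getMatShift() + i * getNbCols() + j]
//
// and getMatShift() == -1 marks a block with no allocated data.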
304 
306 
309 
310  boost::shared_ptr<std::vector<double>> dataBlocksPtr;
311  boost::shared_ptr<std::vector<double>> preconditionerBlocksPtr;
312  boost::shared_ptr<std::vector<double>> parentBlockStructurePtr;
313 
315 };
316 
324 
326  PetscLogEventRegister("schurMatSetVal", 0, &MOFEM_EVENT_schurMatSetValues);
327  PetscLogEventRegister("opSchurAsmEnd", 0, &MOFEM_EVENT_opSchurAssembleEnd);
328  PetscLogEventRegister("blockSetVal", 0, &MOFEM_EVENT_BlockStructureSetValues);
329  PetscLogEventRegister("blockMult", 0, &MOFEM_EVENT_BlockStructureMult);
330  PetscLogEventRegister("blockSolve", 0, &MOFEM_EVENT_BlockStructureSolve);
331  PetscLogEventRegister("schurZeroRandC", 0, &MOFEM_EVENT_zeroRowsAndCols);
332  PetscLogEventRegister("assembleSchurMat", 0, &MOFEM_EVENT_AssembleSchurMat);
333 }
334 
336 boost::ptr_vector<MatrixDouble> SchurElemMats::locMats;
337 boost::ptr_vector<VectorInt> SchurElemMats::rowIndices;
338 boost::ptr_vector<VectorInt> SchurElemMats::colIndices;
339 boost::ptr_vector<SchurElemMats> SchurElemMats::schurElemMats;
341 
342 SchurElemMats::SchurElemMats(const size_t idx, const UId uid_row,
343  const UId uid_col)
344  : iDX(idx), uidRow(uid_row), uidCol(uid_col) {}
345 
348  const EntitiesFieldData::EntData &col_data,
349  const MatrixDouble &mat, InsertMode iora) {
351 
352 #ifndef NDEBUG
353  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_schurMatSetValues, 0, 0, 0, 0);
354 #endif // NDEBUG
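  // Overview: this routine intercepts element-level MatSetValues calls and,
  // instead of assembling into a PETSc matrix, caches the dense block keyed by
  // the (row, col) entity unique ids in the static SchurElemMats storage:
  //   1) get row/column indices (respecting EssentialBcStorage markers),
  //   2) compute a unique id for the row and column entity groups,
  //   3) find or create the cached block and INSERT/ADD the values into it.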
355 
356  // get row indices; if the entity has stored data, take indices from storage
357  // (storage keeps marked indices used to manage boundary conditions)
358  auto get_row_indices = [&]() -> const VectorInt & {
359  if (auto e_ptr = row_data.getFieldEntities()[0]) {
360  if (auto stored_data_ptr =
361  e_ptr->getSharedStoragePtr<EssentialBcStorage>()) {
362  return stored_data_ptr->entityIndices;
363  }
364  }
365  return row_data.getIndices();
366  };
367 
368  const auto &row_ind = get_row_indices();
369  const auto &col_ind = col_data.getIndices();
370 
371  const auto nb_rows = row_ind.size();
372  const auto nb_cols = col_ind.size();
373 
374 #ifndef NDEBUG
375  if (mat.size1() != nb_rows) {
376  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
377  "Wrong mat size %d != %d", mat.size1(), nb_rows);
378  }
379  if (mat.size2() != nb_cols) {
380  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
381  "Wrong mat size %d != %d", mat.size2(), nb_cols);
382  }
383 #endif // NDEBUG
384 
385  // get entity uid
386  auto get_uid = [](auto &data) {
387  if (data.getFieldEntities().size() == 1) {
388 
389  return data.getFieldEntities()[0]->getLocalUniqueId();
390 
391  } else {
392 
393  // It is assumed that the sum of entity ids gives a unique id. That is not
394  // strictly true, but the corner case is improbable.
395 
396  // @todo: the debug version should test for this
397 
398  auto &uid0 = data.getFieldEntities()[0]->getLocalUniqueId();
399  auto field_id0 = FieldEntity::getFieldBitNumberFromUniqueId(uid0);
400  auto ent0 = FieldEntity::getHandleFromUniqueId(uid0);
401  auto type0 = type_from_handle(ent0);
402  auto id = id_from_handle(ent0);
403 
404  for (auto i = 1; i < data.getFieldEntities().size(); ++i) {
405 
406  // get entity id from ent
407  id += id_from_handle(
408 
409  // get entity handle from the unique id
410  FieldEntity::getHandleFromUniqueId(
411  data.getFieldEntities()[i]->getLocalUniqueId())
412 
413  );
414  }
415 
416  return FieldEntity::getLocalUniqueIdCalculate(
417  field_id0,
418 
419  ent_form_type_and_id(type0, id)
420 
421  );
422  }
423  };
424 
425  auto uid_row = get_uid(row_data);
426  auto uid_col = get_uid(col_data);
427 
428  auto it =
429  SchurElemMats::schurL2Storage.template get<SchurElemMats::uid_mi_tag>()
430  .find(boost::make_tuple(uid_row, uid_col));
431 
432  if (it ==
433  SchurElemMats::schurL2Storage.template get<SchurElemMats::uid_mi_tag>()
434  .end()) {
435 
436  // get size of arrays of matrices
437  const auto size = SchurElemMats::locMats.size();
438 
439  // expand memory allocation
440  if (SchurElemMats::maxIndexCounter == size) {
441  SchurElemMats::locMats.push_back(new MatrixDouble());
442  SchurElemMats::rowIndices.push_back(new VectorInt());
443  SchurElemMats::colIndices.push_back(new VectorInt());
445  new SchurElemMats(SchurElemMats::maxIndexCounter, uid_row, uid_col));
446  } else {
448  uid_row;
450  uid_col;
451  }
452 
453  // add matrix to storage
454  auto p = SchurElemMats::schurL2Storage.emplace(
456 #ifndef NDEBUG
457  if (!p.second) {
458  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "Failed to insert");
459  }
460 #endif // NDEBUG
461 
462  auto asmb = [&](auto &sm) {
463  sm.resize(nb_rows, nb_cols, false);
464  noalias(sm) = mat;
465  };
466 
467  asmb((*p.first)->getMat());
468 
469  auto add_indices = [](auto &storage, auto &ind) {
470  storage.resize(ind.size(), false);
471  noalias(storage) = ind;
472  };
473 
474  add_indices((*p.first)->getRowInd(), row_ind);
475  add_indices((*p.first)->getColInd(), col_ind);
476 
477  } else {
478  // entry (submatrix) already exists
479 
480  auto asmb = [&](auto &sm) {
482 
483 #ifndef NDEBUG
484  if (sm.size1() != nb_rows) {
485  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
486  "Wrong mat or storage size %d != %d", sm.size1(), nb_rows);
487  }
488  if (sm.size2() != nb_cols) {
489  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
490  "Wrong mat or storage size %d != %d", sm.size2(), nb_cols);
491  }
492 #endif // NDEBUG
493 
494  switch (iora) {
495  case ADD_VALUES:
496  sm += mat;
497  break;
498  case INSERT_VALUES:
499  noalias(sm) = mat;
500  break;
501  default:
502  SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED,
503  "Assembly type not implemented");
504  }
506  };
507 
508  CHKERR asmb((*it)->getMat());
509 
510  // no need to set indices
511  }
512 
513 #ifndef NDEBUG
514  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_schurMatSetValues, 0, 0, 0, 0);
515 #endif // NDEBUG
516 
518 }
519 
520 OpSchurAssembleBegin::OpSchurAssembleBegin()
521  : OpSchurAssembleBaseImpl(NOSPACE, OPSPACE) {}
522 
526 #ifndef NDEBUG
527  if constexpr (debug_schur)
528  MOFEM_LOG("SELF", Sev::noisy) << "Schur assemble begin";
529 #endif
532 
534 }
535 
536 OpSchurZeroRowsAndCols::OpSchurZeroRowsAndCols(
537  boost::shared_ptr<std::vector<unsigned char>> marker_ptr, double diag_val)
538  : OpSchurAssembleBaseImpl(NOSPACE, OPSPACE), markerPtr(marker_ptr),
539  diagVal(diag_val) {}
540 
545 
546  auto fe_ptr = getFEMethod();
547  auto prb_ptr = fe_ptr->problemPtr;
548  auto &dof = prb_ptr->getNumeredRowDofsPtr()->get<PetscGlobalIdx_mi_tag>();
549 
550  auto find_loc_dof_index = [&](auto glob) {
551  auto it = dof.find(glob);
552  if (it != dof.end()) {
553  return (*it)->getPetscLocalDofIdx();
554  }
555  MOFEM_LOG("SELF", Sev::error) << "Global dof " << glob;
558  "Global dof index not found in local numered dofs storage");
559  return -1;
560  };
561 
562  for (auto &s : SchurElemMats::schurL2Storage) {
563 
564  auto zero_ind = [&](auto &ind) {
565  for (auto i = 0; i < ind.size(); ++i) {
566  if (ind[i] >= 0) {
567  if ((*markerPtr)[find_loc_dof_index(ind[i])]) {
568  ind[i] = -1;
569  }
570  }
571  }
572  };
573 
574  zero_ind(s->getRowInd());
575  zero_ind(s->getColInd());
576 
577  }
578 
580 };
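// Note: setting an index to -1 above relies on PETSc ignoring negative
// row/column indices in MatSetValues, so the marked (constrained) DOFs are
// skipped when the cached blocks are assembled, and the corresponding rows and
// columns of the dense A00 block are zeroed later (see get_zeroed_indices).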
581 
582 template <typename OP_SCHUR_ASSEMBLE_BASE>
584  std::vector<std::string> fields_name,
585  std::vector<boost::shared_ptr<Range>> field_ents,
586  SmartPetscObj<AO> schur_ao, SmartPetscObj<Mat> schur_mat, bool sym_schur,
587  bool symm_op)
588  : OP(NOSPACE, OP::OPSPACE, symm_op), fieldsName(fields_name),
589  fieldEnts(field_ents), schurAO(schur_ao), schurMat(schur_mat),
590  symSchur(sym_schur) {}
591 
592 template <typename OP_SCHUR_ASSEMBLE_BASE>
593 template <typename I>
595  int side, EntityType type, EntitiesFieldData::EntData &data) {
596 
598 
599 #ifndef NDEBUG
600  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_opSchurAssembleEnd, 0, 0, 0, 0);
601 #endif
602 
603 #ifndef NDEBUG
604  if constexpr (debug_schur)
605  MOFEM_LOG("SELF", Sev::noisy) << "Schur assemble begin -> end";
606 #endif
607 
608  auto get_a00_uids = [&]() {
609  auto get_field_bit = [&](auto &name) {
610  return OP::getPtrFE()->mField.get_field_bit_number(name);
611  };
612 
613  std::vector<std::pair<UId, UId>> a00_uids;
614  a00_uids.reserve(fieldsName.size());
615  for (auto ss = 0; ss != fieldsName.size(); ++ss) {
616  auto field_bit = get_field_bit(fieldsName[ss]);
617  auto row_ents = fieldEnts[ss];
618  if (row_ents) {
619  for (auto p = row_ents->pair_begin(); p != row_ents->pair_end(); ++p) {
620  auto lo_uid =
621  FieldEntity::getLoLocalEntityBitNumber(field_bit, p->first);
622  auto hi_uid =
623  FieldEntity::getHiLocalEntityBitNumber(field_bit, p->second);
624  a00_uids.push_back(std::make_pair(lo_uid, hi_uid));
625  }
626  } else {
627  auto lo_uid = FieldEntity::getLoLocalEntityBitNumber(
628  field_bit, get_id_for_min_type<MBVERTEX>());
629  auto hi_uid = FieldEntity::getHiLocalEntityBitNumber(
630  field_bit, get_id_for_max_type<MBENTITYSET>());
631  a00_uids.push_back(std::make_pair(lo_uid, hi_uid));
632  }
633  }
634  return a00_uids;
635  };
636 
637 #ifndef NDEBUG
638  auto get_field_name = [&](auto uid) {
639  return OP::getPtrFE()->mField.get_field_name(field_bit_from_bit_number(
640  FieldEntity::getFieldBitNumberFromUniqueId(uid)));
641  };
642  auto list_storage = [&](auto &storage) {
644  int i = 0;
645  for (auto &p : storage) {
646  MOFEM_LOG("SELF", Sev::noisy)
647  << "List schur storage: " << i << " " << p->iDX << ": "
648  << get_field_name(p->uidRow) << " " << get_field_name(p->uidCol)
649  << " : " << p->getMat().size1() << " " << p->getMat().size2();
650  ++i;
651  }
653  };
654 #endif // NDEBUG
655 
656  auto assemble_dense_blocks = [&]() {
657  using matrix_range = ublas::matrix_range<MatrixDouble>;
658  using range = ublas::range;
660  auto &storage = SchurElemMats::schurL2Storage;
661 
662  auto assemble_schur = [this](auto &m, auto &uid_row, auto &uid_col,
663  auto *row_ind_ptr, auto *col_ind_ptr) {
665 
666  if (schurMat) {
667 
668  if (auto ierr = this->assembleSchurMat(
669 
670  schurMat, uid_row, *row_ind_ptr, uid_col, *col_ind_ptr, m,
671  ADD_VALUES
672 
673  )) {
674 #ifndef NDEBUG
675  auto field_ents = OP::getPtrFE()->mField.get_field_ents();
676  auto row_ent_it = field_ents->find(uid_row);
677  auto col_ent_it = field_ents->find(uid_col);
678  MOFEM_LOG_CHANNEL("SELF");
679  if (row_ent_it != field_ents->end())
680  MOFEM_LOG("SELF", Sev::error)
681  << "Assemble row entity: " << (*row_ent_it)->getName() << " "
682  << (*col_ent_it)->getEntTypeName() << " side "
683  << (*row_ent_it)->getSideNumber();
684  if (col_ent_it != field_ents->end())
685  MOFEM_LOG("SELF", Sev::error)
686  << "Assemble col entity: " << (*col_ent_it)->getName() << " "
687  << (*col_ent_it)->getEntTypeName() << " side "
688  << (*col_ent_it)->getSideNumber();
689 #endif // NDEBUG
690  CHK_THROW_MESSAGE(ierr, "MatSetValues");
691  }
692  }
693 
695  };
696 
697  auto a00_uids = get_a00_uids();
698 
699  auto get_block_indexing = [&](auto &a00_uids) {
700  // iterate over a00 uids and find blocks
701  std::vector<const SchurElemMats *> block_list;
702  block_list.reserve(storage.size());
703  for (auto &rp_uid : a00_uids) {
704  auto [rlo_uid, rhi_uid] = rp_uid;
705  for (auto &cp_uid : a00_uids) {
706  auto [clo_uid, chi_uid] = cp_uid;
707 
708  auto it =
709  storage.template get<SchurElemMats::uid_mi_tag>().lower_bound(
710  boost::make_tuple(rlo_uid, clo_uid));
711  auto hi_it =
712  storage.template get<SchurElemMats::uid_mi_tag>().upper_bound(
713  boost::make_tuple(rhi_uid, chi_uid));
714 
715  for (; it != hi_it; ++it) {
716  if ((*it)->uidRow >= rlo_uid && (*it)->uidRow < rhi_uid &&
717  (*it)->uidCol >= clo_uid && (*it)->uidCol < chi_uid) {
718  block_list.push_back(*it);
719  }
720  }
721  }
722  }
723 
724  // create block indexes map for blockMat
725  std::map<UId, std::pair<size_t, const VectorInt *>>
726  block_indexing; // uid block map
727  for (auto d : block_list) {
728  if (block_indexing.find(d->uidRow) == block_indexing.end()) {
729  block_indexing[d->uidRow] =
730  std::make_pair(d->getRowInd().size(), &(d->getRowInd()));
731  }
732  }
733 
734  // set indexes to block
735  int mat_block_size = 0; // size of block matrix
736  for (auto &p_uid : a00_uids) {
737  auto [lo_uid, hi_uid] = p_uid;
738  auto lo = block_indexing.lower_bound(lo_uid);
739  auto up = block_indexing.upper_bound(hi_uid);
740  for (; lo != up; ++lo) {
741  lo->second.first = mat_block_size;
742  mat_block_size += lo->second.second->size();
743  }
744  }
745 
746  return std::make_tuple(block_list, block_indexing, mat_block_size);
747  };
748 
749  auto get_schur_block_list = [&](auto &block_indexing) {
750  std::vector<const SchurElemMats *> block_list;
751  block_list.reserve(storage.size());
752  for (auto &s : storage) {
753  if (block_indexing.find(s->uidRow) == block_indexing.end() &&
754  block_indexing.find(s->uidCol) == block_indexing.end()) {
755  block_list.push_back(s);
756  }
757  }
758  return block_list;
759  };
760 
761  auto bi = get_block_indexing(a00_uids);
762  auto &[block_list, block_indexing, block_mat_size] = bi;
763 
764  if (block_mat_size == 0) {
765 
766  if (schurAO) {
767  for (auto &s : storage) {
768  CHKERR AOApplicationToPetsc(schurAO, s->getRowInd().size(),
769  &*s->getRowInd().begin());
770  CHKERR AOApplicationToPetsc(schurAO, s->getColInd().size(),
771  &*s->getColInd().begin());
772  }
773  }
774 
775  for (auto &s : storage) {
776  auto &m = s->getMat();
777  CHKERR assemble_schur(m, s->uidRow, s->uidCol, &(s->getRowInd()),
778  &(s->getColInd()));
779  }
781  }
782 
783  blockMat.resize(block_mat_size, block_mat_size, false);
784  blockMat.clear();
785 
786  auto get_range = [](auto &bi) {
787  return range(bi.first, bi.first + bi.second->size());
788  };
789 
790  for (auto &s : block_list) {
791  auto &m = s->getMat();
792 #ifndef NDEBUG
793  if (block_indexing.find(s->uidRow) == block_indexing.end())
794  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "Wrong rlo_uid");
795  if (block_indexing.find(s->uidCol) == block_indexing.end())
796  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "Wrong clo_uid");
797 #endif // NDEBUG
798 
799  auto &rbi = block_indexing.at(s->uidRow);
800  auto &cbi = block_indexing.at(s->uidCol);
801 
802  auto sub_mat = matrix_range(blockMat, get_range(rbi), get_range(cbi));
803  sub_mat = m;
804  }
805 
806  auto get_zeroed_indices = [&](auto extractor_uid, auto extractor_ind) {
807  auto &[block_list, block_indexing, block_mat_size] = bi;
808  std::vector<int> zeroed_indices;
809  zeroed_indices.reserve(block_mat_size);
810  for (auto &s : block_list) {
811  auto &bi = block_indexing.at(extractor_uid(s));
812  auto &ind = extractor_ind(s);
813  for (auto it = ind.begin(); it != ind.end(); ++it) {
814  if (*it < 0) {
815  auto idx = bi.first + std::distance(ind.begin(), it);
816  zeroed_indices.push_back(idx);
817  }
818  }
819  }
820  std::sort(zeroed_indices.begin(), zeroed_indices.end());
821  auto it = std::unique(zeroed_indices.begin(), zeroed_indices.end());
822  zeroed_indices.resize(std::distance(zeroed_indices.begin(), it));
823  return zeroed_indices;
824  };
825  auto zero_rows = get_zeroed_indices(
826  [](auto &s) { return s->uidRow; },
827  [](auto &s) -> VectorInt & { return s->getRowInd(); });
828  auto zero_cols = get_zeroed_indices(
829  [](auto &s) { return s->uidCol; },
830  [](auto &s) -> VectorInt & { return s->getColInd(); });
831 
832  for (auto i : zero_rows) {
833  for (auto j = 0; j != blockMat.size2(); ++j) {
834  blockMat(i, j) = 0;
835  }
836  }
837  for (auto j : zero_cols) {
838  for (auto i = 0; i != blockMat.size1(); ++i) {
839  blockMat(i, j) = 0;
840  }
841  }
842  for (auto i : zero_rows) {
843  blockMat(i, i) = 1;
844  }
845 
846  CHKERR I::invertMat(blockMat, invMat);
847 
848  // clear a00 blocks from storage and block list; they are no longer needed
849  for (auto &s : block_list) {
850  auto it = storage.template get<SchurElemMats::uid_mi_tag>().find(
851  boost::make_tuple(s->uidRow, s->uidCol));
852  storage.template get<SchurElemMats::uid_mi_tag>().erase(it);
853  }
854  block_list.clear();
855 
856  if (schurAO) {
857  for (auto &s : storage) {
858  if (block_indexing.find(s->uidRow) == block_indexing.end()) {
859  CHKERR AOApplicationToPetsc(schurAO, s->getRowInd().size(),
860  &*s->getRowInd().begin());
861  }
862  if (block_indexing.find(s->uidCol) == block_indexing.end()) {
863  CHKERR AOApplicationToPetsc(schurAO, s->getColInd().size(),
864  &*s->getColInd().begin());
865  }
866  }
867  }
868 
869  auto schur_block_list = get_schur_block_list(block_indexing);
870  for (auto &s : schur_block_list) {
871  auto &m = s->getMat();
872  CHKERR assemble_schur(m, s->uidRow, s->uidCol, &(s->getRowInd()),
873  &(s->getColInd()));
874  }
875  for (auto &s : schur_block_list) {
876  auto it = storage.template get<SchurElemMats::uid_mi_tag>().find(
877  boost::make_tuple(s->uidRow, s->uidCol));
878  storage.template get<SchurElemMats::uid_mi_tag>().erase(it);
879  }
880  schur_block_list.clear();
881 
882  for (
883 
884  auto rp_uid_it = a00_uids.begin(); rp_uid_it != a00_uids.end();
885  ++rp_uid_it
886 
887  ) {
888  auto [rlo_uid, rhi_uid] = *rp_uid_it;
889  for (auto rm = block_indexing.lower_bound(rlo_uid);
890  rm != block_indexing.upper_bound(rhi_uid); ++rm) {
891  auto &rbi = rm->second;
892 
893  auto a_lo_tmp =
894  storage.template get<SchurElemMats::col_mi_tag>().lower_bound(
895  rm->first);
896  auto a_hi =
897  storage.template get<SchurElemMats::col_mi_tag>().upper_bound(
898  rm->first);
899 
900  for (
901 
902  auto cp_uid_it = (symSchur) ? rp_uid_it : a00_uids.begin();
903  cp_uid_it != a00_uids.end(); ++cp_uid_it
904 
905  ) {
906  auto [clo_uid, chi_uid] = *cp_uid_it;
907  for (auto cm = block_indexing.lower_bound(clo_uid);
908  cm != block_indexing.upper_bound(chi_uid); ++cm) {
909  auto &cbi = cm->second;
910 
911  auto c_lo_tmp =
912  storage.template get<SchurElemMats::uid_mi_tag>().lower_bound(
913  boost::make_tuple(cm->first, 0));
914  auto c_hi =
915  storage.template get<SchurElemMats::uid_mi_tag>().upper_bound(
916  boost::make_tuple(cm->first, FieldEntity::getHiBitNumberUId(
917  BITFIELDID_SIZE - 1)));
918 
919  auto sub_inv_mat =
920  matrix_range(invMat, get_range(rbi), get_range(cbi));
921  bM.resize(sub_inv_mat.size1(), sub_inv_mat.size2());
922  noalias(bM) = sub_inv_mat;
923 
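 // The two cblas_dgemm calls below compute, for this block pair,
 //   abM  =  a * bM   (i.e.  A10 * inv(A00) restricted to these blocks),
 //   abcM = -abM * c  (i.e. -A10 * inv(A00) * A01),
 // and abcM is then assembled into the Schur matrix.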
924  for (auto a_lo = a_lo_tmp; a_lo != a_hi; ++a_lo) {
925 #ifndef NDEBUG
926  if (block_indexing.find((*a_lo)->uidRow) != block_indexing.end())
927  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
928  "Wrong a_lo->uidRow");
929 #endif
930 
931  auto &a = (*a_lo)->getMat();
932  abM.resize(a.size1(), bM.size2(), false);
933  abM.clear();
934  cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
935 
936  a.size1(), bM.size2(), a.size2(), 1.,
937 
938  &*a.data().begin(), a.size2(),
939 
940  &*bM.data().begin(), bM.size2(), 0.,
941 
942  &*abM.data().begin(), abM.size2());
943 
944  for (auto c_lo = c_lo_tmp; c_lo != c_hi; ++c_lo) {
945 #ifndef NDEBUG
946  if (block_indexing.find((*c_lo)->uidCol) !=
947  block_indexing.end())
948  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
949  "Wrong a_lo->uidRow");
950 #endif
951 
952  auto &c = (*c_lo)->getMat();
953 
954  abcM.resize(abM.size1(), c.size2(), false);
955  abcM.clear();
956 
957  cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
958  abM.size1(), c.size2(), abM.size2(), -1.,
959 
960  &*abM.data().begin(), abM.size2(),
961 
962  &*c.data().begin(), c.size2(), 0.,
963 
964  &*abcM.data().begin(), abcM.size2());
965 
966  CHKERR assemble_schur(abcM, (*a_lo)->uidRow, (*c_lo)->uidCol,
967  &((*a_lo)->getRowInd()),
968  &((*c_lo)->getColInd()));
969 
970  if (symSchur && rp_uid_it != cp_uid_it) {
971  abcM = trans(abcM);
972  CHKERR assemble_schur(abcM, (*c_lo)->uidCol, (*a_lo)->uidRow,
973  &((*c_lo)->getColInd()),
974  &((*a_lo)->getRowInd()));
975  }
976  }
977  }
978  }
979  }
980  }
981  }
982 
984  };
985 
986  // Assemble Schur complements
987  CHKERR assemble_dense_blocks();
988 
989 #ifndef NDEBUG
990  if constexpr (debug_schur)
991  MOFEM_LOG("SELF", Sev::noisy) << "Schur assemble done";
992 #endif
993 
994 #ifndef NDEBUG
995  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_opSchurAssembleEnd, 0, 0, 0, 0);
996 #endif
997 
999 }
1000 
1001 struct SchurDSYSV {
1002 
1003  static auto invertMat(MatrixDouble &m, MatrixDouble &inv) {
1005 
1006  const auto nb = m.size1();
1007 #ifndef NDEBUG
1008  if (nb != m.size2()) {
1009  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
1010  "It should be square matrix %d != %d", nb, m.size2());
1011  }
1012 #endif
1013 
1014  inv.resize(nb, nb, false);
1015  inv.swap(m);
1016  m.resize(2 * nb, 2 * nb, false);
1017 
1018  VectorInt ipiv(nb);
1019  int info;
1020 
1021  // FIXME: add lapack_dsytrf and lapack_dsytri
1022 
1023  info = lapack_dgetrf(nb, nb, &*inv.data().begin(), nb, &*ipiv.begin());
1024  if (info)
1025  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
1026  "lapack error info = %d", info);
1027  info = lapack_dgetri(nb, &*inv.data().begin(), nb, &*ipiv.begin(),
1028  &*m.data().begin(), m.data().size());
1029  if (info)
1030  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
1031  "lapack error info = %d", info);
1032 
1034  }
1035 };
1036 
1037 struct SchurDGESV {
1038 
1039  static auto invertMat(MatrixDouble &m, MatrixDouble &inv) {
1041 
1042  const auto nb = m.size1();
1043 #ifndef NDEBUG
1044  if (nb != m.size2()) {
1045  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
1046  "It should be square matrix %d != %d", nb, m.size2());
1047  }
1048 #endif
1049 
1050  inv.resize(nb, nb, false);
1051  inv.swap(m);
1052 
1053  VectorInt ipiv(nb);
1054  int info;
1055 
1056  info = lapack_dgetrf(nb, nb, &*inv.data().begin(), nb, &*ipiv.begin());
1057  if (info)
1058  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
1059  "lapack error info = %d", info);
1060  info = lapack_dgetri(nb, &*inv.data().begin(), nb, &*ipiv.begin(),
1061  &*m.data().begin(), m.data().size());
1062  if (info)
1063  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
1064  "lapack error info = %d", info);
1065 
1067  }
1068 };
1069 
1073  return doWorkImpl<SchurDSYSV>(side, type, data);
1074 }
1075 
1079  return doWorkImpl<SchurDGESV>(side, type, data);
1080 }
1081 
1082 boost::shared_ptr<BlockStructure> createBlockMatStructure(
1083 
1084  DM dm, //< dm
1085  SchurFEOpsFEandFields schur_fe_op_vec //< block elements
1086 
1087 ) {
1088 
1089  auto cmp_uid_lo = [](const boost::weak_ptr<FieldEntity> &a, const UId &b) {
1090  if (auto a_ptr = a.lock()) {
1091  if (a_ptr->getLocalUniqueId() < b)
1092  return true;
1093  else
1094  return false;
1095  } else {
1096  return false;
1097  }
1098  };
1099 
1100  auto cmp_uid_hi = [](const UId &b, const boost::weak_ptr<FieldEntity> &a) {
1101  if (auto a_ptr = a.lock()) {
1102  if (b < a_ptr->getLocalUniqueId())
1103  return true;
1104  else
1105  return false;
1106  } else {
1107  return true;
1108  }
1109  };
1110 
1111  // get uids for fields
1112  auto get_uid_pair = [](const auto &field_id) {
1113  auto lo_uid = FieldEntity::getLocalUniqueIdCalculate(
1114  field_id, get_id_for_min_type<MBVERTEX>());
1115  auto hi_uid = FieldEntity::getLocalUniqueIdCalculate(
1116  field_id, get_id_for_max_type<MBENTITYSET>());
1117  return std::make_pair(lo_uid, hi_uid);
1118  };
1119 
1120  // get iterator range over field entities for a given uid pair
1121  auto get_it_pair = [cmp_uid_lo, cmp_uid_hi](auto &&field_ents, auto &&p_uid) {
1122  auto lo = std::lower_bound(field_ents.begin(), field_ents.end(),
1123  p_uid.first, cmp_uid_lo);
1124  auto hi = std::upper_bound(field_ents.begin(), field_ents.end(),
1125  p_uid.second, cmp_uid_hi);
1126  return std::make_pair(lo, hi);
1127  };
1128 
1129  // extract DOFs for rows/columns. DOFs are associated with field entities
1130  // of the given problem.
1131  auto row_extractor = [](auto &e) { return e->entityCacheRowDofs; };
1132  auto col_extractor = [](auto &e) { return e->entityCacheColDofs; };
1133 
1134  auto extract_data = [](auto &&its, auto extractor) {
1135  std::vector<std::tuple<UId, int, int, int>> data;
1136  data.reserve(std::distance(its.first, its.second));
1137  // iterate field dofs
1138  for (; its.first != its.second; ++its.first) {
1139  if (auto e = its.first->lock()) {
1140  if (auto cache = extractor(e).lock()) {
1141  int nb_dofs = 0;
1142  for (auto dof = cache->loHi[0]; dof != cache->loHi[1]; ++dof) {
1143  if ((*dof)->getPetscGlobalDofIdx() != -1)
1144  ++nb_dofs;
1145  }
1146  auto uid = e->getLocalUniqueId();
1147  if (nb_dofs) {
1148 
1149  auto glob = (*cache->loHi[0])->getPetscGlobalDofIdx();
1150  auto loc = (*cache->loHi[0])->getPetscLocalDofIdx();
1151  while (glob == -1 && cache->loHi[0] != cache->loHi[1]) {
1152  ++cache->loHi[0];
1153  glob = (*cache->loHi[0])->getPetscGlobalDofIdx();
1154  loc = (*cache->loHi[0])->getPetscLocalDofIdx();
1155  }
1156  data.emplace_back(uid, glob, nb_dofs, loc);
1157 
1158 #ifndef NDEBUG
1159 
1160  for (auto lo = cache->loHi[0]; lo != cache->loHi[1]; ++lo) {
1161  auto glob = (*lo)->getPetscGlobalDofIdx();
1162  if (glob == -1) {
1164  "Wrong global index");
1165  }
1166  }
1167 
1168 #endif
1169  } else {
1170  data.emplace_back(uid, -1, 0, -1);
1171  }
1172  }
1173  }
1174  }
1175  return data;
1176  };
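 // Each tuple produced by extract_data is (entity uid, first global PETSc dof
 // index, number of dofs with a valid global index, first local PETSc dof
 // index); these become the row/column block descriptors inserted into
 // blockIndex below.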
1177 
1178  auto data_ptr = boost::make_shared<BlockStructure>();
1179  auto m_field_ptr = getInterfacePtr(dm);
1180 
1181  // create element to extract data
1182  auto fe_method = boost::shared_ptr<MoFEM::FEMethod>(new MoFEM::FEMethod());
1183 
1184  for (auto &d : schur_fe_op_vec) {
1185 
1186  // extract bit numbers for row and column
1187  auto get_bit_numbers = [&d](auto op) {
1188  std::vector<FieldBitNumber> bit_numbers(d.second.size());
1189  std::transform(d.second.begin(), d.second.end(), bit_numbers.begin(), op);
1190  return bit_numbers;
1191  };
1192 
1193  // extract bit numbers for row
1194  auto row_bit_numbers = get_bit_numbers(
1195  [&](auto &p) { return m_field_ptr->get_field_bit_number(p.first); });
1196  // extract bit numbers for column
1197  auto col_bit_numbers = get_bit_numbers(
1198  [&](auto &p) { return m_field_ptr->get_field_bit_number(p.second); });
1199 
1200  fe_method->preProcessHook = []() { return 0; };
1201  fe_method->postProcessHook = []() { return 0; };
1202  fe_method->operatorHook = [&]() {
1204 
1205  auto fe_uid = fe_method->numeredEntFiniteElementPtr->getLocalUniqueId();
1206 
1207  for (auto f = 0; f != row_bit_numbers.size(); ++f) {
1208 
1209  auto row_data =
1210  extract_data(get_it_pair(fe_method->getRowFieldEnts(),
1211  get_uid_pair(row_bit_numbers[f])),
1212  row_extractor);
1213  auto col_data =
1214  extract_data(get_it_pair(fe_method->getColFieldEnts(),
1215  get_uid_pair(col_bit_numbers[f])),
1216  col_extractor);
1217 
1218  for (auto &r : row_data) {
1219  auto [r_uid, r_glob, r_nb_dofs, r_loc] = r;
1220  for (auto &c : col_data) {
1221  auto [c_uid, c_glob, c_nb_dofs, c_loc] = c;
1222  data_ptr->blockIndex.insert(BlockStructure::Indexes{
1223  r_uid, c_uid, fe_uid, r_glob, c_glob, r_nb_dofs, c_nb_dofs,
1224  r_loc, c_loc, -1});
1225  }
1226  }
1227  }
1228 
1230  };
1231 
1232  CHKERR DMoFEMLoopFiniteElementsUpAndLowRank(dm, d.first, fe_method, 0,
1233  m_field_ptr->get_comm_size());
1234  };
1235 
1236  // order by column (that is for matrix multiplication)
1237  auto mem_size = 0;
1238  for (auto &v : data_ptr->blockIndex.get<0>()) {
1239  v.getMatShift() = mem_size;
1240  mem_size += v.getNbCols() * v.getNbRows();
1241  }
1242 
1243  std::vector<double> tmp;
1244  if (mem_size > tmp.max_size())
1246 
1247  data_ptr->dataBlocksPtr =
1248  boost::make_shared<std::vector<double>>(mem_size, 0);
1249 
1250  auto ghost_x = createDMVector(dm);
1251  auto ghost_y = createDMVector(dm);
1252  CHKERR VecSetDM(ghost_x, PETSC_NULL);
1253  CHKERR VecSetDM(ghost_y, PETSC_NULL);
1254 
1255  data_ptr->ghostX = ghost_x;
1256  data_ptr->ghostY = ghost_y;
1257 
1258  return data_ptr;
1259 }
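// A minimal usage sketch, assuming SchurFEOpsFEandFields pairs a finite
// element name with its (row, col) field-name pairs; "ELASTIC" and "U" are
// illustrative names only:
//
//   auto block_data = createBlockMatStructure(dm, {{"ELASTIC", {{"U", "U"}}}});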
1260 
1262  Mat mat, Vec x, Vec y, InsertMode iora,
1263  boost::function<
1264  int(DiagBlockIndex::BlockIndex::nth_index<0>::type::iterator)>
1265  shift_extractor,
1266  boost::shared_ptr<std::vector<double>> data_blocks_ptr,
1267  bool multiply_by_preconditioner);
1268 
1269 static MoFEMErrorCode solve_schur_block_shell(Mat mat, Vec y, Vec x,
1270  InsertMode iora);
1271 
1272 static PetscErrorCode mult(Mat mat, Vec x, Vec y) {
1273  BlockStructure *ctx;
1274  CHKERR MatShellGetContext(mat, (void **)&ctx);
1275  return mult_schur_block_shell(
1276  mat, x, y, INSERT_VALUES,
1277 
1278  [](DiagBlockIndex::BlockIndex::nth_index<0>::type::iterator it) {
1279  return it->getMatShift();
1280  },
1281 
1282  ctx->dataBlocksPtr, true);
1283 }
1284 static PetscErrorCode mult_add(Mat mat, Vec x, Vec y) {
1285  BlockStructure *ctx;
1286  CHKERR MatShellGetContext(mat, (void **)&ctx);
1287  return mult_schur_block_shell(
1288  mat, x, y, ADD_VALUES,
1289 
1290  [](DiagBlockIndex::BlockIndex::nth_index<0>::type::iterator it) {
1291  return it->getMatShift();
1292  },
1293 
1294  ctx->dataBlocksPtr, true);
1295 }
1296 static PetscErrorCode solve(Mat mat, Vec x, Vec y) {
1297  return solve_schur_block_shell(mat, x, y, INSERT_VALUES);
1298 }
1299 static PetscErrorCode solve_add(Mat mat, Vec x, Vec y) {
1300  return solve_schur_block_shell(mat, x, y, ADD_VALUES);
1301 }
1302 
1303 static PetscErrorCode zero_rows_columns(Mat A, PetscInt N,
1304  const PetscInt rows[], PetscScalar diag,
1305  Vec x, Vec b) {
1306 
1308  BlockStructure *ctx;
1309  CHKERR MatShellGetContext(A, (void **)&ctx);
1310 
1311  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_zeroRowsAndCols, 0, 0, 0, 0);
1312 
1313  using BlockIndexView = multi_index_container<
1314 
1315  const DiagBlockIndex::Indexes *,
1316 
1317  indexed_by<
1318 
1319  ordered_non_unique<
1320 
1321  const_mem_fun<DiagBlockIndex::Indexes, int,
1322  &DiagBlockIndex::Indexes::rowShift>
1323 
1324  >,
1325 
1326  ordered_non_unique<
1327 
1328  const_mem_fun<DiagBlockIndex::Indexes, int,
1329  &DiagBlockIndex::Indexes::colShift>
1330 
1331  >
1332 
1333  >>;
1334 
1335  BlockIndexView view;
1336  auto hint = view.get<0>().end();
1337  for (auto &v : ctx->blockIndex) {
1338  hint = view.insert(hint, &v);
1339  }
1340 
1341  const int *ptr = &rows[0];
1342  int is_nb_rows = N;
1343  SmartPetscObj<IS> is_local;
1344 
1345  MPI_Comm comm;
1346  CHKERR PetscObjectGetComm((PetscObject)A, &comm);
1347  int size;
1348  MPI_Comm_size(comm, &size);
1349  if (size > 1) {
1350  auto is = createISGeneral(comm, N, rows, PETSC_USE_POINTER);
1351  is_local = isAllGather(is);
1352  }
1353  if (is_local) {
1354  CHKERR ISGetSize(is_local, &is_nb_rows);
1355 #ifndef NDEBUG
1356  if constexpr (debug_schur) {
1357  CHKERR ISView(is_local, PETSC_VIEWER_STDOUT_WORLD);
1358  }
1359 #endif
1360  CHKERR ISGetIndices(is_local, &ptr);
1361  }
1362 
1363  int loc_m, loc_n;
1364  CHKERR MatGetLocalSize(A, &loc_m, &loc_n);
1365 
1366  for (auto n = 0; n != is_nb_rows; ++n) {
1367  auto row = ptr[n];
1368  auto rlo = view.get<0>().lower_bound(row);
1369  auto rhi = view.get<0>().end();
1370  for (; rlo != rhi; ++rlo) {
1371  auto r_shift = row - (*rlo)->getRow();
1372  if (r_shift >= 0 && r_shift < (*rlo)->getNbRows()) {
1373  auto *ptr = &(*ctx->dataBlocksPtr)[(*rlo)->getMatShift()];
1374  for (auto i = 0; i != (*rlo)->getNbCols(); ++i) {
1375  ptr[i + r_shift * (*rlo)->getNbCols()] = 0;
1376  }
1377  } else if ((*rlo)->getRow() + (*rlo)->getNbRows() > row) {
1378  break;
1379  }
1380  }
1381  }
1382 
1383  for (auto n = 0; n != is_nb_rows; ++n) {
1384  auto col = ptr[n];
1385  auto clo = view.get<1>().lower_bound(col);
1386  auto chi = view.get<1>().end();
1387  for (; clo != chi; ++clo) {
1388  auto c_shift = col - (*clo)->getCol();
1389  if (c_shift >= 0 && c_shift < (*clo)->getNbCols()) {
1390 
1391  auto *ptr = &(*ctx->dataBlocksPtr)[(*clo)->getMatShift()];
1392  for (auto i = 0; i != (*clo)->getNbRows(); ++i) {
1393  ptr[c_shift + i * (*clo)->getNbCols()] = 0;
1394  }
1395 
1396  // diagonal
1397  if (
1398 
1399  (*clo)->getRow() == (*clo)->getCol() &&
1400  (*clo)->getLocRow() < loc_m && (*clo)->getLocCol() < loc_n
1401 
1402  ) {
1403  auto r_shift = col - (*clo)->getCol();
1404  if (r_shift >= 0 && r_shift < (*clo)->getNbRows()) {
1405  ptr[c_shift + r_shift * (*clo)->getNbCols()] = diag;
1406  }
1407  }
1408  } else if ((*clo)->getCol() + (*clo)->getNbCols() > col) {
1409  break;
1410  }
1411  }
1412  }
1413 
1414  if (is_local) {
1415  CHKERR ISRestoreIndices(is_local, &ptr);
1416  }
1417 
1418  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_zeroRowsAndCols, 0, 0, 0, 0);
1419 
1421 }
1422 
1423 static PetscErrorCode mat_zero(Mat m) {
1425  BlockStructure *ctx;
1426  CHKERR MatShellGetContext(m, (void **)&ctx);
1427  if (ctx->dataBlocksPtr)
1428  std::fill(ctx->dataBlocksPtr->begin(), ctx->dataBlocksPtr->end(), 0.);
1429  if (ctx->preconditionerBlocksPtr)
1430  std::fill(ctx->preconditionerBlocksPtr->begin(),
1431  ctx->preconditionerBlocksPtr->end(), 0.);
1433 }
1434 
1437  CHKERR MatShellSetManageScalingShifts(mat_raw);
1438  CHKERR MatShellSetOperation(mat_raw, MATOP_MULT, (void (*)(void))mult);
1439  CHKERR MatShellSetOperation(mat_raw, MATOP_MULT_ADD,
1440  (void (*)(void))mult_add);
1441  CHKERR MatShellSetOperation(mat_raw, MATOP_SOLVE, (void (*)(void))solve);
1442  CHKERR MatShellSetOperation(mat_raw, MATOP_SOLVE_ADD,
1443  (void (*)(void))solve_add);
1444  CHKERR MatShellSetOperation(mat_raw, MATOP_ZERO_ENTRIES,
1445  (void (*)(void))mat_zero);
1446  CHKERR MatShellSetOperation(mat_raw, MATOP_ZERO_ROWS_COLUMNS,
1447  (void (*)(void))zero_rows_columns);
1448 
1450 };
1451 
1453  boost::shared_ptr<BlockStructure> data) {
1454 
1455  auto problem_ptr = getProblemPtr(dm);
1456  auto nb_local = problem_ptr->nbLocDofsRow;
1457  auto nb_global = problem_ptr->nbDofsRow;
1458 
1459  // check that nb. of rows equals nb. of columns
1460  if (nb_local != problem_ptr->nbLocDofsCol) {
1461  MOFEM_LOG("SELF", Sev::error)
1462  << "Wrong size " << nb_local << " != " << problem_ptr->nbLocDofsCol;
1464  "nb. cols is inconsistent with nb. rows");
1465  }
1466  if (nb_global != problem_ptr->nbDofsCol) {
1467  MOFEM_LOG("SELF", Sev::error)
1468  << "Wrong size " << nb_global << " != " << problem_ptr->nbDofsCol;
1470  "nb. cols is inconsistent with nb. rows");
1471  }
1472 
1473  // get comm from DM
1474  MPI_Comm comm;
1475  CHKERR PetscObjectGetComm((PetscObject)dm, &comm);
1476 
1477  Mat mat_raw;
1478  CHKERR MatCreateShell(comm, nb_local, nb_local, nb_global, nb_global,
1479  (void *)data.get(), &mat_raw);
1480  CHKERR setSchurBlockMatOps(mat_raw);
1481  // CHKERR PetscObjectSetName((PetscObject)mat_raw, MoFEM_BLOCK_MAT);
1482 
1483  return std::make_pair(SmartPetscObj<Mat>(mat_raw), data);
1484 }
1485 
1486 constexpr int max_gemv_size = 2;
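// Blocks whose smaller dimension exceeds max_gemv_size are multiplied with
// cblas_dgemv below; for smaller blocks the explicit loops are used,
// presumably to avoid the overhead of a BLAS call.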
1487 
1489  Mat mat, Vec x, Vec y, InsertMode iora,
1490 
1491  boost::function<
1492  int(DiagBlockIndex::BlockIndex::nth_index<0>::type::iterator)>
1493  shift_extractor,
1494 
1495  boost::shared_ptr<std::vector<double>> data_blocks_ptr,
1496  bool multiply_by_preconditioner) {
1498  BlockStructure *ctx;
1499  CHKERR MatShellGetContext(mat, (void **)&ctx);
1500 
1501  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_BlockStructureMult, 0, 0, 0, 0);
1502 
1503  int x_loc_size;
1504  CHKERR VecGetLocalSize(x, &x_loc_size);
1505  int y_loc_size;
1506  CHKERR VecGetLocalSize(y, &y_loc_size);
1507 
1508  Vec ghost_x = ctx->ghostX;
1509  Vec ghost_y = ctx->ghostY;
1510 
1511  CHKERR VecCopy(x, ghost_x);
1512 
1513  double *y_array;
1514  Vec loc_ghost_y;
1515  CHKERR VecGhostGetLocalForm(ghost_y, &loc_ghost_y);
1516  int nb_y;
1517  CHKERR VecGetLocalSize(loc_ghost_y, &nb_y);
1518  CHKERR VecGetArray(loc_ghost_y, &y_array);
1519  for (auto i = 0; i != nb_y; ++i)
1520  y_array[i] = 0.;
1521  CHKERR VecRestoreArray(loc_ghost_y, &y_array);
1522  CHKERR VecGhostRestoreLocalForm(ghost_y, &loc_ghost_y);
1523 
1524  auto mult = [&](int low_x, int hi_x, int low_y, int hi_y) {
1526 
1527  double *x_array;
1528  Vec loc_ghost_x;
1529  CHKERR VecGhostGetLocalForm(ghost_x, &loc_ghost_x);
1530  CHKERR VecGetArray(loc_ghost_x, &x_array);
1531 
1532  double *y_array;
1533  Vec loc_ghost_y;
1534  CHKERR VecGhostGetLocalForm(ghost_y, &loc_ghost_y);
1535  int nb_y;
1536  CHKERR VecGetLocalSize(loc_ghost_y, &nb_y);
1537  CHKERR VecGetArray(loc_ghost_y, &y_array);
1538 
1539  double *block_ptr = &*data_blocks_ptr->begin();
1540  auto it = ctx->blockIndex.get<0>().begin();
1541  auto hi = ctx->blockIndex.get<0>().end();
1542 
1543  while (it != hi) {
1544  if (it->getLocRow() < low_y || it->getLocRow() >= hi_y ||
1545  it->getLocCol() < low_x || it->getLocCol() >= hi_x) {
1546  ++it;
1547  continue;
1548  }
1549 
1550  auto nb_rows = it->getNbRows();
1551  auto nb_cols = it->getNbCols();
1552  auto x_ptr = &x_array[it->getLocCol()];
1553  auto y_ptr = &y_array[it->getLocRow()];
1554  auto ptr = &block_ptr[shift_extractor(it)];
1555 
1556  if (std::min(nb_rows, nb_cols) > max_gemv_size) {
1557  cblas_dgemv(CblasRowMajor, CblasNoTrans, nb_rows, nb_cols, 1.0, ptr,
1558  nb_cols, x_ptr, 1, 1.0, y_ptr, 1);
1559  } else {
1560  for (auto r = 0; r != nb_rows; ++r) {
1561  for (auto c = 0; c != nb_cols; ++c) {
1562  y_ptr[r] += ptr[r * nb_cols + c] * x_ptr[c];
1563  }
1564  }
1565  }
1566  ++it;
1567  }
1568 
1569  if (multiply_by_preconditioner && ctx->multiplyByPreconditioner) {
1570 
1571  if (!ctx->preconditionerBlocksPtr)
1572  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
1573  "No parentBlockStructurePtr");
1574 
1575  auto preconditioner_ptr = &*ctx->preconditionerBlocksPtr->begin();
1576 
1577  auto it = ctx->blockIndex.get<0>().begin();
1578  auto hi = ctx->blockIndex.get<0>().end();
1579 
1580  while (it != hi) {
1581  if (it->getLocRow() < low_y || it->getLocRow() >= hi_y ||
1582  it->getLocCol() < low_x || it->getLocCol() >= hi_x) {
1583  ++it;
1584  continue;
1585  }
1586 
1587  if (it->getMatShift() != -1) {
1588  auto nb_rows = it->getNbRows();
1589  auto nb_cols = it->getNbCols();
1590  auto x_ptr = &x_array[it->getLocCol()];
1591  auto y_ptr = &y_array[it->getLocRow()];
1592  auto ptr = &preconditioner_ptr[it->getMatShift()];
1593  if (std::min(nb_rows, nb_cols) > max_gemv_size) {
1594  cblas_dgemv(CblasRowMajor, CblasNoTrans, nb_rows, nb_cols, 1.0, ptr,
1595  nb_cols, x_ptr, 1, 1.0, y_ptr, 1);
1596  } else {
1597  for (auto r = 0; r != nb_rows; ++r) {
1598  for (auto c = 0; c != nb_cols; ++c) {
1599  y_ptr[r] += ptr[r * nb_cols + c] * x_ptr[c];
1600  }
1601  }
1602  }
1603  }
1604 
1605  ++it;
1606  }
1607  }
1608 
1609  CHKERR VecRestoreArray(loc_ghost_x, &x_array);
1610  CHKERR VecRestoreArray(loc_ghost_y, &y_array);
1611  CHKERR VecGhostRestoreLocalForm(ghost_x, &loc_ghost_x);
1612  CHKERR VecGhostRestoreLocalForm(ghost_y, &loc_ghost_y);
1614  };
1615 
1616  constexpr auto max_int = std::numeric_limits<int>::max();
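  // The ghost update of x is overlapped with computation: blocks that need
  // only owned entries of x (local columns [0, x_loc_size)) are multiplied
  // while the forward scatter is in flight; blocks touching ghosted columns
  // are handled after VecGhostUpdateEnd.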
1617  CHKERR VecGhostUpdateBegin(ghost_x, INSERT_VALUES, SCATTER_FORWARD);
1618  CHKERR mult(0, x_loc_size, 0, max_int);
1619  CHKERR VecGhostUpdateEnd(ghost_x, INSERT_VALUES, SCATTER_FORWARD);
1620  CHKERR mult(x_loc_size, max_int, 0, max_int);
1621 
1622  CHKERR VecGhostUpdateBegin(ghost_y, ADD_VALUES, SCATTER_REVERSE);
1623  CHKERR VecGhostUpdateEnd(ghost_y, ADD_VALUES, SCATTER_REVERSE);
1624 
1625  switch (iora) {
1626  case INSERT_VALUES:
1627  CHKERR VecCopy(ghost_y, y);
1628  break;
1629  case ADD_VALUES:
1630  CHKERR VecAXPY(y, 1., ghost_y);
1631  break;
1632  default:
1633  CHK_THROW_MESSAGE(MOFEM_NOT_IMPLEMENTED, "Wrong InsertMode");
1634  }
1635 
1636 #ifndef NDEBUG
1637 
1638  auto print_norm = [&](auto msg, auto y) {
1640  PetscReal norm;
1641  CHKERR VecNorm(y, NORM_2, &norm);
1642  int nb_loc_y;
1643  CHKERR VecGetLocalSize(y, &nb_loc_y);
1644  int nb_y;
1645  CHKERR VecGetSize(y, &nb_y);
1646  MOFEM_LOG("WORLD", Sev::noisy)
1647  << msg << " " << nb_y << " " << nb_loc_y << " norm " << norm;
1649  };
1650 
1651  switch (iora) {
1652  case INSERT_VALUES:
1653  print_norm("mult_schur_block_shell insert x", x);
1654  print_norm("mult_schur_block_shell insert y", y);
1655  break;
1656  case ADD_VALUES:
1657  print_norm("mult_schur_block_shell add x", x);
1658  print_norm("mult_schur_block_shell add y", y);
1659  break;
1660  default:
1661  CHK_THROW_MESSAGE(MOFEM_NOT_IMPLEMENTED, "Wrong InsertMode");
1662  }
1663 
1664 #endif // NDEBUG
1665 
1666  // PetscLogFlops(xxx)
1667  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_BlockStructureMult, 0, 0, 0, 0);
1668 
1670 }
1671 
1673  InsertMode iora) {
1674  using matrix_range = ublas::matrix_range<MatrixDouble>;
1675  using range = ublas::range;
1677  BlockStructure *ctx;
1678  CHKERR MatShellGetContext(mat, (void **)&ctx);
1679 
1680  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_BlockStructureSolve, 0, 0, 0, 0);
1681 
1682  if (ctx->multiplyByPreconditioner) {
1683  if (!ctx->preconditionerBlocksPtr)
1684  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
1685  "No preconditionerBlocksPtr");
1686  }
1687 
1688  if (iora == INSERT_VALUES) {
1689  CHKERR VecZeroEntries(x);
1690  }
1691 
1692  double *x_array;
1693  CHKERR VecGetArray(x, &x_array);
1694  double *y_array;
1695  CHKERR VecGetArray(y, &y_array);
1696 
1697  std::vector<const DiagBlockIndex::Indexes *> blocks;
1699  VectorDouble block_y;
1700  VectorInt ipiv;
1701 
1702  auto &block_index = ctx->blockIndex.get<1>();
1703  for (auto it = block_index.begin(); it != block_index.end();) {
1704 
1705  // get blocks on finite element
1706  blocks.clear();
1707  auto last_uid = it->getFEUId();
1708  while (it != block_index.end() && it->getFEUId() == last_uid) {
1709  blocks.push_back(&*it);
1710  ++it;
1711  }
1712 
1713  // set local indices
1714  std::map<UId, std::tuple<int, int, int>> block_indexing; // uid block map
1715  for (auto b : blocks) {
1716  if (block_indexing.find(b->getRowUId()) == block_indexing.end()) {
1717  block_indexing[b->getRowUId()] =
1718  std::make_tuple(b->getNbRows(), b->getNbRows(), b->getLocRow());
1719  }
1720  }
1721  std::sort(blocks.begin(), blocks.end(), [](auto a, auto b) {
1722  if (a->getRowUId() == b->getRowUId())
1723  return a->getColUId() < b->getColUId();
1724  else
1725  return a->getRowUId() < b->getRowUId();
1726  });
1727 
1728  // set indexes to block
1729  int mat_block_size = 0; // size of block matrix
1730  for (auto &b : block_indexing) {
1731  auto &[index, size, loc] = b.second;
1732  index = mat_block_size;
1733  mat_block_size += size;
1734  }
1735 
1736  std::vector<std::tuple<int, int, int, int, int>> block_data;
1737  block_data.reserve(blocks.size());
1738  for (auto s : blocks) {
1739  auto ruid = s->getRowUId();
1740  auto cuid = s->getColUId();
1741  auto &rbi = block_indexing.at(ruid);
1742  auto &cbi = block_indexing.at(cuid);
1743  if (auto shift = s->getMatShift(); shift != -1) {
1744  block_data.push_back(std::make_tuple(
1745 
1746  shift,
1747 
1748  s->getNbRows(), s->getNbCols(),
1749 
1750  get<0>(rbi), get<0>(cbi))
1751 
1752  );
1753  }
1754  }
1755  block_mat.resize(mat_block_size, mat_block_size, false);
1756  block_mat.clear();
1757  for (auto &bd : block_data) {
1758  auto &[shift, nb_rows, nb_cols, ridx, cidx] = bd;
1759  auto ptr = &(*(ctx->dataBlocksPtr))[shift];
1760  for (auto i = 0; i != nb_rows; ++i, ptr += nb_cols) {
1761  auto sub_ptr = &block_mat(ridx + i, cidx);
1762  cblas_dcopy(nb_cols, ptr, 1, sub_ptr, 1);
1763  }
1764  }
1765  if (ctx->multiplyByPreconditioner) {
1766  for (auto &bd : block_data) {
1767  auto &[shift, nb_rows, nb_cols, ridx, cidx] = bd;
1768  auto ptr = &(*(ctx->preconditionerBlocksPtr))[shift];
1769  for (auto i = 0; i != nb_rows; ++i, ptr += nb_cols) {
1770  auto sub_ptr = &block_mat(ridx + i, cidx);
1771  cblas_daxpy(nb_cols, 1., ptr, 1, sub_ptr, 1);
1772  }
1773  }
1774  }
1775  block_mat = trans(block_mat);
1776 
1777  block_y.resize(mat_block_size, false);
1778  block_y.clear();
1779 
1780  for (auto &b : block_indexing) {
1781  auto [index, size, loc] = b.second;
1782  auto ptr = &y_array[loc];
1783  cblas_dcopy(size, ptr, 1, &block_y[index], 1);
1784  }
1785 
1786  // note: this does not exploit symmetry; exploiting it would require more implementation
1787  constexpr int nrhs = 1;
1788  ipiv.resize(mat_block_size, false);
1789  auto info = lapack_dgesv(mat_block_size, nrhs, &*block_mat.data().begin(),
1790  mat_block_size, &*ipiv.data().begin(),
1791  &*block_y.data().begin(), mat_block_size);
1792  if (info != 0) {
1793  MOFEM_LOG("SELF", Sev::error)
1794  << "error lapack solve dgesv info = " << info;
1795  MOFEM_LOG("SELF", Sev::error) << block_mat;
1796  SETERRQ1(PETSC_COMM_SELF, MOFEM_OPERATION_UNSUCCESSFUL,
1797  "error lapack solve dgesv info = %d", info);
1798  }
1799 
1800  for (auto &b : block_indexing) {
1801  auto [index, size, loc] = b.second;
1802  auto ptr = &x_array[loc];
1803  cblas_daxpy(size, 1.0, &block_y[index], 1, ptr, 1);
1804  }
1805  }
1806 
1807  CHKERR VecRestoreArray(x, &x_array);
1808  CHKERR VecRestoreArray(y, &y_array);
1809 
1810  // PetscLogFlops(xxx)
1811  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_BlockStructureSolve, 0, 0, 0, 0);
1812 
1814 }
1815 
1818  std::vector<std::string> fields_name,
1819  std::vector<boost::shared_ptr<Range>> field_ents,
1820  SmartPetscObj<AO> ao) {
1821  using matrix_range = ublas::matrix_range<MatrixDouble>;
1822  using range = ublas::range;
1824  BlockStructure *ctx;
1825  CHKERR MatShellGetContext(B, (void **)&ctx);
1826 
1827  constexpr bool debug = false;
1828 
1829  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_AssembleSchurMat, 0, 0, 0, 0);
1830 
1831  if (ao.use_count() == 0) {
1832  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "No AO set");
1833  }
1834 
1835  if (ctx->multiplyByPreconditioner) {
1836  if (!ctx->preconditionerBlocksPtr)
1837  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
1838  "No preconditionerBlocksPtr");
1839  }
1840 
1841  std::vector<std::pair<UId, UId>> a00_fields_uids;
1842  a00_fields_uids.reserve(fields_name.size());
1843  auto it_field_ents = field_ents.begin();
1844  if (fields_name.size() != field_ents.size()) {
1845  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
1846  "fields_name.size() != field_ents.size()");
1847  }
1848  for (auto &f : fields_name) {
1849  int bn = m_field.get_field_bit_number(f);
1850  if (*it_field_ents) {
1851  for (auto p = (*it_field_ents)->pair_begin();
1852  p != (*it_field_ents)->pair_end(); ++p) {
1853  a00_fields_uids.emplace_back(
1854  DofEntity::getLoFieldEntityUId(bn, p->first),
1855  DofEntity::getHiFieldEntityUId(bn, p->second));
1856  }
1857  } else {
1858  a00_fields_uids.emplace_back(FieldEntity::getLoBitNumberUId(bn),
1859  FieldEntity::getHiBitNumberUId(bn));
1860  }
1861  ++it_field_ents;
1862  }
1863 
1864  std::vector<const DiagBlockIndex::Indexes *> fe_blocks;
1865  std::vector<const DiagBlockIndex::Indexes *> a00_blocks;
1866  std::vector<const DiagBlockIndex::Indexes *> a01_blocks;
1867  std::vector<const DiagBlockIndex::Indexes *> a10_blocks;
1868  std::vector<const DiagBlockIndex::Indexes *> a11_blocks;
1869  MatrixDouble block_mat_a00;
1870  MatrixDouble block_mat_a01;
1871  MatrixDouble block_mat_a10;
1872  MatrixDouble block_mat_a11;
1873  VectorInt ipiv;
1874  std::vector<int> glob_idx;
1875  std::vector<int> row_glob_idx, col_glob_idx;
1876 
1877  auto &block_index = ctx->blockIndex.get<1>();
1878  for (auto it = block_index.begin(); it != block_index.end();) {
1879 
1880  // get blocks on finite element
1881  fe_blocks.clear();
1882  a00_blocks.clear();
1883  a01_blocks.clear();
1884  a10_blocks.clear();
1885  a11_blocks.clear();
1886 
1887  auto last_uid = it->getFEUId();
1888  while (it != block_index.end() && it->getFEUId() == last_uid) {
1889  fe_blocks.push_back(&*it);
1890  ++it;
1891  }
1892 
1893  for (auto b_ptr : fe_blocks) {
1894  auto check_id = [&](auto uid) {
1895  for (auto &uid_pair : a00_fields_uids) {
1896  if (uid >= uid_pair.first && uid < uid_pair.second) {
1897  return true;
1898  }
1899  }
1900  return false;
1901  };
1902  auto r00 = check_id(b_ptr->getRowUId());
1903  auto c00 = check_id(b_ptr->getColUId());
1904  if (r00 && c00) {
1905  a00_blocks.push_back(b_ptr);
1906  } else if (r00 && !c00) {
1907  a01_blocks.push_back(b_ptr);
1908  } else if (!r00 && c00) {
1909  a10_blocks.push_back(b_ptr);
1910  } else {
1911  a11_blocks.push_back(b_ptr);
1912  }
1913  }
1914 
1915  for (auto b : a11_blocks) {
1916 
1917  row_glob_idx.resize(b->getNbRows());
1918  col_glob_idx.resize(b->getNbCols());
1919 
1920  std::iota(row_glob_idx.begin(), row_glob_idx.end(), b->getRow());
1921  std::iota(col_glob_idx.begin(), col_glob_idx.end(), b->getCol());
1922  CHKERR AOApplicationToPetsc(ao, col_glob_idx.size(), col_glob_idx.data());
1923  CHKERR AOApplicationToPetsc(ao, row_glob_idx.size(), row_glob_idx.data());
1924 
1925  auto shift = b->getMatShift();
1926  if (shift != -1) {
1927  auto ptr = &(*(ctx->dataBlocksPtr))[shift];
1928  CHKERR MatSetValues(S, row_glob_idx.size(), row_glob_idx.data(),
1929  col_glob_idx.size(), col_glob_idx.data(), ptr,
1930  ADD_VALUES);
1931  if (ctx->multiplyByPreconditioner) {
1932  auto ptr = &(*(ctx->preconditionerBlocksPtr))[shift];
1933  CHKERR MatSetValues(S, row_glob_idx.size(), row_glob_idx.data(),
1934  col_glob_idx.size(), col_glob_idx.data(), ptr,
1935  ADD_VALUES);
1936  }
1937  }
1938  }
1939 
1940  if (debug) {
1941  MOFEM_LOG("WORLD", Sev::warning)
1942  << "a00_blocks.size() " << a00_blocks.size();
1943  MOFEM_LOG("WORLD", Sev::warning)
1944  << "a01_blocks.size() " << a01_blocks.size();
1945  MOFEM_LOG("WORLD", Sev::warning)
1946  << "a10_blocks.size() " << a10_blocks.size();
1947  MOFEM_LOG("WORLD", Sev::warning)
1948  << "a11_blocks.size() " << a11_blocks.size();
1949  }
1950 
1951  if (a00_blocks.size()) {
1952 
1953  for (auto r : a00_blocks) {
1954  auto r_uid = r->getRowUId();
1955  auto range = ctx->blockIndex.get<2>().equal_range(r_uid);
1956  for (auto it = range.first; it != range.second; ++it) {
1957  if (it->getFEUId() != last_uid && it->getColUId() != r_uid) {
1958  a01_blocks.push_back(&*it);
1959  }
1960  }
1961  }
1962 
1963  for (auto c : a00_blocks) {
1964  auto c_uid = c->getColUId();
1965  auto range = ctx->blockIndex.get<3>().equal_range(c_uid);
1966  for (auto it = range.first; it != range.second; ++it) {
1967  if (it->getFEUId() != last_uid && it->getRowUId() != c_uid) {
1968  a10_blocks.push_back(&*it);
1969  }
1970  }
1971  }
1972 
1973  auto sort = [](auto &blocks) {
1974  std::sort(blocks.begin(), blocks.end(), [](auto a, auto b) {
1975  if (a->getRowUId() == b->getRowUId())
1976  return a->getColUId() < b->getColUId();
1977  else
1978  return a->getRowUId() < b->getRowUId();
1979  });
1980  };
1981 
1982  sort(a00_blocks);
1983  sort(a01_blocks);
1984  sort(a10_blocks);
1985 
1986  if (debug) {
1987  MOFEM_LOG("WORLD", Sev::warning)
1988  << "a01_blocks.size() " << a01_blocks.size();
1989  MOFEM_LOG("WORLD", Sev::warning)
1990  << "a10_blocks.size() " << a10_blocks.size();
1991  }
1992 
1993  // set local indices
1994  auto set_local_indices = [](auto &row_blocks, auto &col_blocks) {
1995  // index, size, global index
1996  std::map<UId, std::tuple<int, int, int>>
1997  block_indexing; // uid block map
1998  for (auto b : row_blocks) {
1999  if (block_indexing.find(b->getRowUId()) == block_indexing.end()) {
2000  block_indexing[b->getRowUId()] =
2001  std::make_tuple(b->getNbRows(), b->getNbRows(), b->getRow());
2002  }
2003  }
2004  for (auto b : col_blocks) {
2005  if (block_indexing.find(b->getColUId()) == block_indexing.end()) {
2006  block_indexing[b->getColUId()] =
2007  std::make_tuple(b->getNbCols(), b->getNbCols(), b->getCol());
2008  }
2009  }
2010  // set indexes to block
2011  int mat_block_size = 0; // size of block matrix
2012  for (auto &b : block_indexing) {
2013  // index, size, global index
2014  auto &[index, size, glob] = b.second;
2015  index = mat_block_size;
2016  mat_block_size += size;
2017  }
2018  return std::make_pair(block_indexing, mat_block_size);
2019  };
2020 
2021  auto set_block = [&](auto &blocks, auto &row_indexing, auto &col_indexing,
2022  auto &block_mat, int row_block_size,
2023  int col_block_size) {
2024  std::vector<std::tuple<int, int, int, int, int>> block_data;
2025  block_data.reserve(blocks.size());
2026  for (auto s : blocks) {
2027  auto ruid = s->getRowUId();
2028  auto cuid = s->getColUId();
2029  auto &rbi = row_indexing.at(ruid);
2030  auto &cbi = col_indexing.at(cuid);
2031  if (auto shift = s->getMatShift(); shift != -1) {
2032  block_data.push_back(std::make_tuple(
2033 
2034  shift,
2035 
2036  s->getNbRows(), s->getNbCols(),
2037 
2038  get<0>(rbi), get<0>(cbi))
2039 
2040  );
2041  }
2042  }
2043  block_mat.resize(row_block_size, col_block_size, false);
2044  block_mat.clear();
2045  for (auto &bd : block_data) {
2046  auto &[shift, nb_rows, nb_cols, ridx, cidx] = bd;
2047  auto ptr = &(*(ctx->dataBlocksPtr))[shift];
2048  for (auto i = 0; i != nb_rows; ++i, ptr += nb_cols) {
2049  auto sub_ptr = &block_mat(ridx + i, cidx);
2050  cblas_dcopy(nb_cols, ptr, 1, sub_ptr, 1);
2051  }
2052  }
2053  if (ctx->multiplyByPreconditioner) {
2054  for (auto &bd : block_data) {
2055  auto &[shift, nb_rows, nb_cols, ridx, cidx] = bd;
2056  auto ptr = &(*(ctx->preconditionerBlocksPtr))[shift];
2057  for (auto i = 0; i != nb_rows; ++i, ptr += nb_cols) {
2058  auto sub_ptr = &block_mat(ridx + i, cidx);
2059  cblas_daxpy(nb_cols, 1., ptr, 1, sub_ptr, 1);
2060  }
2061  }
2062  }
2063  };
2064 
2065  auto [a00_indexing, a00_size] = set_local_indices(a00_blocks, a00_blocks);
2066  auto [a11_indexing, a11_size] = set_local_indices(a10_blocks, a01_blocks);
2067 
2068  if (debug) {
2069  MOFEM_LOG("WORLD", Sev::warning)
2070  << "a00_indexing.size() " << a00_indexing.size() << " a00_size "
2071  << a00_size;
2072  MOFEM_LOG("WORLD", Sev::warning)
2073  << "a11_indexing.size() " << a11_indexing.size() << " a11_size "
2074  << a11_size;
2075  }
2076 
2077  set_block(a00_blocks, a00_indexing, a00_indexing, block_mat_a00, a00_size,
2078  a00_size);
2079  set_block(a01_blocks, a00_indexing, a11_indexing, block_mat_a01, a00_size,
2080  a11_size);
2081  set_block(a10_blocks, a11_indexing, a00_indexing, block_mat_a10, a11_size,
2082  a00_size);
2083  block_mat_a11.resize(a11_size, a11_size, false);
2084  block_mat_a11.clear();
2085 
2086  block_mat_a00 = trans(block_mat_a00);
2087  block_mat_a01 = trans(block_mat_a01);
2088 
2089  ipiv.resize(block_mat_a00.size1(), false);
2090  auto info =
2091  lapack_dgesv(block_mat_a00.size1(), block_mat_a01.size1(),
2092  block_mat_a00.data().data(), block_mat_a00.size2(),
2093  &*ipiv.data().begin(), block_mat_a01.data().data(),
2094  block_mat_a01.size2());
2095  if (info)
2096  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
2097  "lapack error info = %d", info);
2098 
2099  // block_mat_a01 = trans(block_mat_a01);
2100  cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
2101  block_mat_a11.size1(), block_mat_a11.size2(),
2102  block_mat_a00.size1(), -1.,
2103 
2104  block_mat_a10.data().data(), block_mat_a10.size2(),
2105 
2106  block_mat_a01.data().data(), block_mat_a01.size2(),
2107 
2108  0., block_mat_a11.data().data(), block_mat_a11.size2()
2109 
2110  );
2111 
2112  int idx = 0;
2113  std::vector<int> glob_idx(block_mat_a11.size1());
2114  for (auto &r : a11_indexing) {
2115  auto [r_index, r_size, r_glob] = r.second;
2116  std::iota(&glob_idx[idx], &glob_idx[idx + r_size], r_glob);
2117  idx += r_size;
2118  }
2119  CHKERR AOApplicationToPetsc(ao, glob_idx.size(), glob_idx.data());
2120  CHKERR MatSetValues(S, glob_idx.size(), glob_idx.data(), glob_idx.size(),
2121  glob_idx.data(), block_mat_a11.data().data(),
2122  ADD_VALUES);
2123  }
2124  }
2125 
2126  // PetscLogFlops(xxx)
2127  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_AssembleSchurMat, 0, 0, 0, 0);
2128 
 2129  MoFEMFunctionReturn(0);
 2130 }
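// In effect, the per-element elimination above computes the classic Schur
// complement contribution
//
//   S = A11 - A10 * inv(A00) * A01
//
// The a11 blocks are added to S directly; lapack_dgesv factorises A00 and
// overwrites block_mat_a01 with inv(A00) * A01 (held transposed, which is why
// trans() is applied beforehand and CblasTrans is passed to cblas_dgemm), and
// cblas_dgemm with alpha = -1 writes -A10 * (inv(A00) * A01) into
// block_mat_a11, which is then scattered into S through the AO mapping with
// ADD_VALUES.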
2131 
2132 boost::shared_ptr<std::vector<double>> getBlockMatStorageMat(Mat B) {
2133  BlockStructure *ctx;
2134  CHKERR MatShellGetContext(B, (void **)&ctx);
2135  return ctx->dataBlocksPtr;
2136 }
2137 
2138 boost::shared_ptr<std::vector<double>>
 2139 getBlockMatPrconditionerStorageMat(Mat B) {
 2140  BlockStructure *ctx;
 2141  CHKERR MatShellGetContext(B, (void **)&ctx);
 2142  return ctx->preconditionerBlocksPtr;
2143 }
2144 
 2145 MoFEMErrorCode shell_block_mat_asmb_wrap_impl(
 2146  BlockStructure *ctx, const EntitiesFieldData::EntData &row_data,
2147  const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat,
2148  InsertMode iora,
2149  boost::function<int(const DiagBlockIndex::Indexes *)> shift_extractor,
2150  boost::shared_ptr<std::vector<double>> data_blocks_ptr) {
2151 
 2152  MoFEMFunctionBegin;
 2153 
 2154  if (row_data.getIndices().empty())
 2155  MoFEMFunctionReturnHot(0);
 2156  if (col_data.getIndices().empty())
 2157  MoFEMFunctionReturnHot(0);
 2158 
2159 #ifndef NDEBUG
2160 
2161  PetscLogEventBegin(SchurEvents::MOFEM_EVENT_BlockStructureSetValues, 0, 0, 0,
2162  0);
2163 
2164 #endif // NDEBUG
2165 
2166  switch (iora) {
2167  case ADD_VALUES:
2168  case INSERT_VALUES:
2169  break;
2170  default:
2171  SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED, "Wrong InsertMode");
2172  }
2173 
2174  auto get_row_data = [&]() -> std::pair<bool, VectorInt> {
2175  if (auto e_ptr = row_data.getFieldEntities()[0]) {
2176  if (auto stored_data_ptr =
2177  e_ptr->getSharedStoragePtr<EssentialBcStorage>()) {
2178  MOFEM_LOG("SELF", Sev::warning) << "Can lead to unhandled behaviour";
2179  return std::make_pair(true, stored_data_ptr->entityIndices);
2180  }
2181  }
2182  return std::make_pair(false, row_data.getIndices());
2183  };
2184 
2185  auto row_indices = get_row_data();
2186 
2187  std::vector<int> ent_row_indices;
2188  std::vector<int> ent_col_indices;
2189 
2190  for (auto &rent : row_data.getFieldEntities()) {
2191  if (auto r_cache = rent->entityCacheRowDofs.lock()) {
2192 
2193  auto &row_uid = rent->getLocalUniqueId();
2194  auto &row_ind = row_indices.second;
2195 
2196  for (auto &cent : col_data.getFieldEntities()) {
2197  if (auto c_cache = cent->entityCacheColDofs.lock()) {
2198 
2199  auto &col_uid = cent->getLocalUniqueId();
2200  auto &col_ind = col_data.getIndices();
2201 
2202  auto it = ctx->blockIndex.get<0>().find(
2203 
2204  boost::make_tuple(row_uid, col_uid)
2205 
2206  );
2207 
2208 #ifndef NDEBUG
2209 
2210  if (it == ctx->blockIndex.get<0>().end()) {
2211  MOFEM_LOG_CHANNEL("SELF");
2212  MOFEM_TAG_AND_LOG("SELF", Sev::error, "BlockMat")
2213  << "missing block: row "
2214  << row_data.getFieldDofs()[0]->getName() << " col "
2215  << col_data.getFieldDofs()[0]->getName();
2216  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
2217  "Block not allocated");
2218  }
2219 
2220 #endif
2221 
2222  auto ri = row_ind.begin();
2223  auto rie = row_ind.end();
2224 
2225  auto shift = shift_extractor(&*it);
2226  auto s_mat = &(*data_blocks_ptr)[shift];
2227 
2228  auto get_ent_indices = [](auto &cache, auto &ind) {
2229  ind.clear();
2230  ind.reserve(std::distance(cache->loHi[0], cache->loHi[1]));
2231  for (auto e = cache->loHi[0]; e != cache->loHi[1]; ++e) {
2232  auto glob = (*e)->getPetscGlobalDofIdx();
2233  if (glob != -1)
2234  ind.push_back(glob);
2235  }
2236  };
2237 
2238  get_ent_indices(r_cache, ent_row_indices);
2239  if (ent_row_indices.empty())
2240  continue;
2241  get_ent_indices(c_cache, ent_col_indices);
2242  if (ent_col_indices.empty())
2243  continue;
2244 
2245  if (mat.size1() == ent_row_indices.size() &&
2246  mat.size2() == ent_col_indices.size()) {
2247 
2248  if (iora == ADD_VALUES) {
2249  cblas_daxpy(mat.data().size(), 1.0, mat.data().data(), 1, s_mat,
2250  1);
2251  } else {
2252  cblas_dcopy(mat.data().size(), mat.data().data(), 1, s_mat, 1);
2253  }
2254 
2255  } else {
2256 
2257  int row = 0;
2258  for (auto re : ent_row_indices) {
2259  ri = std::find(ri, rie, re);
2260  if (!(ri == rie && *ri != -1)) {
2261 
2262  auto ci = col_ind.begin();
2263  auto cie = col_ind.end();
2264 
2265  int col = 0;
2266  for (auto ce : ent_col_indices) {
2267  ci = std::find(ci, cie, ce);
2268  if (!(ci == cie && *ci != -1)) {
2269  auto &m = s_mat[row * ent_col_indices.size() + col];
2270  if (iora == ADD_VALUES) {
2271  m += mat(std::distance(row_ind.begin(), ri),
2272  std::distance(col_ind.begin(), ci));
2273  } else {
2274  m = mat(std::distance(row_ind.begin(), ri),
2275  std::distance(col_ind.begin(), ci));
2276  }
2277  }
2278  ++col;
2279  } // cols
2280  }
2281  ++row;
2282  } // rows
2283  }
2284 
2285  } // iterate entities
2286  }
2287  }
2288  }
2289 
2290 #ifndef NDEBUG
2291  PetscLogEventEnd(SchurEvents::MOFEM_EVENT_BlockStructureSetValues, 0, 0, 0,
2292  0);
2293 #endif // NDEBUG
2294 
 2295  MoFEMFunctionReturn(0);
 2296 }
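// Note on the two assembly paths above: when the element matrix spans the
// whole (row entity) x (col entity) block, i.e. mat.size1()/size2() match the
// cached entity index counts, the block is copied (INSERT_VALUES) or
// accumulated (ADD_VALUES) with a single BLAS call; otherwise each cached
// entity index is located in the element's row/column index vectors with
// std::find and the matrix is transferred entry by entry, which handles the
// case where only a subset of the entity DOFs appears in the element matrix.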
2297 
 2298 MoFEMErrorCode
 2299 shell_block_mat_asmb_wrap(Mat M, const EntitiesFieldData::EntData &row_data,
 2300  const EntitiesFieldData::EntData &col_data,
 2301  const MatrixDouble &mat, InsertMode iora) {
 2302  MoFEMFunctionBegin;
 2303  PetscBool is_mat_shell = PETSC_FALSE;
2304  PetscObjectTypeCompare((PetscObject)M, MATSHELL, &is_mat_shell);
2305  if (is_mat_shell) {
2306  BlockStructure *ctx;
2307  CHKERR MatShellGetContext(M, (void **)&ctx);
 2308  CHKERR shell_block_mat_asmb_wrap_impl(
 2309  ctx, row_data, col_data, mat, iora,
2310  [](const DiagBlockIndex::Indexes *idx) { return idx->getMatShift(); },
2311  ctx->dataBlocksPtr);
2312  } else {
2313  CHKERR MatSetValues<AssemblyTypeSelector<PETSC>>(M, row_data, col_data, mat,
2314  iora);
2315  }
 2316  MoFEMFunctionReturn(0);
 2317 }
2318 
 2319 MoFEMErrorCode shell_block_preconditioner_mat_asmb_wrap(
 2320  Mat M, const EntitiesFieldData::EntData &row_data,
 2321  const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat,
 2322  InsertMode iora) {
 2323  MoFEMFunctionBegin;
 2324  PetscBool is_mat_shell = PETSC_FALSE;
2325  PetscObjectTypeCompare((PetscObject)M, MATSHELL, &is_mat_shell);
2326  if (is_mat_shell) {
2327  BlockStructure *ctx;
2328  CHKERR MatShellGetContext(M, (void **)&ctx);
2329  if (!ctx->preconditionerBlocksPtr)
2330  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
2331  "No preconditionerBlocksPtr");
 2332  CHKERR shell_block_mat_asmb_wrap_impl(
 2333  ctx, row_data, col_data, mat, iora,
2334  [](const DiagBlockIndex::Indexes *idx) { return idx->getMatShift(); },
2335  ctx->preconditionerBlocksPtr);
2336  } else {
2337  CHKERR MatSetValues<AssemblyTypeSelector<PETSC>>(M, row_data, col_data, mat,
2338  iora);
2339  }
 2340  MoFEMFunctionReturn(0);
 2341 }
2342 
2343 boost::shared_ptr<NestSchurData> createSchurNestedMatrixStruture(
2344 
2345  std::pair<SmartPetscObj<DM>, SmartPetscObj<DM>> dms,
2346  boost::shared_ptr<BlockStructure> block_mat_data_ptr,
2347 
2348  std::vector<std::string> fields_names, //< a00 fields
2349  std::vector<boost::shared_ptr<Range>>
2350  field_ents, //< a00 ranges (can be null),
2351  bool add_preconditioner_block) {
2352 
2353  if (!block_mat_data_ptr) {
2354  CHK_THROW_MESSAGE(MOFEM_DATA_INCONSISTENCY, "Block data not set");
2355  }
2356 
2357  if (fields_names.size() != field_ents.size())
 2358  CHK_THROW_MESSAGE(MOFEM_DATA_INCONSISTENCY,
 2359  "fields_names.size() != field_ents.size()");
2360 
2361  auto [schur_dm, block_dm] = dms;
2362  auto schur_prb = getProblemPtr(schur_dm);
2363  auto block_prb = getProblemPtr(block_dm);
2364  // auto m_field_ptr = getInterfacePtr(block_dm);
2365 
2366  auto schur_dofs_row = schur_prb->getNumeredRowDofsPtr();
2367  auto schur_dofs_col = schur_prb->getNumeredColDofsPtr();
2368  auto block_dofs_row = block_prb->getNumeredRowDofsPtr();
2369  auto block_dofs_col = block_prb->getNumeredColDofsPtr();
2370 
2371  auto ao_schur_row = schur_prb->getSubData()->getSmartRowMap();
2372  auto ao_schur_col = schur_prb->getSubData()->getSmartColMap();
2373  auto ao_block_row = block_prb->getSubData()->getSmartRowMap();
2374  auto ao_block_col = block_prb->getSubData()->getSmartColMap();
2375 
2376  auto schur_vec_x = createDMVector(schur_dm);
2377  auto block_vec_x = createDMVector(block_dm);
2378  auto schur_vec_y = vectorDuplicate(schur_vec_x);
2379  auto block_vec_y = vectorDuplicate(block_vec_x);
2380  CHKERR VecSetDM(schur_vec_x, PETSC_NULL);
2381  CHKERR VecSetDM(block_vec_x, PETSC_NULL);
2382  CHKERR VecSetDM(schur_vec_y, PETSC_NULL);
2383  CHKERR VecSetDM(block_vec_y, PETSC_NULL);
2384 
2385  auto find_field_ent = [&](auto uid, auto prb, auto rc) {
2386  boost::shared_ptr<NumeredDofEntity_multiIndex> dofs;
2387 
2388  switch (rc) {
2389  case ROW:
2390  dofs = prb->getNumeredRowDofsPtr();
2391  break;
2392  case COL:
2393  dofs = prb->getNumeredColDofsPtr();
2394  break;
2395  default:
2396  CHK_THROW_MESSAGE(MOFEM_NOT_IMPLEMENTED, "Wrong RowCol");
2397  break;
2398  }
2399 
2400  auto lo = dofs->get<Unique_mi_tag>().lower_bound(uid);
2401  if (lo == dofs->get<Unique_mi_tag>().end())
2402  return boost::shared_ptr<NumeredDofEntity>();
2403  auto hi = dofs->get<Unique_mi_tag>().upper_bound(
2404  DofEntity::getUniqueIdCalculate(MAX_DOFS_ON_ENTITY - 1, uid));
2405  if (lo != hi)
2406  return *lo;
2407 
2408  return boost::shared_ptr<NumeredDofEntity>();
2409  };
2410 
2411  std::array<boost::shared_ptr<BlockStructure>, 4> data_ptrs;
2412 
2413  for (auto r = 0; r != 3; ++r) {
2414  data_ptrs[r] = boost::make_shared<BlockStructure>();
2415  data_ptrs[r]->dataBlocksPtr = block_mat_data_ptr->dataBlocksPtr;
2416  }
2417  data_ptrs[3] = boost::make_shared<BlockStructure>();
2418  data_ptrs[3]->dataBlocksPtr = block_mat_data_ptr->dataBlocksPtr;
2419 
2420  data_ptrs[0]->ghostX = schur_vec_x;
2421  data_ptrs[0]->ghostY = schur_vec_y;
2422  data_ptrs[1]->ghostX = block_vec_x;
2423  data_ptrs[1]->ghostY = schur_vec_y;
2424  data_ptrs[2]->ghostX = schur_vec_x;
2425  data_ptrs[2]->ghostY = block_vec_y;
2426  data_ptrs[3]->ghostX = block_vec_x;
2427  data_ptrs[3]->ghostY = block_vec_y;
2428 
2429  int idx = 0;
2430  for (auto &d : block_mat_data_ptr->blockIndex.get<0>()) {
2431 
2432  auto insert = [&](auto &m, auto &dof_r, auto &dof_c, auto &d) {
2433  m.insert(
2434 
 2435  DiagBlockIndex::Indexes{
 2436  d.getRowUId(), d.getColUId(), d.getFEUId(),
2437 
2438  dof_r->getPetscGlobalDofIdx(), dof_c->getPetscGlobalDofIdx(),
2439 
2440  d.getNbRows(), d.getNbCols(),
2441 
2442  dof_r->getPetscLocalDofIdx(), dof_c->getPetscLocalDofIdx(),
2443 
2444  d.getMatShift()}
2445 
2446  );
2447  };
2448 
2449  auto dof_schur_row = find_field_ent(d.getRowUId(), schur_prb, ROW);
2450  auto dof_schur_col = find_field_ent(d.getColUId(), schur_prb, COL);
2451  auto dof_block_row = find_field_ent(d.getRowUId(), block_prb, ROW);
2452  auto dof_block_col = find_field_ent(d.getColUId(), block_prb, COL);
2453 
2454  if (dof_schur_row && dof_schur_col) {
2455  insert(data_ptrs[0]->blockIndex, dof_schur_row, dof_schur_col, d);
2456  }
2457 
2458  if (dof_schur_row && dof_block_col) {
2459  insert(data_ptrs[1]->blockIndex, dof_schur_row, dof_block_col, d);
2460  }
2461 
2462  if (dof_block_row && dof_schur_col) {
2463  insert(data_ptrs[2]->blockIndex, dof_block_row, dof_schur_col, d);
2464  }
2465 
2466  if (dof_block_row && dof_block_col) {
2467  insert(data_ptrs[3]->blockIndex, dof_block_row, dof_block_col, d);
2468  }
2469 
2470  ++idx;
2471  }
2472 
2473  // set data for a00 solve (inverse blocks)
2474  auto set_up_a00_data = [&](auto inv_block_data) {
 2475  MoFEMFunctionBegin;
 2476 
2477  if (add_preconditioner_block) {
2478  auto preconditioned_block = boost::make_shared<std::vector<double>>(
2479  inv_block_data->dataBlocksPtr->size(), 0);
2480  inv_block_data->preconditionerBlocksPtr = preconditioned_block;
2481  inv_block_data->multiplyByPreconditioner = true;
2482  block_mat_data_ptr->preconditionerBlocksPtr =
2483  inv_block_data->preconditionerBlocksPtr;
2484  block_mat_data_ptr->multiplyByPreconditioner = false;
2485  }
2486 
 2487  MoFEMFunctionReturn(0);
 2488  };
2489 
2490  CHKERR set_up_a00_data(data_ptrs[3]);
2491 
2492  MPI_Comm comm;
2493  CHKERR PetscObjectGetComm((PetscObject)schur_dm, &comm);
2494 
2495  auto create_shell_mat = [&](auto nb_r_loc, auto nb_c_loc, auto nb_r_glob,
2496  auto nb_c_glob, auto data_ptr) {
2497  Mat mat_raw;
2498  CHKERR MatCreateShell(comm, nb_r_loc, nb_c_loc, nb_r_glob, nb_c_glob,
2499  (void *)data_ptr.get(), &mat_raw);
2500  CHKERR setSchurBlockMatOps(mat_raw);
2501  return SmartPetscObj<Mat>(mat_raw);
2502  };
2503 
2504  auto schur_nb_global = schur_prb->getNbDofsRow();
2505  auto block_nb_global = block_prb->getNbDofsRow();
2506  auto schur_nb_local = schur_prb->getNbLocalDofsRow();
2507  auto block_nb_local = block_prb->getNbLocalDofsRow();
2508 
2509  std::array<SmartPetscObj<Mat>, 4> mats_array;
2510  mats_array[0] =
2511  create_shell_mat(schur_nb_local, schur_nb_local, schur_nb_global,
2512  schur_nb_global, data_ptrs[0]);
2513  mats_array[1] =
2514  create_shell_mat(schur_nb_local, block_nb_local, schur_nb_global,
2515  block_nb_global, data_ptrs[1]);
2516  mats_array[2] =
2517  create_shell_mat(block_nb_local, schur_nb_local, block_nb_global,
2518  schur_nb_global, data_ptrs[2]);
2519  mats_array[3] =
2520  create_shell_mat(block_nb_local, block_nb_local, block_nb_global,
2521  block_nb_global, data_ptrs[3]);
2522 
2523  MOFEM_TAG_AND_LOG("SYNC", Sev::verbose, "NestedSchur")
2524  << "(0, 0) " << schur_nb_local << " " << schur_nb_global << " "
2525  << data_ptrs[0]->blockIndex.size();
2526  MOFEM_TAG_AND_LOG("SYNC", Sev::verbose, "NestedSchur")
2527  << "(0, 1) " << schur_nb_local << " " << block_nb_local << " "
2528  << schur_nb_global << " " << block_nb_global << " "
2529  << data_ptrs[1]->blockIndex.size();
2530  MOFEM_TAG_AND_LOG("SYNC", Sev::verbose, "NestedSchur")
2531  << "(1, 0) " << block_nb_local << " " << schur_nb_local << " "
2532  << block_nb_global << " " << schur_nb_global << " "
2533  << data_ptrs[2]->blockIndex.size();
2534  MOFEM_TAG_AND_LOG("SYNC", Sev::verbose, "NestedSchur")
2535  << "(1, 1) " << block_nb_local << " " << block_nb_global << " "
2536  << data_ptrs[3]->blockIndex.size();
2537 
2538  MOFEM_LOG_SEVERITY_SYNC(comm, Sev::verbose);
2539 
2540  auto schur_is = schur_prb->getSubData()->getSmartRowIs();
2541  auto block_is = block_prb->getSubData()->getSmartRowIs();
2542 
2543  return boost::make_shared<NestSchurData>(
2544 
2545  mats_array, data_ptrs, block_mat_data_ptr,
2546  std::make_pair(schur_is, block_is)
2547 
2548  );
2549 }
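// The four shell matrices above map onto the 2x2 nested layout consumed by
// createSchurNestedMatrix below, with row/column index sets {schur_is,
// block_is}:
//
//   [ mats_array[0]  mats_array[1] ]   [ (schur, schur)  (schur, block) ]
//   [ mats_array[2]  mats_array[3] ] = [ (block, schur)  (block, block) ]
//
// All four share the same underlying dataBlocksPtr storage; only the block
// indexing differs.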
2550 
2551 std::pair<SmartPetscObj<Mat>, boost::shared_ptr<NestSchurData>>
2552 createSchurNestedMatrix(boost::shared_ptr<NestSchurData> schur_net_data_ptr) {
2553 
2554  if (!schur_net_data_ptr)
 2555  CHK_THROW_MESSAGE(MOFEM_DATA_INCONSISTENCY, "Data not set");
 2556 
2557  auto [mat_arrays, data_ptrs, block_mat_data_ptr, iss] = *schur_net_data_ptr;
2558  auto [schur_is, block_is] = iss;
2559 
2560  std::array<IS, 2> is_a = {schur_is, block_is};
2561  std::array<Mat, 4> mats_a = {
2562 
2563  mat_arrays[0], mat_arrays[1], mat_arrays[2], mat_arrays[3]
2564 
2565  };
2566 
2567  MPI_Comm comm;
2568  CHKERR PetscObjectGetComm((PetscObject)mat_arrays[0], &comm);
2569 
2570  Mat mat_raw;
2571  CHKERR MatCreateNest(
2572 
2573  comm, 2, is_a.data(), 2, is_a.data(), mats_a.data(), &mat_raw
2574 
2575  );
2576 
2577  return std::make_pair(SmartPetscObj<Mat>(mat_raw), schur_net_data_ptr);
2578 }
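// A minimal usage sketch (the DM handles and the field name "U" are
// assumptions, not taken from this file):
//
//   // auto nest_data = createSchurNestedMatrixStruture(
//   //     {schur_dm, block_dm}, block_mat_data, {"U"}, {nullptr}, true);
//   // auto [nested_mat, data] = createSchurNestedMatrix(nest_data);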
2579 
 2580 OpSchurAssembleBase *createOpSchurAssembleBegin() {
 2581  return new OpSchurAssembleBegin();
2582 }
2583 
2584 OpSchurAssembleBase *
2585 createOpSchurAssembleEnd(std::vector<std::string> fields_name,
2586  std::vector<boost::shared_ptr<Range>> field_ents,
 2587  SmartPetscObj<AO> ao, SmartPetscObj<Mat> schur,
 2588  bool sym_schur, bool symm_op) {
2589  if (symm_op)
2590  return new OpSchurAssembleEnd<SchurDSYSV>(fields_name, field_ents, ao,
2591  schur, sym_schur, symm_op);
2592  else
2593  return new OpSchurAssembleEnd<SchurDGESV>(fields_name, field_ents, ao,
2594  schur, sym_schur, symm_op);
2595 }
2596 
 2597 OpSchurAssembleBase *createOpSchurZeroRowsAndCols(
 2598  boost::shared_ptr<std::vector<unsigned char>> marker_ptr, double diag_val) {
2599  return new OpSchurZeroRowsAndCols(marker_ptr, diag_val);
2600 }
2601 
2602 OpSchurAssembleBase *
2603 createOpSchurAssembleEnd(std::vector<std::string> fields_name,
2604  std::vector<boost::shared_ptr<Range>> field_ents,
2605  std::vector<SmartPetscObj<AO>> sequence_of_aos,
2606  std::vector<SmartPetscObj<Mat>> sequence_of_mats,
2607  std::vector<bool> sym_schur, bool symm_op,
2608  boost::shared_ptr<BlockStructure> diag_blocks) {
2609  return createOpSchurAssembleEnd(
2610  fields_name, field_ents, sequence_of_aos.back(), sequence_of_mats.back(),
2611  sym_schur.back(), symm_op);
2612 }
2613 
2614 OpSchurAssembleBase *
2615 createOpSchurAssembleEnd(std::vector<std::string> fields_name,
2616  std::vector<boost::shared_ptr<Range>> field_ents,
2617  std::vector<SmartPetscObj<AO>> sequence_of_aos,
2618  std::vector<SmartPetscObj<Mat>> sequence_of_mats,
2619  std::vector<bool> sym_schur,
2620  std::vector<double> diag_eps, bool symm_op,
2621  boost::shared_ptr<BlockStructure> diag_blocks) {
2622  return createOpSchurAssembleEnd(
2623  fields_name, field_ents, sequence_of_aos.back(), sequence_of_mats.back(),
2624  sym_schur.back(), symm_op);
2625 }
2626 
 2627 MoFEMErrorCode setSchurA00MatSolvePC(SmartPetscObj<PC> pc) {
 2628  MoFEMFunctionBegin;
 2629 
 2630  auto apply = [](PC pc, Vec f, Vec x) {
 2631  MoFEMFunctionBegin;
 2632  Mat A, P;
2633  CHKERR PCGetOperators(pc, &A, &P);
2634  CHKERR MatSolve(P, f, x);
 2635  MoFEMFunctionReturn(0);
 2636  };
2637 
2638  CHKERR PCSetType(pc, PCSHELL);
2639  CHKERR PCShellSetApply(pc, apply);
2640  CHKERR PCShellSetName(pc, "MoFEMSchurBlockPC");
2641 
 2642  MoFEMFunctionReturn(0);
 2643 }
2644 
 2645 MoFEMErrorCode setSchurMatSolvePC(SmartPetscObj<PC> pc) {
 2646  return setSchurA00MatSolvePC(pc);
2647 }
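// Both PC setters install the same PCSHELL: its apply callback forwards to
// MatSolve on the preconditioner operator P, so the PC is only usable when P
// implements MatSolve, presumably the shell block matrix configured with
// setSchurBlockMatOps earlier in this file.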
2648 
2649 // This is used to assemble local element matrices for Schur complement
2650 // and matrix for KSP
2651 template <>
 2652 MoFEMErrorCode
 2653 MatSetValues<SchurElemMats>(Mat M, const EntitiesFieldData::EntData &row_data,
 2654  const EntitiesFieldData::EntData &col_data,
2655  const MatrixDouble &mat, InsertMode iora) {
2656  return SchurElemMats::MatSetValues(M, row_data, col_data, mat, iora);
2657 }
2658 
 2659 MoFEMErrorCode
 2660 schur_mat_set_values_wrap(Mat M, const EntitiesFieldData::EntData &row_data,
 2661  const EntitiesFieldData::EntData &col_data,
2662  const MatrixDouble &mat, InsertMode iora) {
2663  return MatSetValues<AssemblyTypeSelector<PETSC>>(M, row_data, col_data, mat,
2664  iora);
2665 }
2666 
 2667 // It is assumed that standard PETSc assembly works for matrices used by KSP
2668 SchurBackendMatSetValuesPtr::MatSetValuesPtr
2669  SchurBackendMatSetValuesPtr::matSetValuesPtr = schur_mat_set_values_wrap;
2670 
2671 // We assemble matrix for KSP and store local matrices for Schur complement.
2672 // Schur complement is calculated and assembled in OpSchurAssembleEnd.
 2673 MoFEMErrorCode
 2674 SchurElemMats::MatSetValues(Mat M, const EntitiesFieldData::EntData &row_data,
 2675  const EntitiesFieldData::EntData &col_data,
 2676  const MatrixDouble &mat, InsertMode iora) {
 2677  MoFEMFunctionBegin;
 2678  CHKERR assembleStorage(row_data, col_data, mat, iora);
2679  CHKERR SchurBackendMatSetValuesPtr::matSetValuesPtr(M, row_data, col_data,
2680  mat, iora);
 2681  MoFEMFunctionReturn(0);
 2682 }
2683 
 2684 // All of this is now wrapped in the specialisation of
2685 // MatSetValues<AssemblyTypeSelector<SCHUR>>
2686 template <>
2687 MoFEMErrorCode MatSetValues<AssemblyTypeSelector<SCHUR>>(
2688  Mat M, const EntitiesFieldData::EntData &row_data,
2689  const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat,
2690  InsertMode iora) {
2691  return MatSetValues<SchurElemMats>(M, row_data, col_data, mat, iora);
2692 }
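// A sketch of a typical call site inside an operator's doWork(); the matrix
// handle B and the local matrix locMat are assumed names, not part of this
// file:
//
//   // CHKERR MatSetValues<AssemblyTypeSelector<SCHUR>>(
//   //     B, row_data, col_data, locMat, ADD_VALUES);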
2693 
 2694 // Standard (PETSc) assembly does not work for block matrices
2695 template <>
 2696 MoFEMErrorCode
 2697 MatSetValues<BlockStructure>(Mat M, const EntitiesFieldData::EntData &row_data,
 2698  const EntitiesFieldData::EntData &col_data,
2699  const MatrixDouble &mat, InsertMode iora) {
2700  return shell_block_mat_asmb_wrap(M, row_data, col_data, mat, iora);
2701 }
2702 
 2703 struct SchurElemMatsBlock : public SchurElemMats {
 2704 
2705  static MoFEMErrorCode MatSetValues(Mat M,
2706  const EntitiesFieldData::EntData &row_data,
2707  const EntitiesFieldData::EntData &col_data,
2708  const MatrixDouble &mat, InsertMode iora);
2709 };
2710 
 2711 SchurBackendMatSetValuesPtr::MatSetValuesPtr
 2712  SchurBackendMatSetValuesPtr::matSetValuesBlockPtr =
 2713  shell_block_mat_asmb_wrap;
 2714 
 2715 MoFEMErrorCode
 2716 SchurElemMatsBlock::MatSetValues(Mat M,
 2717  const EntitiesFieldData::EntData &row_data,
 2718  const EntitiesFieldData::EntData &col_data,
 2719  const MatrixDouble &mat, InsertMode iora) {
 2720  MoFEMFunctionBegin;
 2721  CHKERR assembleStorage(row_data, col_data, mat, iora);
2722  CHKERR SchurBackendMatSetValuesPtr::matSetValuesBlockPtr(M, row_data,
2723  col_data, mat, iora);
 2724  MoFEMFunctionReturn(0);
 2725 }
2726 
2727 template <>
 2728 MoFEMErrorCode
 2729 MatSetValues<SchurElemMatsBlock>(Mat M,
 2730  const EntitiesFieldData::EntData &row_data,
2731  const EntitiesFieldData::EntData &col_data,
2732  const MatrixDouble &mat, InsertMode iora) {
2733  return SchurElemMatsBlock::MatSetValues(M, row_data, col_data, mat, iora);
2734 }
2735 
 2736 struct SchurElemMatsPreconditionedBlock : public SchurElemMats {
 2737 
2738  static MoFEMErrorCode MatSetValues(Mat M,
2739  const EntitiesFieldData::EntData &row_data,
2740  const EntitiesFieldData::EntData &col_data,
2741  const MatrixDouble &mat, InsertMode iora);
2742 };
2743 
 2744 SchurBackendMatSetValuesPtr::MatSetValuesPtr
 2745  SchurBackendMatSetValuesPtr::matSetValuesPreconditionedBlockPtr =
 2746  shell_block_preconditioner_mat_asmb_wrap;
 2747 
 2748 MoFEMErrorCode SchurElemMatsPreconditionedBlock::MatSetValues(
 2749  Mat M, const EntitiesFieldData::EntData &row_data,
 2750  const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat,
 2751  InsertMode iora) {
 2752  MoFEMFunctionBegin;
 2753  CHKERR assembleStorage(row_data, col_data, mat, iora);
2754  CHKERR SchurBackendMatSetValuesPtr::matSetValuesPreconditionedBlockPtr(
2755  M, row_data, col_data, mat, iora);
 2756  MoFEMFunctionReturn(0);
 2757 }
2758 
2759 template <>
 2760 MoFEMErrorCode MatSetValues<SchurElemMatsPreconditionedBlock>(
 2761  Mat M, const EntitiesFieldData::EntData &row_data,
2762  const EntitiesFieldData::EntData &col_data, const MatrixDouble &mat,
2763  InsertMode iora) {
2764  return SchurElemMatsPreconditionedBlock::MatSetValues(M, row_data, col_data,
2765  mat, iora);
2766 }
2767 
 2768 MoFEMErrorCode
 2769 schurSwitchPreconditioner(boost::shared_ptr<BlockStructure> block_mat_data) {
 2770  MoFEMFunctionBegin;
 2771  if (block_mat_data->multiplyByPreconditioner) {
2772  block_mat_data->multiplyByPreconditioner = false;
2773  } else {
2774  if (!block_mat_data->preconditionerBlocksPtr) {
2775  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
2776  "preconditionerBlocksPtr not set");
2777  }
2778  block_mat_data->multiplyByPreconditioner = true;
2779  }
 2780  MoFEMFunctionReturn(0);
 2781 }
2782 
 2783 MoFEMErrorCode
 2784 schurSaveBlockMesh(boost::shared_ptr<BlockStructure> block_mat_data,
 2785  std::string filename) {
 2786  MoFEMFunctionBegin;
 2787 
2788  moab::Core core;
2789  moab::Interface &moab = core;
2790 
2791  ReadUtilIface *iface;
2792  CHKERR moab.query_interface(iface);
2793 
2794  auto size = block_mat_data->blockIndex.size();
2795 
2796  EntityHandle starting_vertex;
2797  vector<double *> arrays_coord;
2798  CHKERR iface->get_node_coords(3, 4 * size, 0, starting_vertex, arrays_coord);
2799  EntityHandle starting_handle;
2800  EntityHandle *array;
2801  CHKERR iface->get_element_connect(size, 4, MBQUAD, 0, starting_handle, array);
2802 
2803  double def_val_nrm2 = 0;
2804  Tag th_nrm2;
2805  CHKERR moab.tag_get_handle("nrm2", 1, MB_TYPE_DOUBLE, th_nrm2,
2806  MB_TAG_CREAT | MB_TAG_DENSE, &def_val_nrm2);
2807 
2808  int def_val_mat_shift = 0;
2809  Tag th_mat_shift;
2810  CHKERR moab.tag_get_handle("mat_shift", 1, MB_TYPE_INTEGER, th_mat_shift,
2811  MB_TAG_CREAT | MB_TAG_DENSE, &def_val_mat_shift);
2812 
2813  int i = 0;
2814  for (auto &d : block_mat_data->blockIndex) {
2815  auto row = d.getRow();
2816  auto col = d.getCol();
2817  auto nb_rows = d.getNbRows();
2818  auto nb_cols = d.getNbCols();
2819  auto mat_shift = d.getMatShift();
2820 
2821  // q0
2822  arrays_coord[0][4 * i + 0] = row;
2823  arrays_coord[1][4 * i + 0] = col;
2824  arrays_coord[2][4 * i + 0] = 0;
2825 
2826  // q1
2827  arrays_coord[0][4 * i + 1] = row + nb_rows;
2828  arrays_coord[1][4 * i + 1] = col;
2829  arrays_coord[2][4 * i + 1] = 0;
2830 
2831  // q2
2832  arrays_coord[0][4 * i + 2] = row + nb_rows;
2833  arrays_coord[1][4 * i + 2] = col + nb_cols;
2834  arrays_coord[2][4 * i + 2] = 0;
2835 
2836  // q3
2837  arrays_coord[0][4 * i + 3] = row;
2838  arrays_coord[1][4 * i + 3] = col + nb_cols;
2839  arrays_coord[2][4 * i + 3] = 0;
2840 
2841  // ele conn
2842  array[4 * i + 0] = starting_vertex + 4 * i + 0;
2843  array[4 * i + 1] = starting_vertex + 4 * i + 1;
2844  array[4 * i + 2] = starting_vertex + 4 * i + 2;
2845  array[4 * i + 3] = starting_vertex + 4 * i + 3;
2846 
2847  auto prt = &(*block_mat_data->dataBlocksPtr)[d.getMatShift()];
2848  auto nrm2 = cblas_dnrm2(nb_rows * nb_cols, prt, 1);
2849  EntityHandle ele = starting_handle + i;
2850  CHKERR moab.tag_set_data(th_nrm2, &ele, 1, &nrm2);
2851  CHKERR moab.tag_set_data(th_mat_shift, &ele, 1, &mat_shift);
2852 
2853  ++i;
2854  }
2855 
2856  CHKERR iface->update_adjacencies(starting_handle, size, 4, array);
2857  CHKERR moab.write_file(filename.c_str(), "VTK", "");
2858 
 2859  MoFEMFunctionReturn(0);
 2860 }
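// Usage sketch (the file name is arbitrary):
//
//   // CHKERR schurSaveBlockMesh(block_mat_data, "schur_blocks.vtk");
//
// Each stored block becomes one MBQUAD spanning its (row, col) index range,
// tagged with the block's Frobenius norm ("nrm2") and its offset into the
// data vector ("mat_shift").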
2861 
2862 } // namespace MoFEM