In this tutorial we show how to create sub-problems by using a discrete manager (DM) and sub-problem DMs to create nested matrices and use them with the field-split preconditioner. This example shows a simplified use of the field-split preconditioner; for a more advanced example of sub discrete managers and the field-split preconditioner see cell_forces.cpp.
The field-split preconditioner is a block solver in which Jacobi or Gauss–Seidel iterations are applied to the blocks of the matrix. Alternatively, if the matrix has a 2x2 block structure, the Schur complement can be approximated by the preconditioner. We do not intend to explain the mathematical details of block solvers but merely show how to use the PETSc preconditioner PCFIELDSPLIT (see http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/PC/PCFIELDSPLIT.html). Here we solve the Poisson problem with the Dirichlet condition enforced by a Lagrange multiplier field and a penalty term, which leads to the block system
\[
\left[
\begin{array}{cc}
\mathbf{K}+\mathbf{S} & \mathbf{C}^\textrm{T}\\
\mathbf{C} & \mathbf{0}
\end{array}
\right]
\left\{
\begin{array}{c}
\mathbf{U}\\
\mathbf{L}
\end{array}
\right\}
=
\left[
\begin{array}{c}
\mathbf{F}+\mathbf{S}\overline{U} \\
\mathbf{g}
\end{array}
\right],\\
\mathbf{K}=
\int_\Omega (\nabla \boldsymbol\phi)^\textrm{T} \nabla \boldsymbol\phi \textrm{d}\Omega,\quad
\mathbf{C} = \int_{\partial\Omega} \boldsymbol\psi^\textrm{T} \boldsymbol\phi \textrm{d}\partial\Omega,\\
\mathbf{F} = \int_\Omega \boldsymbol\phi^\textrm{T} f \textrm{d}\Omega,\quad
\mathbf{g} = \int_{\partial\Omega} \boldsymbol\psi^\textrm{T} \overline{u} \textrm{d}\partial\Omega,\\
\mathbf{S} = \int_{\partial\Omega} \boldsymbol\phi^\textrm{T} \boldsymbol\phi \textrm{d}\partial\Omega.
\]
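Before looking at the MoFEM implementation, the generic pattern for driving PCFIELDSPLIT from code is sketched below (a minimal illustration with plain PETSc calls; the matrix A and the index sets is0 and is1, which select the rows of the two blocks, are assumed to exist already):

// Minimal sketch: attach a field-split (block) preconditioner to a KSP.
KSP ksp;
PC pc;
CHKERR KSPCreate(PETSC_COMM_WORLD, &ksp);
CHKERR KSPSetOperators(ksp, A, A);
CHKERR KSPGetPC(ksp, &pc);
CHKERR PCSetType(pc, PCFIELDSPLIT);                 // block preconditioner
CHKERR PCFieldSplitSetIS(pc, "0", is0);             // rows of the first block
CHKERR PCFieldSplitSetIS(pc, "1", is1);             // rows of the second block
CHKERR PCFieldSplitSetType(pc, PC_COMPOSITE_SCHUR); // 2x2 Schur complement variant
CHKERR KSPSetUp(ksp);

The same pattern appears in the tutorial code below, except that the preconditioner type is chosen from the command line and the index sets come from the sub-problem DMs.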
#include <BasicFiniteElements.hpp> // assumed here: companion headers used
#include <AuxPoissonFunctions.hpp> // across the Poisson tutorial series
#include <PoissonOperators.hpp>

static char help[] = "...\n\n";

static const bool debug = false;
/**
 * Exact solution u = 1 + x^2 + y^2 + z^3 (the functor names follow the
 * Poisson tutorial series)
 */
struct ExactFunction {
  double operator()(const double x, const double y, const double z) const {
    return 1 + x * x + y * y + z * z * z;
  }
};

/**
 * Gradient of the exact solution
 */
struct ExactFunctionGrad {
  FTensor::Tensor1<double, 3> operator()(const double x, const double y,
                                         const double z) const {
    FTensor::Tensor1<double, 3> grad;
    grad(0) = 2 * x;
    grad(1) = 2 * y;
    grad(2) = 3 * z * z;
    return grad;
  }
};

/**
 * Laplacian of the exact solution, used as the source term
 */
struct ExactLaplacianFunction {
  double operator()(const double x, const double y, const double z) const {
    return 4 + 6 * z;
  }
};
/**
 * Operator assembling the penalty matrix S (scaled by beta) on boundary faces
 */
struct OpS : public FaceElementForcesAndSourcesCore::UserDataOperator {

  OpS(const double beta = 1)
      : FaceElementForcesAndSourcesCore::UserDataOperator("U", "U", OPROWCOL,
                                                          true),
        bEta(beta) {}

  MoFEMErrorCode doWork(int row_side, int col_side, EntityType row_type,
                        EntityType col_type,
                        EntitiesFieldData::EntData &row_data,
                        EntitiesFieldData::EntData &col_data) {
    MoFEMFunctionBegin;
    // get number of dofs on row; if zero, nothing to do here
    nbRows = row_data.getIndices().size();
    if (!nbRows)
      MoFEMFunctionReturnHot(0);
    // get number of dofs on column; if zero, nothing to do here
    nbCols = col_data.getIndices().size();
    if (!nbCols)
      MoFEMFunctionReturnHot(0);
    // get number of integration points
    nbIntegrationPts = getGaussPts().size2();
    // check if this entity block lies on the matrix diagonal
    if (row_side == col_side && row_type == col_type) {
      isDiag = true;
    } else {
      isDiag = false;
    }
    // integrate and assemble local entity block matrix
    CHKERR iNtegrate(row_data, col_data);
    CHKERR aSsemble(row_data, col_data);
    MoFEMFunctionReturn(0);
  }

private:
  const double bEta;    ///< penalty parameter
  int nbRows;           ///< number of dofs on rows
  int nbCols;           ///< number of dofs on columns
  int nbIntegrationPts; ///< number of integration points
  bool isDiag;          ///< true if entity block is on the diagonal
  MatrixDouble locMat;  ///< local entity block matrix

  // Integrate local entity block matrix
  MoFEMErrorCode iNtegrate(EntitiesFieldData::EntData &row_data,
                           EntitiesFieldData::EntData &col_data) {
    MoFEMFunctionBegin;
    // set size of local entity block matrix and clear it
    locMat.resize(nbRows, nbCols, false);
    locMat.clear();
    // scale integration by face area and penalty parameter
    double area = getArea() * bEta;
    // get integration weights and row base functions
    auto t_w = getFTensor0IntegrationWeight();
    auto t_row_base = row_data.getFTensor0N();
    for (int gg = 0; gg != nbIntegrationPts; gg++) {
      // take into account the Jacobian
      const double alpha = t_w * area;
      // pointer to the first element of the local matrix
      FTensor::Tensor0<FTensor::PackPtr<double *, 1>> a(
          &*locMat.data().begin());
      for (int rr = 0; rr != nbRows; rr++) {
        // get column base functions at integration point gg
        auto t_col_base = col_data.getFTensor0N(gg, 0);
        for (int cc = 0; cc != nbCols; cc++) {
          a += alpha * t_row_base * t_col_base;
          ++t_col_base; // move to the next column base function
          ++a;          // move to the next element of the local matrix
        }
        ++t_row_base; // move to the next row base function
      }
      ++t_w; // move to the next integration weight
    }
    MoFEMFunctionReturn(0);
  }

  // Assemble local entity block matrix
  MoFEMErrorCode aSsemble(EntitiesFieldData::EntData &row_data,
                          EntitiesFieldData::EntData &col_data) {
    MoFEMFunctionBegin;
    // get pointers to the first global indices on rows and columns
    const int *row_indices = &*row_data.getIndices().data().begin();
    const int *col_indices = &*col_data.getIndices().data().begin();
    // take the matrix from the KSP or SNES solver context
    Mat B = getFEMethod()->ksp_B != PETSC_NULL ? getFEMethod()->ksp_B
                                               : getFEMethod()->snes_B;
    CHKERR MatSetValues(B, nbRows, row_indices, nbCols, col_indices,
                        &*locMat.data().begin(), ADD_VALUES);
    if (!isDiag) {
      // the matrix is symmetric; assemble the transposed block as well
      locMat = trans(locMat);
      CHKERR MatSetValues(B, nbCols, col_indices, nbRows, row_indices,
                          &*locMat.data().begin(), ADD_VALUES);
    }
    MoFEMFunctionReturn(0);
  }
};
int main(int argc, char *argv[]) {

  // Initialize PETSc and the MoFEM database
  MoFEM::Core::Initialize(&argc, &argv, (char *)0, help);

  // Create MOAB database
  moab::Core moab_core;
  moab::Interface &moab = moab_core;

  try {

    // Get command line options
    int order = 3;                    // default approximation order
    PetscBool flg_test = PETSC_FALSE; // true if run as ctest
    CHKERR PetscOptionsBegin(PETSC_COMM_WORLD, "", "Poisson's problem options",
                             "none");
    // set approximation order
    CHKERR PetscOptionsInt("-order", "approximation order", "", order, &order,
                           PETSC_NULL);
    // set testing, used by ctest
    CHKERR PetscOptionsBool("-test", "if true is ctest", "", flg_test,
                            &flg_test, PETSC_NULL);
    ierr = PetscOptionsEnd();
    CHKERRG(ierr);

    // Create MoFEM database and link it to MOAB
    MoFEM::Core mofem_core(moab);
    MoFEM::Interface &m_field = mofem_core;

    // Register DMMOFEM manager in PETSc
    CHKERR DMRegister_MoFEM("DMMOFEM");

    // Create vector to store the global approximation error
    Vec global_error;
    CHKERR PoissonExample::AuxFunctions(m_field).createGhostVec(&global_error);

    // Declare generic finite element instances; their implementation is
    // problem independent
    boost::shared_ptr<ForcesAndSourcesCore> domain_lhs_fe;
    boost::shared_ptr<ForcesAndSourcesCore> boundary_lhs_fe;
    boost::shared_ptr<ForcesAndSourcesCore> domain_rhs_fe;
    boost::shared_ptr<ForcesAndSourcesCore> boundary_rhs_fe;
    boost::shared_ptr<ForcesAndSourcesCore> domain_error;
    boost::shared_ptr<PoissonExample::PostProcFE> post_proc_volume;
    boost::shared_ptr<ForcesAndSourcesCore> null;
    boost::shared_ptr<ForcesAndSourcesCore> boundary_penalty_lhs_fe;
    {
      // Add problem-specific operators to the generic finite elements to
      // assemble matrices and vectors (last argument false: the transposed
      // constraint block is created separately)
      CHKERR PoissonExample::CreateFiniteElements(m_field)
          .createFEToAssembleMatrixAndVector(
              ExactFunction(), ExactLaplacianFunction(), domain_lhs_fe,
              boundary_lhs_fe, domain_rhs_fe, boundary_rhs_fe, false);
      // Add operators to evaluate the error on elements and the global error
      CHKERR PoissonExample::CreateFiniteElements(m_field)
          .createFEToEvaluateError(ExactFunction(), ExactFunctionGrad(),
                                   global_error, domain_error);
      // Create finite element to post-process results
      CHKERR PoissonExample::CreateFiniteElements(m_field)
          .creatFEToPostProcessResults(post_proc_volume);
      // Create boundary element assembling the penalty term
      const double beta = 1;
      boundary_penalty_lhs_fe = boost::shared_ptr<ForcesAndSourcesCore>(
          new FaceElementForcesAndSourcesCore(m_field));
      boundary_penalty_lhs_fe->getRuleHook = PoissonExample::FaceRule();
      boundary_penalty_lhs_fe->getOpPtrVector().push_back(new OpS(beta));
      // penalty contribution to the right-hand side
      boundary_rhs_fe->getOpPtrVector().push_back(
          new PoissonExample::Op_g(ExactFunction(), "U", beta));
    }
    // Get access to the Simple interface and build the problem
    Simple *simple_interface;
    {
      CHKERR m_field.getInterface(simple_interface);
      // get options from the command line
      CHKERR simple_interface->getOptions();
      // load mesh file to the database
      CHKERR simple_interface->loadFile();
      // add fields on the domain and on the boundary; a field is declared by
      // its space, approximation base and number of coefficients
      CHKERR simple_interface->addDomainField("U", H1, AINSWORTH_LEGENDRE_BASE,
                                              1);
      CHKERR simple_interface->addBoundaryField("L", H1,
                                                AINSWORTH_LEGENDRE_BASE, 1);
      // add field storing the error
      CHKERR simple_interface->addDataField("ERROR", L2,
                                            AINSWORTH_LEGENDRE_BASE, 1);
      // set approximation order of the fields
      CHKERR simple_interface->setFieldOrder("U", order);
      CHKERR simple_interface->setFieldOrder("L", order);
      CHKERR simple_interface->setFieldOrder("ERROR", 0); // constant on element
      // set up the problem
      CHKERR simple_interface->setUp();
    }
    // Get the DM of the global problem
    DM dm;
    CHKERR simple_interface->getDM(&dm);

    // Solve the problem; from this point mainly the PETSc interface is used
    {
      // Create the global right-hand-side vector and the vector of unknowns;
      // VecDuplicate copies only the structure of F, not its values
      Vec F, D;
      CHKERR DMCreateGlobalVector(dm, &F);
      CHKERR VecDuplicate(F, &D);

      // Data structures for the nested matrix
      DM dm_sub_KK, dm_sub_LU;
      ublas::matrix<Mat> nested_matrices(2, 2);
      ublas::vector<IS> nested_is(2);

      // Create sub-DM for the K (U-U) block and assemble K + S on it
      CHKERR DMCreate(PETSC_COMM_WORLD, &dm_sub_KK);
      CHKERR DMSetType(dm_sub_KK, "DMMOFEM");
      CHKERR DMMoFEMCreateSubDM(dm_sub_KK, dm, "SUB_KK");
      CHKERR DMSetFromOptions(dm_sub_KK);
      CHKERR DMMoFEMSetSquareProblem(dm_sub_KK, PETSC_TRUE);
      CHKERR DMMoFEMAddSubFieldRow(dm_sub_KK, "U");
      CHKERR DMMoFEMAddSubFieldCol(dm_sub_KK, "U");
      CHKERR DMMoFEMAddElement(dm_sub_KK,
                               simple_interface->getDomainFEName().c_str());
      CHKERR DMMoFEMAddElement(dm_sub_KK,
                               simple_interface->getBoundaryFEName().c_str());
      CHKERR DMSetUp(dm_sub_KK);
      // get the index set of the U block
      CHKERR DMMoFEMGetSubRowIS(dm_sub_KK, &nested_is[0]);
      CHKERR DMCreateMatrix(dm_sub_KK, &nested_matrices(0, 0));
      domain_lhs_fe->ksp_B = nested_matrices(0, 0);
      CHKERR DMoFEMLoopFiniteElements(
          dm_sub_KK, simple_interface->getDomainFEName(), domain_lhs_fe);
      boundary_penalty_lhs_fe->ksp_B = nested_matrices(0, 0);
      CHKERR DMoFEMLoopFiniteElements(dm_sub_KK,
                                      simple_interface->getBoundaryFEName(),
                                      boundary_penalty_lhs_fe);
      CHKERR MatAssemblyBegin(nested_matrices(0, 0), MAT_FINAL_ASSEMBLY);
      CHKERR MatAssemblyEnd(nested_matrices(0, 0), MAT_FINAL_ASSEMBLY);
      CHKERR DMDestroy(&dm_sub_KK);

      // Create sub-DM for the C (L-U) block; this block is rectangular and
      // only boundary elements are iterated
      CHKERR DMCreate(PETSC_COMM_WORLD, &dm_sub_LU);
      CHKERR DMSetType(dm_sub_LU, "DMMOFEM");
      CHKERR DMMoFEMCreateSubDM(dm_sub_LU, dm, "SUB_LU");
      CHKERR DMSetFromOptions(dm_sub_LU);
      CHKERR DMMoFEMSetSquareProblem(dm_sub_LU, PETSC_FALSE);
      CHKERR DMMoFEMAddSubFieldRow(dm_sub_LU, "L");
      CHKERR DMMoFEMAddSubFieldCol(dm_sub_LU, "U");
      CHKERR DMMoFEMAddElement(dm_sub_LU,
                               simple_interface->getBoundaryFEName().c_str());
      CHKERR DMSetUp(dm_sub_LU);
      // get the index set of the L block
      CHKERR DMMoFEMGetSubRowIS(dm_sub_LU, &nested_is[1]);
      CHKERR DMCreateMatrix(dm_sub_LU, &nested_matrices(1, 0));
      boundary_lhs_fe->ksp_B = nested_matrices(1, 0);
      CHKERR DMoFEMLoopFiniteElements(
          dm_sub_LU, simple_interface->getBoundaryFEName(), boundary_lhs_fe);
      CHKERR MatAssemblyBegin(nested_matrices(1, 0), MAT_FINAL_ASSEMBLY);
      CHKERR MatAssemblyEnd(nested_matrices(1, 0), MAT_FINAL_ASSEMBLY);
      // the off-diagonal block C^T is the transpose of C
      CHKERR MatTranspose(nested_matrices(1, 0), MAT_INITIAL_MATRIX,
                          &nested_matrices(0, 1));
      CHKERR DMDestroy(&dm_sub_LU);

      // Assemble the global right-hand-side vector using the global DM
      domain_rhs_fe->ksp_f = F;
      CHKERR DMoFEMLoopFiniteElements(dm, simple_interface->getDomainFEName(),
                                      domain_rhs_fe);
      boundary_rhs_fe->ksp_f = F;
      CHKERR DMoFEMLoopFiniteElements(
          dm, simple_interface->getBoundaryFEName(), boundary_rhs_fe);
      CHKERR VecAssemblyBegin(F);
      CHKERR VecAssemblyEnd(F);

      // the (1,1) block of the nested matrix is empty
      nested_matrices(1, 1) = PETSC_NULL;

      if (debug) {
        // print types of the assembled blocks and draw their structure
        MatType type;
        MatGetType(nested_matrices(0, 0), &type);
        cerr << "K " << type << endl;
        MatGetType(nested_matrices(1, 0), &type);
        cerr << "C " << type << endl;
        MatGetType(nested_matrices(0, 1), &type);
        cerr << "CT " << type << endl;
        std::string wait;
        cerr << "UU" << endl;
        MatView(nested_matrices(0, 0), PETSC_VIEWER_DRAW_WORLD);
        std::cin >> wait;
        cerr << "LU" << endl;
        MatView(nested_matrices(1, 0), PETSC_VIEWER_DRAW_WORLD);
        std::cin >> wait;
        cerr << "UL" << endl;
        MatView(nested_matrices(0, 1), PETSC_VIEWER_DRAW_WORLD);
        std::cin >> wait;
      }
      // Create the nested (block) matrix from the assembled blocks
      Mat A;
      CHKERR MatCreateNest(PETSC_COMM_WORLD, 2, &nested_is[0], 2,
                           &nested_is[0], &nested_matrices(0, 0), &A);

      // Create solver and set operators
      KSP solver;
      CHKERR KSPCreate(PETSC_COMM_WORLD, &solver);
      CHKERR KSPSetFromOptions(solver);
      CHKERR KSPSetOperators(solver, A, A);
      // Set up the field-split pre-conditioner
      PC pc;
      CHKERR KSPGetPC(solver, &pc);
      PetscBool is_pcfs = PETSC_FALSE;
      PetscObjectTypeCompare((PetscObject)pc, PCFIELDSPLIT, &is_pcfs);
      if (is_pcfs) {
        // set the index sets defining the two splits
        CHKERR PCFieldSplitSetIS(pc, NULL, nested_is[0]);
        CHKERR PCFieldSplitSetIS(pc, NULL, nested_is[1]);
      } else {
        SETERRQ(PETSC_COMM_WORLD, MOFEM_DATA_INCONSISTENCY,
                "Only works with pre-conditioner PCFIELDSPLIT");
      }
      CHKERR KSPSetUp(solver);

      // Solve the system and store the solution on the mesh
      CHKERR KSPSolve(solver, F, D);
      CHKERR DMoFEMMeshToGlobalVector(dm, D, INSERT_VALUES, SCATTER_REVERSE);

      // Clean up; solver, matrices and vectors are no longer needed
      CHKERR KSPDestroy(&solver);
      for (int i = 0; i != 2; i++) {
        for (int j = 0; j != 2; j++) {
          CHKERR MatDestroy(&nested_matrices(i, j));
        }
      }
      CHKERR MatDestroy(&A);
      CHKERR VecDestroy(&D);
      CHKERR VecDestroy(&F);
    }

    // Calculate the approximation error
    {
      CHKERR DMoFEMLoopFiniteElements(dm, simple_interface->getDomainFEName(),
                                      domain_error);
      CHKERR PoissonExample::AuxFunctions(m_field).assembleGhostVector(
          global_error);
      CHKERR PoissonExample::AuxFunctions(m_field).printError(global_error);
      if (flg_test == PETSC_TRUE) {
        CHKERR PoissonExample::AuxFunctions(m_field).testError(global_error);
      }
    }

    // Post-process results and write them to a file
    {
      CHKERR DMoFEMLoopFiniteElements(dm, simple_interface->getDomainFEName(),
                                      post_proc_volume);
      CHKERR post_proc_volume->writeFile("out_vol.h5m");
    }

    // Destroy DM and the ghost vector with the global error
    CHKERR DMDestroy(&dm);
    CHKERR VecDestroy(&global_error);
  }
  CATCH_ERRORS;

  // Finish work: clean memory, get statistics, etc.
  MoFEM::Core::Finalize();

  return 0;
}
We start with the declaration of the data structures necessary to create the nested matrix. A nested matrix (MATNEST) is a PETSc structure used to store block matrices; one can perform operations on it as on any other matrix type.
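In isolation, the creation of the nested matrix follows the pattern sketched below (a minimal illustration; K, CT and C stand for the already assembled blocks, and is0 and is1 for the index sets of the two sub-problems):

// Minimal sketch: build a 2x2 MATNEST from already assembled blocks.
// The (1,1) block is left NULL, as in the listing above.
Mat blocks[] = {K, CT, C, PETSC_NULL};
IS is_row_col[] = {is0, is1};
Mat A;
CHKERR MatCreateNest(PETSC_COMM_WORLD, 2, is_row_col, 2, is_row_col, blocks, &A);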
Assembly of the off-diagonal blocks is similar to that of the diagonal block; note, however, that an off-diagonal block is not a square matrix and that the integration runs only over the finite element entities on the boundary.
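For example, once the rectangular block C has been assembled, its transpose block is created explicitly, as in the listing above:

// create the transposed block from the assembled constraint matrix C
CHKERR MatTranspose(C, MAT_INITIAL_MATRIX, &CT);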
Finally, we assemble the global right-hand-side vector in the usual way, using the global DM.
In order to run the program, one should first go to the directory where the problem is located, compile the code and then run the executable, passing the solver options listed below (for example, through an options file).
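A typical invocation might look as follows (a sketch; the executable name analytical_poisson_field_split and the mesh file cube_2part.h5m are assumptions, adjust them to your build and mesh):

make -j4
mpirun -np 2 ./analytical_poisson_field_split -file_name cube_2part.h5m -order 3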
-ksp_type fgmres
-ksp_atol 1e-12
-ksp_rtol 0
-ksp_monitor ascii
-pc_fieldsplit_type schur
-pc_fieldsplit_schur_precondition self
-fieldsplit_0_ksp_type gmres
#-fieldsplit_0_ksp_monitor
-fieldsplit_0_ksp_max_it 100
-fieldsplit_0_ksp_atol 1e-6
-fieldsplit_0_ksp_rtol 1e-6
# direct-solver alternative for the first block (commented out):
#-fieldsplit_0_pc_type lu
#-fieldsplit_0_pc_factor_mat_solver_type mumps
-fieldsplit_0_pc_type gamg
-fieldsplit_0_pc_mg_smoothup 5
-fieldsplit_0_pc_mg_smoothdown 5
-fieldsplit_0_mg_coarse_ksp_type preonly
-fieldsplit_0_mg_coarse_pc_type hypre
-fieldsplit_0_mg_coarse_pc_hypre_type parasails
-fieldsplit_0_mg_coarse_pc_hypre_parasails_sym SPD
-fieldsplit_0_mg_coarse_pc_hypre_parasails_thresh 0.2
-fieldsplit_0_mg_coarse_pc_hypre_parasails_logging 1
-fieldsplit_1_ksp_type minres
-fieldsplit_1_ksp_monitor
-fieldsplit_1_ksp_max_it 100
-fieldsplit_1_ksp_atol 1e-2
-fieldsplit_1_ksp_rtol 1e-2
-fieldsplit_1_pc_type lsc
-fieldsplit_1_lsc_ksp_type cg
-fieldsplit_1_lsc_ksp_atol 1e-5
-fieldsplit_1_lsc_ksp_rtol 1e-5
-fieldsplit_1_lsc_ksp_max_it 100
-fieldsplit_1_lsc_pc_type hypre
-fieldsplit_1_lsc_pc_hypre_type parasails
-fieldsplit_1_lsc_pc_hypre_parasails_sym SPD
-fieldsplit_1_lsc_pc_hypre_parasails_nlevels 1
-fieldsplit_1_lsc_pc_hypre_parasails_thresh 0.2
-fieldsplit_1_lsc_pc_hypre_parasails_logging 1
Note that the KSP solver needs 5 iterations to converge, since in this case the Schur complement is only approximated by the preconditioner. If, on the other hand, we use the full Schur complement factorization together with a direct solver, the options change along the lines of the sketch below.
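A possible set of options for that variant (a sketch built only from standard PETSc options; tolerances are illustrative):

-ksp_type fgmres
-ksp_monitor ascii
-pc_fieldsplit_type schur
-pc_fieldsplit_schur_fact_type full
-fieldsplit_0_ksp_type preonly
-fieldsplit_0_pc_type lu
-fieldsplit_0_pc_factor_mat_solver_type mumps
-fieldsplit_1_ksp_type gmres
-fieldsplit_1_ksp_monitor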
Note that in that case we converge in three steps.