constexpr IntegrationType IT =
    IntegrationType::GAUSS; // selected integration type
  MonitorIncompressible(
      SmartPetscObj<DM> dm,
      std::pair<boost::shared_ptr<PostProcEle>,
                boost::shared_ptr<SkinPostProcEle>>
          pair_post_proc_fe,
      std::tuple<SmartPetscObj<Vec>, SmartPetscObj<VecScatter>> ux_scatter,
      std::tuple<SmartPetscObj<Vec>, SmartPetscObj<VecScatter>> uy_scatter,
      std::tuple<SmartPetscObj<Vec>, SmartPetscObj<VecScatter>> uz_scatter,
      std::array<double, 3> pass_field_eval_coords,
      boost::shared_ptr<SetPtsData> pass_field_eval_data,
      boost::shared_ptr<MatrixDouble> vec_field_ptr)
    // evaluate the solution field at the requested coordinates
    // (FieldEvaluatorInterface call, fragment)
        ->evalFEAtThePoint<SPACE_DIM>(
      // report the evaluated displacement components (2D)
          << "U_X: " << t_disp(0) << " U_Y: " << t_disp(1);
      // report the evaluated displacement components (3D)
          << "U_X: " << t_disp(0) << " U_Y: " << t_disp(1)
          << " U_Z: " << t_disp(2);
  auto make_vtk = [&]() {
    // write the volume post-processing file for this time step
    CHKERR postProcFe->writeFile(/* file name prefix + */
                                 boost::lexical_cast<std::string>(ts_step) +
                                 ".h5m");
    // write the skin (boundary) post-processing file
    CHKERR skinPostProcFe->writeFile("out_skin_incomp_elasticity_" +
                                     boost::lexical_cast<std::string>(ts_step) +
                                     ".h5m");
  };
  auto print_max_min = [&](auto &tuple, const std::string msg) {
    CHKERR VecScatterBegin(std::get<1>(tuple), ts_u, std::get<0>(tuple),
                           INSERT_VALUES, SCATTER_FORWARD);
    CHKERR VecScatterEnd(std::get<1>(tuple), ts_u, std::get<0>(tuple),
                         INSERT_VALUES, SCATTER_FORWARD);
    double max, min;
    CHKERR VecMax(std::get<0>(tuple), PETSC_NULLPTR, &max);
    CHKERR VecMin(std::get<0>(tuple), PETSC_NULLPTR, &min);
    MOFEM_LOG_C("INCOMP_ELASTICITY", Sev::inform,
                "%s time %3.4e min %3.4e max %3.4e", msg.c_str(), ts_t, min,
                max);
  };
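The scatter-then-report pattern used by print_max_min above can be reproduced with plain PETSc calls. The sketch below is a stand-alone illustration, not the tutorial's code; the names report_min_max, full_vec, sub_vec and scatter are hypothetical, and a recent PETSc (PetscCall, PETSC_SUCCESS) is assumed.

// Stand-alone sketch of the scatter-then-report pattern (hypothetical names).
#include <petscvec.h>

static PetscErrorCode report_min_max(Vec full_vec, Vec sub_vec,
                                     VecScatter scatter, double time) {
  PetscFunctionBeginUser;
  // pull the selected dofs out of the full solution vector
  PetscCall(VecScatterBegin(scatter, full_vec, sub_vec, INSERT_VALUES,
                            SCATTER_FORWARD));
  PetscCall(VecScatterEnd(scatter, full_vec, sub_vec, INSERT_VALUES,
                          SCATTER_FORWARD));
  PetscReal max, min;
  PetscCall(VecMax(sub_vec, PETSC_NULLPTR, &max));
  PetscCall(VecMin(sub_vec, PETSC_NULLPTR, &min));
  PetscCall(PetscPrintf(PETSC_COMM_WORLD,
                        "time %3.4e min %3.4e max %3.4e\n", time, min, max));
  PetscFunctionReturn(PETSC_SUCCESS);
}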
// Hooks specialising how element matrices are assembled: the first targets
// the system matrix A, the second the preconditioner matrix B.
    [](ForcesAndSourcesCore::UserDataOperator *op_ptr,
       const EntitiesFieldData::EntData &row_data,
       const EntitiesFieldData::EntData &col_data, MatrixDouble &m) {
      return MatSetValues(op_ptr->getKSPA(), row_data, col_data, m, ADD_VALUES);
    };

  using DomainEleOp::DomainEleOp; // inherit DomainEleOp constructors

    [](ForcesAndSourcesCore::UserDataOperator *op_ptr,
       const EntitiesFieldData::EntData &row_data,
       const EntitiesFieldData::EntData &col_data, MatrixDouble &m) {
      return MatSetValues(op_ptr->getKSPB(), row_data, col_data, m, ADD_VALUES);
    };
inline static double mu; // shear modulus (second Lame parameter)
  OpCalculateLameStress(double m_u, boost::shared_ptr<MatrixDouble> stress_ptr,
                        boost::shared_ptr<MatrixDouble> strain_ptr,
                        boost::shared_ptr<VectorDouble> pressure_ptr)

    // inside OpCalculateLameStress::doWork
    const size_t nb_gauss_pts = getGaussPts().size2();
    stressPtr->resize((DIM * (DIM + 1)) / 2, nb_gauss_pts);
    const double l_mu = mU;
    for (auto gg = 0; gg != nb_gauss_pts; ++gg) {
      // constitutive update: stress = pressure * identity + 2 * mu * strain
      t_stress(i, j) = t_pressure * t_kd(i, j) + 2. * l_mu * t_strain(i, j);
      // ...
    }
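For reference, the update inside the loop above reduces, in 2D Voigt-like notation, to adding the pressure to the diagonal strain terms only. The helper below is a minimal stand-alone sketch with hypothetical names (SymTensor2d, lame_stress_2d), not MoFEM code:

// Stand-alone sketch of sigma_ij = p*delta_ij + 2*mu*eps_ij with plain arrays.
#include <array>

using SymTensor2d = std::array<double, 3>; // symmetric 2D tensor {xx, yy, xy}

inline SymTensor2d lame_stress_2d(double p, double mu, const SymTensor2d &eps) {
  return {p + 2. * mu * eps[0],  // sigma_xx: pressure acts on the diagonal
          p + 2. * mu * eps[1],  // sigma_yy
          2. * mu * eps[2]};     // sigma_xy: off-diagonal, no pressure term
}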
  enum bases { AINSWORTH, DEMKOWICZ, LASBASETOPT };
  const char *list_bases[LASBASETOPT] = {"ainsworth", "demkowicz"};
  PetscInt choice_base_value = AINSWORTH;
  // read the chosen approximation base from the command line (fragment)
  CHKERR PetscOptionsGetEList(/* options, prefix, option name, */ list_bases,
                              LASBASETOPT, &choice_base_value, PETSC_NULLPTR);

  switch (choice_base_value) {
  case AINSWORTH:
    MOFEM_LOG("INCOMP_ELASTICITY", Sev::inform)
        << "Set AINSWORTH_LEGENDRE_BASE for displacements";
    break;
  case DEMKOWICZ:
    MOFEM_LOG("INCOMP_ELASTICITY", Sev::inform)
        << "Set DEMKOWICZ_JACOBI_BASE for displacements";
    break;
  }
  auto project_ho_geometry = [&]() {
    Projection10NodeCoordsOnField ent_method(mField, "GEOMETRY");
    return mField.loop_dofs("GEOMETRY", ent_method);
  };
  CHKERR project_ho_geometry();
  auto get_options = [&]() {
    // report the material data read from the command line
    MOFEM_LOG("INCOMP_ELASTICITY", Sev::inform)
        << /* ... Young modulus ... */;
    MOFEM_LOG("INCOMP_ELASTICITY", Sev::inform)
        << /* ... Poisson ratio ... */;
    // denominator of the first Lame parameter,
    // lambda = E * nu / ((1 + nu) * (1 - 2 * nu))
    const double lambda_denom =
        (1. + poisson_ratio) * (1. - 2. * poisson_ratio);
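The constants above follow the standard conversion from engineering constants to Lame parameters. The helper below is a minimal stand-alone sketch of that conversion; the names LameParameters and lame_from_engineering are hypothetical, not part of the tutorial:

// Standard conversion from (E, nu) to Lame parameters:
//   mu     = E / (2 (1 + nu))
//   lambda = E nu / ((1 + nu) (1 - 2 nu))
struct LameParameters {
  double mu;
  double lambda;
};

inline LameParameters lame_from_engineering(double young_modulus,
                                            double poisson_ratio) {
  const double lambda_denom =
      (1. + poisson_ratio) * (1. - 2. * poisson_ratio);
  return {young_modulus / (2. * (1. + poisson_ratio)),
          young_modulus * poisson_ratio / lambda_denom};
}

As poisson_ratio approaches 0.5 the denominator goes to zero and lambda grows without bound, which is why the formulation works with the reciprocal 1/lambda (cf. get_lambda_reciprocal below): it stays finite and tends to zero in the incompressible limit.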
  auto time_scale = boost::make_shared<TimeScale>();

  // geometry (higher-order) operators for the boundary right-hand-side
  // pipeline (fragment)
  ...(pipeline_mng->getOpBoundaryRhsPipeline(), {NOSPACE}, "GEOMETRY");
  // surface forces read from "FORCE" blocksets, scaled in time (fragment)
  ...(pipeline_mng->getOpBoundaryRhsPipeline(), mField, "U", {time_scale},
      "FORCE", Sev::inform);
  // body forces read from "BODY_FORCE" blocksets, scaled in time (fragment)
  ...(pipeline_mng->getOpDomainRhsPipeline(), mField, "U", {time_scale},
      "BODY_FORCE", Sev::inform);
  // remove displacement components constrained by the REMOVE_* blocksets
  CHKERR bc_mng->removeBlockDOFsOnEntities(simple->getProblemName(),
                                           "REMOVE_X", /* ... */);
  CHKERR bc_mng->removeBlockDOFsOnEntities(simple->getProblemName(),
                                           "REMOVE_Y", /* ... */);
  CHKERR bc_mng->removeBlockDOFsOnEntities(simple->getProblemName(),
                                           "REMOVE_Z", /* ... */);
  // mark the remaining essential (displacement) boundary conditions
  ...(simple->getProblemName(), "U");
  // quadrature order as a function of the element approximation order
  auto integration_rule_vol = [](int, int, int approx_order) {
  auto add_domain_base_ops = [&](auto &pip) {

  auto add_domain_ops_lhs = [&](auto &pip) {
    auto mat_D_ptr = boost::make_shared<MatrixDouble>();

    // coefficient of the mixed (div u, p) term: constant -1 (fragment)
        [](const double, const double, const double) constexpr { return -1.; },

    pip.push_back(new OpGradSymTensorGrad("U", "U", mat_D_ptr));

    // pressure mass term scaled by 1/lambda (vanishes in the incompressible
    // limit)
    pip.push_back(new OpMassPressure("P", "P", get_lambda_reciprocal));
    // pressure stabilisation term with a small user-defined coefficient
    pip.push_back(new OpMassPressureStab(
        "P", "P", [eps_stab](double, double, double) { return eps_stab; }));
  auto add_domain_ops_rhs = [&](auto &pip) {

    auto pressure_ptr = boost::make_shared<VectorDouble>();
    auto div_u_ptr = boost::make_shared<VectorDouble>();
    auto grad_u_ptr = boost::make_shared<MatrixDouble>();
    auto strain_ptr = boost::make_shared<MatrixDouble>();

    pip.push_back(new OpDomainGradTimesTensor("U", strain_ptr, get_four_mu));
    pip.push_back(new OpDivDeltaUTimesP("U", pressure_ptr, minus_one));
    pip.push_back(new OpBaseTimesScalarValues("P", div_u_ptr, minus_one));
    pip.push_back(new OpBaseTimesScalarValues("P", pressure_ptr,
                                              get_lambda_reciprocal));
  CHKERR add_domain_base_ops(pip_mng->getOpDomainLhsPipeline());
  CHKERR add_domain_base_ops(pip_mng->getOpDomainRhsPipeline());
  CHKERR add_domain_ops_lhs(pip_mng->getOpDomainLhsPipeline());
  CHKERR add_domain_ops_rhs(pip_mng->getOpDomainRhsPipeline());

  CHKERR pip_mng->setDomainRhsIntegrationRule(integration_rule_vol);
  CHKERR pip_mng->setDomainLhsIntegrationRule(integration_rule_vol);
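The integration-rule hook wired in above receives (row order, column order, approximation order) and returns the quadrature order to use; its body is not preserved in the fragments. A commonly used choice, shown purely as a hypothetical example, grows linearly with the approximation order:

// Hypothetical example of a volume integration-rule hook; the rule actually
// used by the tutorial is not shown in the fragments above.
auto integration_rule_vol_example = [](int /*order_row*/, int /*order_col*/,
                                       int approx_order) {
  return 2 * approx_order + 1; // safe, commonly used over-integration
};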
static boost::shared_ptr<SetUpSchur>
createSetUpSchur(MoFEM::Interface &m_field);
  auto set_section_monitor = [&](auto solver) {
    SNES snes;
    CHKERR TSGetSNES(solver, &snes);
    PetscViewerAndFormat *vf;
    CHKERR PetscViewerAndFormatCreate(PETSC_VIEWER_STDOUT_WORLD,
                                      PETSC_VIEWER_DEFAULT, &vf);
    // report per-field residuals at each nonlinear iteration
    CHKERR SNESMonitorSet(snes,
                          (MoFEMErrorCode(*)(SNES, PetscInt, PetscReal,
                                             void *))SNESMonitorFields,
                          vf, /* ... */);
  };
  auto scatter_create = [&](auto D, auto coeff) {
    SmartPetscObj<IS> is;
    CHKERR is_manager->isCreateProblemFieldAndRank(simple->getProblemName(),
                                                   ROW, "U", coeff, coeff, is);
    int loc_size;
    CHKERR ISGetLocalSize(is, &loc_size);
    // ... create a distributed vector v matching the selected dofs ...
    VecScatter scatter;
    CHKERR VecScatterCreate(D, is, v, PETSC_NULLPTR, &scatter);
    // ... return the (vector, scatter) pair
  };
  auto create_post_process_elements = [&]() {
    auto pp_fe = boost::make_shared<PostProcEle>(mField);

    // operators evaluating fields on the volume elements
    auto push_vol_ops = [this](auto &pip) {
      // ...
    };

    // operators mapping the evaluated fields onto the post-processing mesh
    auto push_vol_post_proc_ops = [this](auto &pp_fe, auto &&p) {
      auto &pip = pp_fe->getOpPtrVector();

      auto x_ptr = boost::make_shared<MatrixDouble>();
      auto u_ptr = boost::make_shared<MatrixDouble>();
      auto pressure_ptr = boost::make_shared<VectorDouble>();
      auto div_u_ptr = boost::make_shared<VectorDouble>();
      auto grad_u_ptr = boost::make_shared<MatrixDouble>();
      auto strain_ptr = boost::make_shared<MatrixDouble>();
      auto stress_ptr = boost::make_shared<MatrixDouble>();

      // recover the stress from pressure and strain
      pip.push_back(new OpCalculateLameStress<SPACE_DIM>(
          mu, stress_ptr, strain_ptr, pressure_ptr));

      // map scalar, vector and symmetric-tensor fields to the output mesh
      pip.push_back(new OpPPMap(
          pp_fe->getPostProcMesh(), pp_fe->getMapGaussPts(),
          {{"P", pressure_ptr}},
          {{"U", u_ptr}, {"GEOMETRY", x_ptr}},
          {},
          {{"STRAIN", strain_ptr}, {"STRESS", stress_ptr}}));
      // ...
    };
    // optional volume post-processing, enabled from the command line
    auto vol_post_proc = [this, push_vol_post_proc_ops, push_vol_ops]() {
      PetscBool post_proc_vol = PETSC_FALSE;
      CHKERR PetscOptionsGetBool(/* ... */,
                                 &post_proc_vol, PETSC_NULLPTR);
      if (post_proc_vol == PETSC_FALSE)
        return boost::shared_ptr<PostProcEle>();
      auto pp_fe = boost::make_shared<PostProcEle>(mField);
      CHK_MOAB_THROW(
          push_vol_post_proc_ops(pp_fe, push_vol_ops(pp_fe->getOpPtrVector())),
          "push_vol_post_proc_ops");
      return pp_fe;
    };

    // skin (boundary surface) post-processing, enabled by default
    auto skin_post_proc = [this, push_vol_post_proc_ops, push_vol_ops]() {
      PetscBool post_proc_skin = PETSC_TRUE;
      CHKERR PetscOptionsGetBool(/* ... */,
                                 &post_proc_skin, PETSC_NULLPTR);
      if (post_proc_skin == PETSC_FALSE)
        return boost::shared_ptr<SkinPostProcEle>();
      auto pp_fe = boost::make_shared<SkinPostProcEle>(mField);
      // side element evaluating volume fields on the skin
      pp_fe->getOpPtrVector().push_back(op_side);
      CHK_MOAB_THROW(push_vol_post_proc_ops(
                         pp_fe, push_vol_ops(op_side->getOpPtrVector())),
                     "push_vol_post_proc_ops");
      return pp_fe;
    };

    return std::make_pair(vol_post_proc(), skin_post_proc());
  };
  boost::shared_ptr<SetPtsData> field_eval_data;
  boost::shared_ptr<MatrixDouble> vector_field_ptr;

  vector_field_ptr = boost::make_shared<MatrixDouble>();
  // build the field-evaluator tree over the domain finite elements (fragment)
  ...(field_eval_data, simple->getDomainFEName());
  field_eval_data->setEvalPoints(fieldEvalCoords.data(), 1);
  auto no_rule = [](int, int, int) { return -1; };
  auto field_eval_fe_ptr = field_eval_data->feMethodPtr.lock();
  field_eval_fe_ptr->getRuleHook = no_rule;
  // operator collecting the displacement values at the evaluation point
  field_eval_fe_ptr->getOpPtrVector().push_back(
  auto set_time_monitor = [&](auto dm, auto solver) {
    boost::shared_ptr<MonitorIncompressible> monitor_ptr(
        new MonitorIncompressible(dm, create_post_process_elements(), uXScatter,
                                  uYScatter, uZScatter, fieldEvalCoords,
                                  field_eval_data, vector_field_ptr));
    boost::shared_ptr<ForcesAndSourcesCore> null;
    CHKERR DMMoFEMTSSetMonitor(dm, solver, simple->getDomainFEName(),
                               monitor_ptr, null, null);
  };
  auto set_essential_bc = [&]() {
    // essential (displacement) boundary conditions are enforced through
    // pre/post-processing hooks attached to the TS context
    auto pre_proc_ptr = boost::make_shared<FEMethod>();
    auto post_proc_rhs_ptr = boost::make_shared<FEMethod>();

    auto time_scale = boost::make_shared<TimeScale>();
    pre_proc_ptr->preProcessHook = /* essential BC pre-processor */ (
        mField, pre_proc_ptr, {time_scale}, false);
    post_proc_rhs_ptr->postProcessHook =
        /* essential BC right-hand-side post-processor */;

    auto ts_ctx_ptr = getDMTsCtx(simple->getDM());
    ts_ctx_ptr->getPreProcessIFunction().push_front(pre_proc_ptr);
    ts_ctx_ptr->getPreProcessIJacobian().push_front(pre_proc_ptr);
    ts_ctx_ptr->getPostProcessIFunction().push_back(post_proc_rhs_ptr);
  };
  auto set_schur_pc = [&](auto solver) {
    SNES snes;
    CHKERR TSGetSNES(solver, &snes);
    KSP ksp;
    CHKERR SNESGetKSP(snes, &ksp);
    PC pc;
    CHKERR KSPGetPC(ksp, &pc);
    PetscBool is_pcfs = PETSC_FALSE;
    PetscObjectTypeCompare((PetscObject)pc, PCFIELDSPLIT, &is_pcfs);
    boost::shared_ptr<SetUpSchur> schur_ptr;

    // hooks zeroing and assembling the system matrices around each Jacobian
    // evaluation
    auto pre_proc_schur_lhs_ptr = boost::make_shared<FEMethod>();
    auto post_proc_schur_lhs_ptr = boost::make_shared<FEMethod>();

    pre_proc_schur_lhs_ptr->preProcessHook = [pre_proc_schur_lhs_ptr]() {
      MOFEM_LOG("INCOMP_ELASTICITY", Sev::verbose) << "Lhs Zero matrices";
      CHKERR MatZeroEntries(pre_proc_schur_lhs_ptr->A);
      CHKERR MatZeroEntries(pre_proc_schur_lhs_ptr->B);
    };

    post_proc_schur_lhs_ptr->postProcessHook = [this,
                                                post_proc_schur_lhs_ptr]() {
      MOFEM_LOG("INCOMP_ELASTICITY", Sev::verbose) << "Lhs Assemble Begin";
      *(post_proc_schur_lhs_ptr->matAssembleSwitch) = false;
      CHKERR MatAssemblyBegin(post_proc_schur_lhs_ptr->A, MAT_FINAL_ASSEMBLY);
      CHKERR MatAssemblyEnd(post_proc_schur_lhs_ptr->A, MAT_FINAL_ASSEMBLY);
      // apply essential boundary conditions to the diagonal of A (fragment)
      ...(mField, post_proc_schur_lhs_ptr, 1., /* ... */);
      CHKERR MatAssemblyBegin(post_proc_schur_lhs_ptr->B, MAT_FINAL_ASSEMBLY);
      CHKERR MatAssemblyEnd(post_proc_schur_lhs_ptr->B, MAT_FINAL_ASSEMBLY);
      // B <- B + A, so the preconditioner matrix carries the full operator
      CHKERR MatAXPY(post_proc_schur_lhs_ptr->B, 1, post_proc_schur_lhs_ptr->A,
                     SAME_NONZERO_PATTERN);
      MOFEM_LOG("INCOMP_ELASTICITY", Sev::verbose) << "Lhs Assemble End";
    };

    auto ts_ctx_ptr = getDMTsCtx(simple->getDM());
    ts_ctx_ptr->getPreProcessIJacobian().push_front(pre_proc_schur_lhs_ptr);
    ts_ctx_ptr->getPostProcessIJacobian().push_back(post_proc_schur_lhs_ptr);
    if (is_pcfs == PETSC_TRUE) {
      if (AT == AssemblyType::SCHUR) {
        // set up the Schur complement preconditioner
        CHK_MOAB_THROW(schur_ptr->setUp(solver), "setup schur preconditioner");

        auto set_pcfieldsplit_preconditioned_ts = [&](auto solver) {
          auto name_prb = simple->getProblemName();
          // index set selecting the pressure block of the split
          SmartPetscObj<IS> is_p;
          CHKERR is_manager->isCreateProblemFieldAndRank(
              name_prb, ROW, "P", 0, 1, is_p);
          CHKERR PCFieldSplitSetIS(pc, PETSC_NULLPTR, is_p);
        };
        CHK_MOAB_THROW(set_pcfieldsplit_preconditioned_ts(solver),
                       "set pcfieldsplit preconditioned");
      }
      return boost::make_tuple(schur_ptr, A, B);
    }

    return boost::make_tuple(schur_ptr, A, B);
  };
  auto dm = simple->getDM();
  auto D = createDMVector(dm);

  uXScatter = scatter_create(D, 0);
  uYScatter = scatter_create(D, 1);
  if constexpr (SPACE_DIM == 3)
    uZScatter = scatter_create(D, 2);

  CHKERR set_essential_bc();

  auto solver = pip_mng->createTSIM();
  CHKERR TSSetFromOptions(solver);

  CHKERR set_section_monitor(solver);
  CHKERR set_time_monitor(dm, solver);
  auto [schur_pc_ptr, A, B] = set_schur_pc(solver);

  CHKERR TSSetSolution(solver, D);
  CHKERR TSSolve(solver, NULL);
  double Ux_ref, Uy_ref, Uz_ref;

  auto vector_field_ptr = boost::make_shared<MatrixDouble>();
  auto field_eval_data =
      /* ... field-evaluator data for the domain elements ... */;
  // ... build the evaluator tree and set the evaluation point ...
  auto no_rule = [](int, int, int) { return -1; };
  auto field_eval_fe_ptr = field_eval_data->feMethodPtr.lock();
  field_eval_fe_ptr->getRuleHook = no_rule;
  field_eval_fe_ptr->getOpPtrVector().push_back(
      /* ... operator collecting the displacement values ... */);
  // evaluate the displacement field at the test point
      ->evalFEAtThePoint<SPACE_DIM>(
          /* ... */);

  // count, across all ranks, on how many processes the point was found
  auto eval_num_vec = createVectorMPI(/* ... */);
  CHKERR VecZeroEntries(eval_num_vec);
  if (vector_field_ptr->size1()) {
    CHKERR VecSetValue(eval_num_vec, 0, 1, ADD_VALUES);
  }
  CHKERR VecAssemblyBegin(eval_num_vec);
  CHKERR VecAssemblyEnd(eval_num_vec);

  double eval_num;
  CHKERR VecSum(eval_num_vec, &eval_num);
  if (!(int)eval_num) {
    SETERRQ(PETSC_COMM_SELF, MOFEM_ATOM_TEST_INVALID,
            "atom test %d failed: did not find elements to evaluate "
            "the field, check the coordinates",
            atom_test);
  }

  if (vector_field_ptr->size1()) {
    // reference values depend on the selected atom test, e.g.
    test_name = "Cooke's Membrane";
    // an unknown test id is reported with
    //   "atom test %d does not exist", atom_test
  }

  // true when the relative error exceeds the given tolerance
  auto calculate_error = [](double computed, double reference,
                            double tolerance) {
    return fabs(computed - reference) > tolerance * fabs(reference);
  };

  if (calculate_error(t_disp(0), Ux_ref, eps) ||
      calculate_error(t_disp(1), Uy_ref, eps) ||
      (SPACE_DIM == 3 && calculate_error(t_disp(2), Uz_ref, eps))) {
    SETERRQ(PETSC_COMM_SELF, MOFEM_ATOM_TEST_INVALID,
            "atom test %d:%s failed: Displacement exceeds tolerance: Ux = "
            /* ... */,
            atom_test, test_name.c_str(), t_disp(0), t_disp(1));
  }
int main(int argc, char *argv[]) {

  // initialise MoFEM, PETSc and MOAB
  const char param_file[] = "param_file.petsc";
  MoFEM::Core::Initialize(&argc, &argv, param_file, help);

  // set up logging for the "INCOMP_ELASTICITY" channel
  auto core_log = logging::core::get();

  // register the MoFEM DM type with PETSc
  DMType dm_name = "DMMOFEM";
  CHKERR DMRegister_MoFEM(dm_name);

  // create the MOAB mesh database
  moab::Core mb_instance;
  moab::Interface &moab = mb_instance;
  auto dm = simple->getDM();

  SNES snes;
  CHKERR TSGetSNES(solver, &snes);
  KSP ksp;
  CHKERR SNESGetKSP(snes, &ksp);
  PC pc;
  CHKERR KSPGetPC(ksp, &pc);

  MOFEM_LOG("INCOMP_ELASTICITY", Sev::inform) << "Setup Schur pc";

  // guard: the Schur matrix must not be allocated yet
  if (S) {
    CHK_THROW_MESSAGE(
        MOFEM_DATA_INCONSISTENCY,
        "It is expected that the Schur matrix is not allocated. This is "
        "possible only if the PC is set up twice");
  }
  auto create_sub_dm = [&]() {
    // sub-DM restricted to the displacement problem, used for the Schur
    // complement
    // ...
  };

  auto set_up = [&]() {
    // ...
  };

  subDM = create_sub_dm();

  // operator assembling the Schur complement while the main system is
  // assembled (fragment)
  pip->getOpDomainLhsPipeline().push_back(
      /* ... Schur assembly operator ... */);

  auto pre_proc_schur_lhs_ptr = boost::make_shared<FEMethod>();
  auto post_proc_schur_lhs_ptr = boost::make_shared<FEMethod>();

  pre_proc_schur_lhs_ptr->preProcessHook = [this]() {
    // zero the Schur matrix before each assembly
    // ...
  };

  post_proc_schur_lhs_ptr->postProcessHook = [this, ao_up,
                                              post_proc_schur_lhs_ptr]() {
    CHKERR MatAssemblyBegin(S, MAT_FINAL_ASSEMBLY);
    CHKERR MatAssemblyEnd(S, MAT_FINAL_ASSEMBLY);
    // apply essential boundary conditions to the diagonal of S (fragment)
    ...(mField, post_proc_schur_lhs_ptr, 1, S, ao_up)();
  };
  auto print_mat_norm = [this](auto a, std::string prefix) {
    double nrm;
    CHKERR MatNorm(a, NORM_FROBENIUS, &nrm);
    MOFEM_LOG("INCOMP_ELASTICITY", Sev::verbose)
        << prefix << " norm = " << nrm;
  };
  CHKERR print_mat_norm(S, "S");

  auto ts_ctx_ptr = getDMTsCtx(dm);
  ts_ctx_ptr->getPreProcessIJacobian().push_front(pre_proc_schur_lhs_ptr);
  ts_ctx_ptr->getPostProcessIJacobian().push_back(post_proc_schur_lhs_ptr);

  // tell PCFIELDSPLIT which dofs form the pressure block and provide the
  // user-assembled Schur complement approximation
  SmartPetscObj<IS> is_p;
  CHKERR is_manager->isCreateProblemFieldAndRank(
      simple->getProblemName(), ROW, "P", 0, 1, is_p);
  CHKERR PCFieldSplitSetIS(pc, NULL, is_p);
  CHKERR PCFieldSplitSetSchurPre(pc, PC_FIELDSPLIT_SCHUR_PRE_USER, S);
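The last two calls register the pressure index set with PETSc's PCFIELDSPLIT and hand over S as the user-provided preconditioner for the Schur complement block. A stand-alone sketch of the same configuration with plain PETSc calls follows; the function name configure_schur_pc is hypothetical, a recent PETSc is assumed, and in the tutorial the PC and split types typically come from command-line options rather than being hard-coded:

// Stand-alone sketch: configure a Schur field split with a user-supplied
// Schur complement approximation (assumed objects: pc, is_p, S).
#include <petscpc.h>

static PetscErrorCode configure_schur_pc(PC pc, IS is_p, Mat S) {
  PetscFunctionBeginUser;
  PetscCall(PCSetType(pc, PCFIELDSPLIT));
  PetscCall(PCFieldSplitSetType(pc, PC_COMPOSITE_SCHUR));
  // register the pressure block; a second call with the displacement IS would
  // define the complementary split
  PetscCall(PCFieldSplitSetIS(pc, PETSC_NULLPTR, is_p));
  // use the user-assembled matrix S to precondition the Schur complement block
  PetscCall(PCFieldSplitSetSchurPre(pc, PC_FIELDSPLIT_SCHUR_PRE_USER, S));
  PetscFunctionReturn(PETSC_SUCCESS);
}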
boost::shared_ptr<SetUpSchur>
SetUpSchur::createSetUpSchur(MoFEM::Interface &m_field) {
  return boost::shared_ptr<SetUpSchur>(new SetUpSchurImpl(m_field));
}