static char help[] = "...\n\n";

// Data shared between the operators: values, rates and gradients of the field
// "u" at integration points, the mass matrix M and the KSP used to invert it.
struct CommonData {
  MatrixDouble grad;
  VectorDouble val;
  VectorDouble dot_val;
  SmartPetscObj<Mat> M;
  SmartPetscObj<KSP> ksp;
};
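// Assembles the mass matrix M_ij = int_Omega N_i N_j dOmega into
// commonData->M. The operator runs on row/column pairs (OPROWCOL) and, since
// the matrix is symmetric, also scatters the transposed block when row and
// column entities differ.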
struct OpAssembleMass : OpEle {

  OpAssembleMass(boost::shared_ptr<CommonData> &data)
      : OpEle("u", "u", OpEle::OPROWCOL), commonData(data) {
    sYmm = true; // the mass matrix is symmetric
  }

  MoFEMErrorCode doWork(int row_side, int col_side, EntityType row_type,
                        EntityType col_type, EntData &row_data,
                        EntData &col_data) {
    MoFEMFunctionBegin;
    const int nb_row_dofs = row_data.getIndices().size();
    const int nb_col_dofs = col_data.getIndices().size();
    if (nb_row_dofs && nb_col_dofs) {

      const int nb_integration_pts = getGaussPts().size2();
      mat.resize(nb_row_dofs, nb_col_dofs, false);
      mat.clear();

      auto t_row_base = row_data.getFTensor0N();
      auto t_w = getFTensor0IntegrationWeight();
      const double vol = getMeasure();

      for (int gg = 0; gg != nb_integration_pts; ++gg) {
        const double a = t_w * vol;
        for (int rr = 0; rr != nb_row_dofs; ++rr) {
          auto t_col_base = col_data.getFTensor0N(gg, 0);
          for (int cc = 0; cc != nb_col_dofs; ++cc) {
            mat(rr, cc) += a * t_row_base * t_col_base;
            ++t_col_base;
          }
          ++t_row_base;
        }
        ++t_w;
      }

      CHKERR MatSetValues(commonData->M, row_data, col_data, &mat(0, 0),
                          ADD_VALUES);
      if (row_side != col_side || row_type != col_type) {
        transMat.resize(nb_col_dofs, nb_row_dofs, false);
        noalias(transMat) = trans(mat);
        CHKERR MatSetValues(commonData->M, col_data, row_data, &transMat(0, 0),
                            ADD_VALUES);
      }
    }
    MoFEMFunctionReturn(0);
  }

private:
  MatrixDouble mat, transMat;
  boost::shared_ptr<CommonData> commonData;
};
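// Assembles the slow (explicit) part of the IMEX split: the logistic source
// term r * u * (1 - u / k), with r the rate factor and k the carrying
// capacity of the model. The assembled vector is later multiplied by the
// inverse of the mass matrix (see solve_for_g in main).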
struct OpAssembleSlowRhs : OpEle {

  OpAssembleSlowRhs(boost::shared_ptr<CommonData> &data)
      : OpEle("u", OpEle::OPROW), commonData(data) {}

  MoFEMErrorCode doWork(int side, EntityType type, EntData &data) {
    MoFEMFunctionBegin;
    const int nb_dofs = data.getIndices().size();
    if (nb_dofs) {

      vecF.resize(nb_dofs, false);
      vecF.clear();

      const int nb_integration_pts = getGaussPts().size2();
      auto t_val = getFTensor0FromVec(commonData->val);
      auto t_base = data.getFTensor0N();
      auto t_w = getFTensor0IntegrationWeight();
      const double vol = getMeasure();

      for (int gg = 0; gg != nb_integration_pts; ++gg) {
        const double a = vol * t_w;
        const double f = a * r * t_val * (1 - t_val / k);
        for (int rr = 0; rr != nb_dofs; ++rr) {
          const double b = f * t_base;
          vecF[rr] += b;
          ++t_base;
        }
        ++t_val;
        ++t_w;
      }

      CHKERR VecSetOption(getFEMethod()->ts_F, VEC_IGNORE_NEGATIVE_INDICES,
                          PETSC_TRUE);
      CHKERR VecSetValues(getFEMethod()->ts_F, data, &*vecF.begin(),
                          ADD_VALUES);
    }
    MoFEMFunctionReturn(0);
  }

private:
  boost::shared_ptr<CommonData> commonData;
  VectorDouble vecF;
};
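// Assembles the stiff (implicit) residual tested with the base functions:
// int_Omega (N_i * du/dt + D * grad(N_i) . grad(u)) dOmega, where D is the
// diffusivity constant of the model.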
template <int DIM> struct OpAssembleStiffRhs : OpEle {

  OpAssembleStiffRhs(boost::shared_ptr<CommonData> &data)
      : OpEle("u", OpEle::OPROW), commonData(data) {}

  MoFEMErrorCode doWork(int side, EntityType type, EntData &data) {
    MoFEMFunctionBegin;
    const int nb_dofs = data.getIndices().size();
    if (nb_dofs) {

      vecF.resize(nb_dofs, false);
      vecF.clear();

      const int nb_integration_pts = getGaussPts().size2();
      auto t_dot_val = getFTensor0FromVec(commonData->dot_val);
      auto t_grad = getFTensor1FromMat<DIM>(commonData->grad);
      auto t_base = data.getFTensor0N();
      auto t_diff_base = data.getFTensor1DiffN<DIM>();
      auto t_w = getFTensor0IntegrationWeight();

      FTensor::Index<'i', DIM> i;
      const double vol = getMeasure();
      for (int gg = 0; gg != nb_integration_pts; ++gg) {
        const double a = vol * t_w;
        for (int rr = 0; rr != nb_dofs; ++rr) {
          vecF[rr] +=
              a * (t_base * t_dot_val + D * t_diff_base(i) * t_grad(i));
          ++t_diff_base;
          ++t_base;
        }
        ++t_dot_val;
        ++t_grad;
        ++t_w;
      }

      CHKERR VecSetOption(getFEMethod()->ts_F, VEC_IGNORE_NEGATIVE_INDICES,
                          PETSC_TRUE);
      CHKERR VecSetValues(getFEMethod()->ts_F, data, &*vecF.begin(),
                          ADD_VALUES);
    }
    MoFEMFunctionReturn(0);
  }

private:
  boost::shared_ptr<CommonData> commonData;
  VectorDouble vecF;
};
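// Assembles the tangent (Jacobian) of the stiff part used by the implicit
// stage: ts_a * N_i * N_j + D * grad(N_i) . grad(N_j), where ts_a is the
// shift d(du/dt)/du supplied by the PETSc time integrator.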
template <int DIM> struct OpAssembleStiffLhs : OpEle {

  OpAssembleStiffLhs(boost::shared_ptr<CommonData> &data)
      : OpEle("u", "u", OpEle::OPROWCOL), commonData(data) {
    sYmm = true; // the tangent is symmetric
  }

  MoFEMErrorCode doWork(int row_side, int col_side, EntityType row_type,
                        EntityType col_type, EntData &row_data,
                        EntData &col_data) {
    MoFEMFunctionBegin;
    const int nb_row_dofs = row_data.getIndices().size();
    const int nb_col_dofs = col_data.getIndices().size();
    if (nb_row_dofs && nb_col_dofs) {

      mat.resize(nb_row_dofs, nb_col_dofs, false);
      mat.clear();

      const int nb_integration_pts = getGaussPts().size2();
      auto t_row_base = row_data.getFTensor0N();
      auto t_row_diff_base = row_data.getFTensor1DiffN<DIM>();
      auto t_w = getFTensor0IntegrationWeight();

      FTensor::Index<'i', DIM> i;
      const double ts_a = getFEMethod()->ts_a;
      const double vol = getMeasure();

      for (int gg = 0; gg != nb_integration_pts; ++gg) {
        const double a = vol * t_w;
        for (int rr = 0; rr != nb_row_dofs; ++rr) {
          auto t_col_base = col_data.getFTensor0N(gg, 0);
          auto t_col_diff_base = col_data.getFTensor1DiffN<DIM>(gg, 0);
          for (int cc = 0; cc != nb_col_dofs; ++cc) {
            mat(rr, cc) += a * (t_row_base * t_col_base * ts_a +
                                D * t_row_diff_base(i) * t_col_diff_base(i));
            ++t_col_base;
            ++t_col_diff_base;
          }
          ++t_row_base;
          ++t_row_diff_base;
        }
        ++t_w;
      }

      CHKERR MatSetValues(getFEMethod()->ts_B, row_data, col_data, &mat(0, 0),
                          ADD_VALUES);
      if (row_side != col_side || row_type != col_type) {
        transMat.resize(nb_col_dofs, nb_row_dofs, false);
        noalias(transMat) = trans(mat);
        CHKERR MatSetValues(getFEMethod()->ts_B, col_data, row_data,
                            &transMat(0, 0), ADD_VALUES);
      }
    }
    MoFEMFunctionReturn(0);
  }

private:
  boost::shared_ptr<CommonData> commonData;
  MatrixDouble mat, transMat;
};
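// Monitor called by the TS solver at the end of each time step; every
// save_every_nth_step steps it runs the post-processing element over the
// domain and writes the result to an .h5m file.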
"out_level_" + boost::lexical_cast<std::string>(
ts_step) +
".h5m");
}
}
private:
boost::shared_ptr<PostProcEle>
postProc;
};
};
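// main: set up the mesh and the scalar field "u", build the finite-element
// pipelines for the mass matrix and for the stiff/slow parts of the problem,
// configure the IMEX (ARKIMEX) time integrator and run it.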
int main(int argc, char *argv[]) {

  // initialize MoFEM, PETSc and MOAB
  MoFEM::Core::Initialize(&argc, &argv, NULL, help);

  try {

    moab::Core mb_instance;
    moab::Interface &moab = mb_instance;
    MoFEM::Core core(moab);
    MoFEM::Interface &m_field = core;

    // register the MoFEM DM manager
    DMType dm_name = "DMMOFEM";
    CHKERR DMRegister_MoFEM(dm_name);

    // problem set-up with the Simple interface: load the mesh and add the
    // scalar field "u" with a user-selected approximation order
    Simple *simple_interface;
    CHKERR m_field.getInterface(simple_interface);
    CHKERR simple_interface->getOptions();
    CHKERR simple_interface->loadFile();
    CHKERR simple_interface->addDomainField("u", H1, AINSWORTH_LEGENDRE_BASE,
                                            1);
    int order = 4; // default order (assumed here); overridden by -order
    CHKERR PetscOptionsGetInt(PETSC_NULL, "", "-order", &order, PETSC_NULL);
    CHKERR simple_interface->setFieldOrder("u", order);
    CHKERR simple_interface->setUp();

    boost::shared_ptr<CommonData> data(new CommonData());
    // aliasing shared pointers to members of CommonData
    auto val_ptr = boost::shared_ptr<VectorDouble>(data, &data->val);
    auto dot_val_ptr = boost::shared_ptr<VectorDouble>(data, &data->dot_val);
    auto grad_ptr = boost::shared_ptr<MatrixDouble>(data, &data->grad);

    // finite elements for the slow (explicit) and stiff (implicit) parts
    boost::shared_ptr<Ele> vol_ele_slow_rhs(new Ele(m_field));
    boost::shared_ptr<Ele> vol_ele_stiff_rhs(new Ele(m_field));
    boost::shared_ptr<Ele> vol_ele_stiff_lhs(new Ele(m_field));

    // slow right-hand side: field values at integration points, then the
    // reaction-term operator
    vol_ele_slow_rhs->getOpPtrVector().push_back(
        new OpCalculateScalarFieldValues("u", val_ptr));
    vol_ele_slow_rhs->getOpPtrVector().push_back(new OpAssembleSlowRhs(data));
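    // With TSSetRHSFunction PETSc expects the explicit right-hand side in the
    // form G(t,u) = M^{-1} g(t,u). The hook below therefore finishes the
    // assembly of the slow residual and solves with the mass matrix in place.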
    auto solve_for_g = [&]() {
      MoFEMFunctionBegin;
      if (*(vol_ele_slow_rhs->vecAssembleSwitch)) {
        CHKERR VecGhostUpdateBegin(vol_ele_slow_rhs->ts_F, ADD_VALUES,
                                   SCATTER_REVERSE);
        CHKERR VecGhostUpdateEnd(vol_ele_slow_rhs->ts_F, ADD_VALUES,
                                 SCATTER_REVERSE);
        CHKERR VecAssemblyBegin(vol_ele_slow_rhs->ts_F);
        CHKERR VecAssemblyEnd(vol_ele_slow_rhs->ts_F);
        *vol_ele_slow_rhs->vecAssembleSwitch = false;
      }
      // solve M g = f in place with the KSP set up on the mass matrix
      CHKERR KSPSolve(data->ksp, vol_ele_slow_rhs->ts_F,
                      vol_ele_slow_rhs->ts_F);
      MoFEMFunctionReturn(0);
    };
    vol_ele_slow_rhs->postProcessHook = solve_for_g;
    auto det_ptr = boost::make_shared<VectorDouble>();
    auto jac_ptr = boost::make_shared<MatrixDouble>();
    auto inv_jac_ptr = boost::make_shared<MatrixDouble>();
    // stiff right-hand side: element Jacobian (the standard MoFEM face-element
    // operators are assumed here), field rate and gradient, then the residual
    vol_ele_stiff_rhs->getOpPtrVector().push_back(
        new OpCalculateHOJacForFace(jac_ptr));
    vol_ele_stiff_rhs->getOpPtrVector().push_back(
        new OpInvertMatrix<2>(jac_ptr, det_ptr, inv_jac_ptr));
    vol_ele_stiff_rhs->getOpPtrVector().push_back(
        new OpSetInvJacH1ForFace(inv_jac_ptr));
    vol_ele_stiff_rhs->getOpPtrVector().push_back(
        new OpCalculateScalarFieldValuesDot("u", dot_val_ptr));
    vol_ele_stiff_rhs->getOpPtrVector().push_back(
        new OpCalculateScalarFieldGradient<2>("u", grad_ptr));
    vol_ele_stiff_rhs->getOpPtrVector().push_back(
        new OpAssembleStiffRhs<2>(data));
    // stiff left-hand side: same geometry operators followed by the tangent
    vol_ele_stiff_lhs->getOpPtrVector().push_back(
        new OpCalculateHOJacForFace(jac_ptr));
    vol_ele_stiff_lhs->getOpPtrVector().push_back(
        new OpInvertMatrix<2>(jac_ptr, det_ptr, inv_jac_ptr));
    vol_ele_stiff_lhs->getOpPtrVector().push_back(
        new OpSetInvJacH1ForFace(inv_jac_ptr));
    vol_ele_stiff_lhs->getOpPtrVector().push_back(
        new OpAssembleStiffLhs<2>(data));
    auto vol_rule = [](int, int, int p) -> int { return 2 * p; };
    vol_ele_slow_rhs->getRuleHook = vol_rule;
    vol_ele_stiff_rhs->getRuleHook = vol_rule;
    vol_ele_stiff_lhs->getRuleHook = vol_rule;
    // post-processing element: evaluate "u" and map it onto the output mesh
    auto post_proc = boost::make_shared<PostProcEle>(m_field);
    boost::shared_ptr<ForcesAndSourcesCore> null;
    auto u_ptr = boost::make_shared<VectorDouble>();
    post_proc->getOpPtrVector().push_back(
        new OpCalculateScalarFieldValues("u", u_ptr));
    post_proc->getOpPtrVector().push_back(
        new OpPPMap(post_proc->getPostProcMesh(), post_proc->getMapGaussPts(),
                    {{"u", u_ptr}}, {}, {}, {}));

    auto dm = simple_interface->getDM();
    // initial condition: the MeshsetsManager query that fills inner_surface
    // with the blockset entities is omitted in this excerpt; on the vertices
    // of that surface the field "u" is set to the initial value u0
    if (!inner_surface.empty()) {
      Range inner_surface_verts;
      CHKERR moab.get_connectivity(inner_surface, inner_surface_verts, false);
      CHKERR m_field.getInterface<FieldBlas>()->setField(
          u0, MBVERTEX, inner_surface_verts, "u");
    }

    // essential (zero Dirichlet) boundary on the mesh skin: the skin Range
    // edges is computed in code omitted here; keep only the part owned by
    // this processor and remove its "u" DOFs from the problem
    Range edges_part;
    ParallelComm *pcomm = ParallelComm::get_pcomm(&moab, MYPCOMM_INDEX);
    CHKERR pcomm->filter_pstatus(edges, PSTATUS_SHARED | PSTATUS_MULTISHARED,
                                 PSTATUS_NOT, -1, &edges_part);
    Range edges_verts;
    CHKERR moab.get_connectivity(edges_part, edges_verts, false);
    CHKERR m_field.getInterface<ProblemsManager>()->removeDofsOnEntities(
        simple_interface->getProblemName(), "u",
        unite(edges_verts, edges_part));
    // assemble the mass matrix once and set up the KSP used to invert it
    CHKERR DMCreateMatrix_MoFEM(dm, data->M);
    CHKERR MatZeroEntries(data->M);
    boost::shared_ptr<Ele> vol_mass_ele(new Ele(m_field));
    vol_mass_ele->getOpPtrVector().push_back(new OpAssembleMass(data));
    CHKERR DMoFEMLoopFiniteElements(dm, simple_interface->getDomainFEName(),
                                    vol_mass_ele);
    CHKERR MatAssemblyBegin(data->M, MAT_FINAL_ASSEMBLY);
    CHKERR MatAssemblyEnd(data->M, MAT_FINAL_ASSEMBLY);

    data->ksp = createKSP(m_field.get_comm());
    CHKERR KSPSetOperators(data->ksp, data->M, data->M);
    CHKERR KSPSetFromOptions(data->ksp);
    CHKERR KSPSetUp(data->ksp);
    // IMEX time integration: the stiff diffusion part is treated implicitly,
    // the slow reaction part explicitly
    auto ts = createTS(m_field.get_comm());
    CHKERR TSSetType(ts, TSARKIMEX);
    CHKERR TSARKIMEXSetType(ts, TSARKIMEXA2);
    CHKERR DMMoFEMTSSetIJacobian(dm, simple_interface->getDomainFEName(),
                                 vol_ele_stiff_lhs, null, null);
    CHKERR DMMoFEMTSSetIFunction(dm, simple_interface->getDomainFEName(),
                                 vol_ele_stiff_rhs, null, null);
    CHKERR DMMoFEMTSSetRHSFunction(dm, simple_interface->getDomainFEName(),
                                   vol_ele_slow_rhs, null, null);
    // monitor writing post-processing output
    boost::shared_ptr<Monitor> monitor_ptr(new Monitor(dm, post_proc));
    CHKERR DMMoFEMTSSetMonitor(dm, ts, simple_interface->getDomainFEName(),
                               monitor_ptr, null, null);
    double ftime = 1;
    CHKERR TSSetDuration(ts, PETSC_DEFAULT, ftime);
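    // What remains before the try block closes is to create the solution
    // vector, fill it with the current values of "u" and launch the solver.
    // The lines below are a minimal sketch of that step, assuming the usual
    // MoFEM pattern built from functions this example already references
    // (DMCreateGlobalVector_MoFEM, DMoFEMMeshToLocalVector).
    Vec X;
    CHKERR DMCreateGlobalVector_MoFEM(dm, &X);
    CHKERR DMoFEMMeshToLocalVector(dm, X, INSERT_VALUES, SCATTER_FORWARD);
    CHKERR TSSetDM(ts, dm);
    CHKERR TSSetSolution(ts, X);
    CHKERR TSSetFromOptions(ts);
    CHKERR TSSolve(ts, X);
    CHKERR VecDestroy(&X);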
  }
  CATCH_ERRORS;

  MoFEM::Core::Finalize();
  return 0;
}