};
};
GAUSS>::OpGradTensorGrad<1, SPACE_DIM, SPACE_DIM, 1>;
GAUSS>::OpSource<1, SPACE_DIM>;
constexpr double rho = 1;
constexpr double omega = 2.4;
private:
boost::shared_ptr<MatrixDouble>
matDPtr;
};
auto set_matrial_stiffness = [&]() {
: 1;
auto t_D = getFTensor4DdgFromMat<SPACE_DIM, SPACE_DIM, 0>(*
matDPtr);
};
matDPtr = boost::make_shared<MatrixDouble>();
CHKERR set_matrial_stiffness();
}
}
}
}
simple->getProblemName(),
"U");
}
auto det_ptr = boost::make_shared<VectorDouble>();
auto jac_ptr = boost::make_shared<MatrixDouble>();
auto inv_jac_ptr = boost::make_shared<MatrixDouble>();
auto add_base_transform = [&](auto &pipeline) {
pipeline.push_back(
pipeline.push_back(
};
add_base_transform(pipeline_mng->getOpDomainRhsPipeline());
add_base_transform(pipeline_mng->getOpDomainLhsPipeline());
auto &fe_domain_lhs = pipeline_mng->getDomainLhsFE();
return rho * fe_domain_lhs->ts_aa;
};
auto fe_domain_rhs = pipeline_mng->getDomainRhsFE();
t_source(0) = 0.1;
t_source(1) = 1.;
const auto time = fe_domain_rhs->ts_t;
t_source(
i) *= sin(time *
omega * M_PI);
return t_source;
};
auto henky_common_data_ptr = boost::make_shared<HenckyOps::CommonData>();
henky_common_data_ptr->matDPtr =
matDPtr;
pipeline_mng->getOpDomainLhsPipeline().push_back(
pipeline_mng->getOpDomainLhsPipeline().push_back(
pipeline_mng->getOpDomainLhsPipeline().push_back(
pipeline_mng->getOpDomainLhsPipeline().push_back(
pipeline_mng->getOpDomainLhsPipeline().push_back(
pipeline_mng->getOpDomainLhsPipeline().push_back(
pipeline_mng->getOpDomainLhsPipeline().push_back(
pipeline_mng->getOpDomainLhsPipeline().push_back(
new OpK(
"U",
"U", henky_common_data_ptr->getMatTangent()));
pipeline_mng->getOpDomainLhsPipeline().push_back(
new OpMass(
"U",
"U", get_rho));
pipeline_mng->getOpDomainRhsPipeline().push_back(
pipeline_mng->getOpDomainRhsPipeline().push_back(
pipeline_mng->getOpDomainRhsPipeline().push_back(
pipeline_mng->getOpDomainRhsPipeline().push_back(
pipeline_mng->getOpDomainRhsPipeline().push_back(
pipeline_mng->getOpDomainRhsPipeline().push_back(
pipeline_mng->getOpDomainRhsPipeline().push_back(
"U", henky_common_data_ptr->getMatFirstPiolaStress()));
pipeline_mng->getOpDomainRhsPipeline().push_back(
pipeline_mng->getOpDomainRhsPipeline().push_back(
}
};
auto get_bc_hook = [&]() {
mField, pipeline_mng->getDomainRhsFE(),
{boost::make_shared<TimeScale>()});
return hook;
};
pipeline_mng->getDomainRhsFE()->preProcessHook = get_bc_hook();
}
"out_step_" + boost::lexical_cast<std::string>(
ts_step) +
".h5m");
}
}
private:
boost::shared_ptr<PostProcEle>
postProc;
};
ts = pipeline_mng->createTSIM();
else
ts = pipeline_mng->createTSIM2();
auto post_proc_fe = boost::make_shared<PostProcEle>(
mField);
auto det_ptr = boost::make_shared<VectorDouble>();
auto jac_ptr = boost::make_shared<MatrixDouble>();
auto inv_jac_ptr = boost::make_shared<MatrixDouble>();
post_proc_fe->getOpPtrVector().push_back(
post_proc_fe->getOpPtrVector().push_back(
post_proc_fe->getOpPtrVector().push_back(
post_proc_fe->getOpPtrVector().push_back(
post_proc_fe->getOpPtrVector().push_back(
post_proc_fe->getOpPtrVector().push_back(
auto u_ptr = boost::make_shared<MatrixDouble>();
post_proc_fe->getOpPtrVector().push_back(
post_proc_fe->getOpPtrVector().push_back(
post_proc_fe->getPostProcMesh(), post_proc_fe->getMapGaussPts(),
{},
{{"U", u_ptr}},
{},
{{"STRAIN", matStrainPtr}, {"STRESS", matStressPtr}}
)
);
boost::shared_ptr<FEMethod> null_fe;
auto monitor_ptr = boost::make_shared<Monitor>(dm, post_proc_fe);
null_fe, monitor_ptr);
double ftime = 1;
CHKERR TSSetExactFinalTime(ts, TS_EXACTFINALTIME_MATCHSTEP);
SCATTER_FORWARD);
} else {
}
PetscInt steps, snesfails, rejects, nonlinits, linits;
#if PETSC_VERSION_GE(3, 8, 0)
CHKERR TSGetStepNumber(ts, &steps);
#else
CHKERR TSGetTimeStepNumber(ts, &steps);
#endif
CHKERR TSGetSNESFailures(ts, &snesfails);
CHKERR TSGetStepRejections(ts, &rejects);
CHKERR TSGetSNESIterations(ts, &nonlinits);
CHKERR TSGetKSPIterations(ts, &linits);
"steps %D (%D rejected, %D SNES fails), ftime %g, nonlinits "
"%D, linits %D\n",
steps, rejects, snesfails, ftime, nonlinits, linits);
}
PetscBool test_flg = PETSC_FALSE;
if (test_flg) {
SCATTER_FORWARD);
double nrm2;
MOFEM_LOG(
"EXAMPLE", Sev::inform) <<
"Regression norm " << nrm2;
constexpr double regression_value = 1.09572;
if (fabs(nrm2 - regression_value) > 1e-2)
"Regression test faileed; wrong norm value.");
}
}
}
static char help[] =
"...\n\n";
int main(
int argc,
char *argv[]) {
auto core_log = logging::core::get();
core_log->add_sink(
LogManager::createSink(LogManager::getStrmWorld(), "EXAMPLE"));
LogManager::setLog("EXAMPLE");
try {
DMType dm_name = "DMMOFEM";
moab::Core mb_instance;
moab::Interface &moab = mb_instance;
}
}
#define MOFEM_LOG_C(channel, severity, format,...)
void simple(double P1[], double P2[], double P3[], double c[], const int N)
ElementsAndOps< SPACE_DIM >::DomainEle DomainEle
Kronecker Delta class symmetric.
#define CATCH_ERRORS
Catch errors.
@ AINSWORTH_BERNSTEIN_BEZIER_BASE
#define MoFEMFunctionBegin
First executable line of each MoFEM function, used for error handling. Final line of MoFEM functions ...
@ MOFEM_ATOM_TEST_INVALID
#define MoFEMFunctionReturn(a)
Last executable line of each PETSc function used for error handling. Replaces return()
#define CHKERR
Inline error check.
FormsIntegrators< DomainEleOp >::Assembly< A >::LinearForm< I >::OpGradTimesSymTensor< 1, SPACE_DIM, SPACE_DIM > OpInternalForce
constexpr double shear_modulus_G
constexpr double bulk_modulus_K
FormsIntegrators< DomainEleOp >::Assembly< PETSC >::BiLinearForm< GAUSS >::OpMass< 1, SPACE_DIM > OpMass
PetscErrorCode DMoFEMMeshToLocalVector(DM dm, Vec l, InsertMode mode, ScatterMode scatter_mode)
set local (or ghosted) vector values on mesh for partition only
PetscErrorCode DMRegister_MoFEM(const char sname[])
Register MoFEM problem.
PetscErrorCode DMoFEMLoopFiniteElements(DM dm, const char fe_name[], MoFEM::FEMethod *method, CacheTupleWeakPtr cache_ptr=CacheTupleSharedPtr())
Executes FEMethod for finite elements in DM.
auto smartCreateDMVector(DM dm)
Get smart vector from DM.
#define MOFEM_LOG(channel, severity)
Log.
#define MOFEM_LOG_TAG(channel, tag)
Tag channel.
FTensor::Index< 'k', 3 > k
FormsIntegrators< DomainEleOp >::Assembly< USER_ASSEMBLE >::LinearForm< GAUSS >::OpBaseTimesVector< 1, 3, 1 > OpInertiaForce
FormsIntegrators< DomainEleOp >::Assembly< USER_ASSEMBLE >::LinearForm< GAUSS >::OpSource< 1, 3 > OpBodyForce
PetscErrorCode MoFEMErrorCode
MoFEM/PETSc error code.
implementation of Data Operators for Forces and Sources
PetscErrorCode DMMoFEMTSSetMonitor(DM dm, TS ts, const std::string fe_name, boost::shared_ptr< MoFEM::FEMethod > method, boost::shared_ptr< MoFEM::BasicMethod > pre_only, boost::shared_ptr< MoFEM::BasicMethod > post_only)
Set Monitor To TS solver.
PetscErrorCode PetscOptionsGetInt(PetscOptions *, const char pre[], const char name[], PetscInt *ivalue, PetscBool *set)
PetscErrorCode PetscOptionsGetBool(PetscOptions *, const char pre[], const char name[], PetscBool *bval, PetscBool *set)
SmartPetscObj< Vec > smartVectorDuplicate(SmartPetscObj< Vec > &vec)
Create duplicate vector of smart vector.
static double * ts_aa_ptr
static double * ts_time_ptr
OpPostProcMapInMoab< SPACE_DIM, SPACE_DIM > OpPPMap
#define EXECUTABLE_DIMENSION
static constexpr int approx_order
FTensor::Index< 'l', 3 > l
FTensor::Index< 'j', 3 > j
FTensor::Index< 'i', 3 > i
MoFEMErrorCode boundaryCondition()
[Set up problem]
MoFEMErrorCode assembleSystem()
[Applying essential BC]
MoFEMErrorCode readMesh()
[run problem]
boost::shared_ptr< MatrixDouble > matAccelerationPtr
boost::shared_ptr< MatrixDouble > matInertiaPtr
boost::shared_ptr< MatrixDouble > matGradPtr
boost::shared_ptr< MatrixDouble > matStressPtr
boost::shared_ptr< MatrixDouble > matStrainPtr
MoFEMErrorCode checkResults()
[Postprocess results]
MoFEMErrorCode solveSystem()
[Push operators to pipeline]
MoFEMErrorCode createCommonData()
[Set up problem]
boost::shared_ptr< MatrixDouble > matTangentPtr
MoFEMErrorCode runProblem()
[Run problem]
MoFEM::Interface & mField
MoFEMErrorCode setupProblem()
[Run problem]
MoFEMErrorCode outputResults()
[Solve]
boost::shared_ptr< MatrixDouble > matDPtr
Simple interface for fast problem set-up.
static MoFEMErrorCode Initialize(int *argc, char ***args, const char file[], const char help[])
Initializes the MoFEM database PETSc, MOAB and MPI.
static MoFEMErrorCode Finalize()
Checks for options to be called at the conclusion of the program.
Deprecated interface functions.
Definition of the displacement bc data structure.
Data on single entity (This is passed as argument to DataOperator::doWork)
Class (Function) to enforce essential constrains.
Approximate field values for given petsc vector.
Get values at integration pts for tensor filed rank 1, i.e. vector field.
Post post-proc data at points from hash maps.
Set inverse jacobian to base functions.
Set inverse jacobian to base functions.
PipelineManager interface.
Simple interface for fast problem set-up.
intrusive_ptr for managing petsc objects
PetscInt ts_step
time step number
MoFEMErrorCode getInterface(IFACE *&iface) const
Get interface reference to pointer of interface.
Volume finite element base.
MoFEMErrorCode postProcess()
function is run at the end of loop
boost::shared_ptr< PostProcEle > postProc
// Closeness tolerance for comparing (double) eigenvalues. Deliberately(?) uses
// the *float* machine epsilon rather than the double one, giving a much looser
// cutoff — presumably to merge nearly-equal eigenvalues robustly; TODO confirm.
constexpr double eps = std::numeric_limits<float>::epsilon();
/// Spectral scalar function for the logarithmic (Hencky) strain: log(x) / 2.
auto f = [](double x) { return std::log(x) / 2.0; };
/// First derivative of f: d/dx [log(x)/2] = 1 / (2 x).
auto d_f = [](double x) { return 0.5 / x; };
/// Second derivative of f: d2/dx2 [log(x)/2] = -1 / (2 x^2).
auto dd_f = [](double x) { return -0.5 / (x * x); };
struct isEq {
static inline auto check(
const double &
a,
const double &b) {
}
};
inline auto is_eq(
const double &
a,
const double &b) {
};
/// Count the number of distinct values among the DIM doubles starting at ptr,
/// where "distinct" is judged by the file-local is_eq predicate (relative to
/// the largest magnitude, stored into isEq::absMax as a side effect).
/// The input array is not modified; a local copy is sorted instead.
template <int DIM>
inline auto get_uniq_nb(double *ptr) {
  std::array<double, DIM> buf;
  std::copy(ptr, ptr + DIM, buf.begin());
  std::sort(buf.begin(), buf.end());
  // NOTE: deliberately updates the global comparison scale used by is_eq.
  isEq::absMax = std::max(std::abs(buf.front()), std::abs(buf.back()));
  auto uniq_end = std::unique(buf.begin(), buf.end(), is_eq);
  return std::distance(buf.begin(), uniq_end);
};
template <int DIM>
std::max(std::max(std::abs(eig(0)), std::abs(eig(1))), std::abs(eig(2)));
if (
is_eq(eig(0), eig(1))) {
}
else if (
is_eq(eig(0), eig(2))) {
}
else if (
is_eq(eig(1), eig(2))) {
}
eigen_vec(
i, 0), eigen_vec(
i, 1), eigen_vec(
i, 2),
eigen_vec(
j, 0), eigen_vec(
j, 1), eigen_vec(
j, 2),
eigen_vec(
k, 0), eigen_vec(
k, 1), eigen_vec(
k, 2)};
{
eigen_vec(
i,
j) = eigen_vec_c(
i,
j);
}
};
struct CommonData :
public boost::enable_shared_from_this<CommonData> {
boost::shared_ptr<MatrixDouble>
matDPtr;
return boost::shared_ptr<MatrixDouble>(shared_from_this(),
}
return boost::shared_ptr<MatrixDouble>(shared_from_this(),
}
return boost::shared_ptr<MatrixDouble>(shared_from_this(), &
matLogC);
}
return boost::shared_ptr<MatrixDouble>(shared_from_this(), &
matTangent);
}
};
template <
int DIM>
struct OpCalculateEigenVals :
public DomainEleOp {
boost::shared_ptr<CommonData> common_data)
}
auto t_grad = getFTensor2FromMat<DIM, DIM>(*(
commonDataPtr->matGradPtr));
auto t_eig_val = getFTensor1FromMat<DIM>(
commonDataPtr->matEigVal);
auto t_eig_vec = getFTensor2FromMat<DIM, DIM>(
commonDataPtr->matEigVec);
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
for (int ii = 0; ii != DIM; ii++)
for (int jj = 0; jj != DIM; jj++)
eigen_vec(ii, jj) = C(ii, jj);
CHKERR computeEigenValuesSymmetric<DIM>(eigen_vec, eig);
for(auto ii = 0;ii!=DIM;++ii)
eig(ii) = std::max(std::numeric_limits<double>::epsilon(), eig(ii));
auto nb_uniq = get_uniq_nb<DIM>(&eig(0));
if constexpr (DIM == 3) {
if (nb_uniq == 2) {
sort_eigen_vals<DIM>(eig, eigen_vec);
}
}
t_eig_vec(
i,
j) = eigen_vec(
i,
j);
++t_grad;
++t_eig_val;
++t_eig_vec;
}
}
private:
};
template <
int DIM>
struct OpCalculateLogC :
public DomainEleOp {
boost::shared_ptr<CommonData> common_data)
}
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
auto t_eig_val = getFTensor1FromMat<DIM>(
commonDataPtr->matEigVal);
auto t_eig_vec = getFTensor2FromMat<DIM, DIM>(
commonDataPtr->matEigVec);
auto t_logC = getFTensor2SymmetricFromMat<DIM>(
commonDataPtr->matLogC);
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
auto nb_uniq = get_uniq_nb<DIM>(&(t_eig_val(0)));
eigen_vec(
i,
j) = t_eig_vec(
i,
j);
t_logC(
i,
j) = logC(
i,
j);
++t_eig_val;
++t_eig_vec;
++t_logC;
}
}
private:
};
template <
int DIM>
struct OpCalculateLogC_dC :
public DomainEleOp {
boost::shared_ptr<CommonData> common_data)
}
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
auto t_logC_dC = getFTensor4DdgFromMat<DIM, DIM>(
commonDataPtr->matLogCdC);
auto t_eig_val = getFTensor1FromMat<DIM>(
commonDataPtr->matEigVal);
auto t_eig_vec = getFTensor2FromMat<DIM, DIM>(
commonDataPtr->matEigVec);
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
eigen_vec(
i,
j) = t_eig_vec(
i,
j);
auto nb_uniq = get_uniq_nb<DIM>(&eig(0));
dlogC_dC(
i,
j,
k,
l) *= 2;
t_logC_dC(
i,
j,
k,
l) = dlogC_dC(
i,
j,
k,
l);
++t_logC_dC;
++t_eig_val;
++t_eig_vec;
}
}
private:
};
template <
int DIM>
struct OpCalculateHenckyStress :
public DomainEleOp {
boost::shared_ptr<CommonData> common_data)
}
auto t_D = getFTensor4DdgFromMat<DIM, DIM, 0>(*
commonDataPtr->matDPtr);
auto t_logC = getFTensor2SymmetricFromMat<DIM>(
commonDataPtr->matLogC);
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
auto t_T = getFTensor2SymmetricFromMat<DIM>(
commonDataPtr->matHenckyStress);
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
t_T(
i,
j) = t_D(
i,
j,
k,
l) * t_logC(
k,
l);
++t_logC;
++t_T;
++t_D;
}
}
private:
};
template <
int DIM>
struct OpCalculateHenckyPlasticStress :
public DomainEleOp {
boost::shared_ptr<CommonData> common_data,
boost::shared_ptr<MatrixDouble> mat_D_ptr,
}
auto t_D = getFTensor4DdgFromMat<DIM, DIM, 0>(*
matDPtr);
auto t_logC = getFTensor2SymmetricFromMat<DIM>(
commonDataPtr->matLogC);
auto t_logCPlastic = getFTensor2SymmetricFromMat<DIM>(*
matLogCPlastic);
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
auto t_T = getFTensor2SymmetricFromMat<DIM>(
commonDataPtr->matHenckyStress);
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
t_T(
i,
j) = t_D(
i,
j,
k,
l) * (t_logC(
k,
l) - t_logCPlastic(
k,
l));
++t_logC;
++t_T;
++t_D;
++t_logCPlastic;
}
}
private:
boost::shared_ptr<MatrixDouble>
matDPtr;
};
template <
int DIM>
struct OpCalculatePiolaStress :
public DomainEleOp {
boost::shared_ptr<CommonData> common_data)
}
auto t_D = getFTensor4DdgFromMat<DIM, DIM, 0>(*
commonDataPtr->matDPtr);
auto t_logC = getFTensor2SymmetricFromMat<DIM>(
commonDataPtr->matLogC);
auto t_logC_dC = getFTensor4DdgFromMat<DIM, DIM>(
commonDataPtr->matLogCdC);
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
commonDataPtr->matFirstPiolaStress.resize(DIM * DIM, nb_gauss_pts,
false);
auto t_P = getFTensor2FromMat<DIM, DIM>(
commonDataPtr->matFirstPiolaStress);
auto t_T = getFTensor2SymmetricFromMat<DIM>(
commonDataPtr->matHenckyStress);
auto t_S =
getFTensor2SymmetricFromMat<DIM>(
commonDataPtr->matSecondPiolaStress);
auto t_grad = getFTensor2FromMat<DIM, DIM>(*(
commonDataPtr->matGradPtr));
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
t_S(
k,
l) = t_T(
i,
j) * t_logC_dC(
i,
j,
k,
l);
t_P(
i,
l) = t_F(
i,
k) * t_S(
k,
l);
++t_grad;
++t_logC;
++t_logC_dC;
++t_P;
++t_T;
++t_S;
++t_D;
}
}
private:
};
template <
int DIM>
struct OpHenckyTangent :
public DomainEleOp {
boost::shared_ptr<CommonData> common_data,
boost::shared_ptr<MatrixDouble> mat_D_ptr = nullptr)
if (mat_D_ptr)
else
}
constexpr auto size_symm = (DIM * (DIM + 1)) / 2;
commonDataPtr->matTangent.resize(DIM * DIM * DIM * DIM, nb_gauss_pts);
auto dP_dF =
getFTensor4FromMat<DIM, DIM, DIM, DIM, 1>(
commonDataPtr->matTangent);
auto t_D = getFTensor4DdgFromMat<DIM, DIM, 0>(*
matDPtr);
auto t_eig_val = getFTensor1FromMat<DIM>(
commonDataPtr->matEigVal);
auto t_eig_vec = getFTensor2FromMat<DIM, DIM>(
commonDataPtr->matEigVec);
auto t_T = getFTensor2SymmetricFromMat<DIM>(
commonDataPtr->matHenckyStress);
auto t_S =
getFTensor2SymmetricFromMat<DIM>(
commonDataPtr->matSecondPiolaStress);
auto t_grad = getFTensor2FromMat<DIM, DIM>(*(
commonDataPtr->matGradPtr));
auto t_logC_dC = getFTensor4DdgFromMat<DIM, DIM>(
commonDataPtr->matLogCdC);
for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
eigen_vec(
i,
j) = t_eig_vec(
i,
j);
auto nb_uniq = get_uniq_nb<DIM>(&eig(0));
auto TL =
P_D_P_plus_TL(
i,
j,
k,
l) =
(t_logC_dC(
i,
j,
o,
p) * t_D(
o,
p,
m,
n)) * t_logC_dC(
m,
n,
k,
l);
P_D_P_plus_TL(
i,
j,
k,
l) *= 0.5;
t_F(
i,
k) * (P_D_P_plus_TL(
k,
j,
o,
p) * dC_dF(
o,
p,
m,
n));
++dP_dF;
++t_grad;
++t_eig_val;
++t_eig_vec;
++t_logC_dC;
++t_S;
++t_T;
++t_D;
}
}
private:
boost::shared_ptr<MatrixDouble>
matDPtr;
};
template <
int DIM>
struct OpPostProcHencky :
public DomainEleOp {
moab::Interface &post_proc_mesh,
std::vector<EntityHandle> &map_gauss_pts,
boost::shared_ptr<CommonData> common_data_ptr);
private:
};
template <int DIM>
const std::string
field_name, moab::Interface &post_proc_mesh,
std::vector<EntityHandle> &map_gauss_pts,
boost::shared_ptr<CommonData> common_data_ptr)
mapGaussPts(map_gauss_pts), commonDataPtr(common_data_ptr) {
std::fill(&doEntities[MBEDGE], &doEntities[MBMAXTYPE], false);
}
template <int DIM>
std::array<double, 9> def;
std::fill(def.begin(), def.end(), 0);
auto get_tag = [&](const std::string name, size_t size) {
CHKERR postProcMesh.tag_get_handle(name.c_str(), size, MB_TYPE_DOUBLE,
th,
MB_TAG_CREAT | MB_TAG_SPARSE,
def.data());
};
auto set_matrix = [&](
auto &
t) -> MatrixDouble3by3 & {
mat.clear();
return mat;
};
return postProcMesh.tag_set_data(th, &mapGaussPts[gg], 1,
&*mat.data().begin());
};
auto th_grad = get_tag("GRAD", 9);
auto th_stress = get_tag("STRESS", 9);
auto t_piola =
getFTensor2FromMat<DIM, DIM>(commonDataPtr->matFirstPiolaStress);
auto t_grad = getFTensor2FromMat<DIM, DIM>(*commonDataPtr->matGradPtr);
for (int gg = 0; gg != commonDataPtr->matGradPtr->size2(); ++gg) {
double inv_t_detF;
if (DIM == 2)
else
inv_t_detF = 1. / inv_t_detF;
cauchy_stress(
i,
j) = inv_t_detF * (t_piola(
i,
k) ^ F(
j,
k));
CHKERR set_tag(th_grad, gg, set_matrix(t_grad));
CHKERR set_tag(th_stress, gg, set_matrix(cauchy_stress));
++t_piola;
++t_grad;
}
}
}
FTensor::Index< 'n', SPACE_DIM > n
FTensor::Index< 'm', SPACE_DIM > m
FTensor::Index< 'i', SPACE_DIM > i
const double c
speed of light (cm/ns)
const double v
phase velocity of light in medium (cm/ns)
FTensor::Index< 'l', 3 > l
FTensor::Index< 'j', 3 > j
FTensor::Ddg< double, 3, 3 > getDiffMat(Val< double, 3 > &t_val, Vec< double, 3 > &t_vec, Fun< double > f, Fun< double > d_f, const int nb)
Get the Diff Mat object.
FTensor::Tensor2_symmetric< double, 3 > getMat(Val< double, 3 > &t_val, Vec< double, 3 > &t_vec, Fun< double > f)
Get the Mat object.
FTensor::Ddg< double, 3, 3 > getDiffDiffMat(Val< double, 3 > &t_val, Vec< double, 3 > &t_vec, Fun< double > f, Fun< double > d_f, Fun< double > dd_f, FTensor::Tensor2< double, 3, 3 > &t_S, const int nb)
Tensor2_Expr< Kronecker_Delta< T >, T, Dim0, Dim1, i, j > kronecker_delta(const Index< i, Dim0 > &, const Index< j, Dim1 > &)
Rank 2.
auto get_uniq_nb(double *ptr)
auto sort_eigen_vals(FTensor::Tensor1< double, DIM > &eig, FTensor::Tensor2< double, DIM, DIM > &eigen_vec)
auto is_eq(const double &a, const double &b)
UBlasMatrix< double > MatrixDouble
MatrixBoundedArray< double, 9 > MatrixDouble3by3
MoFEMErrorCode determinantTensor2by2(T1 &t, T2 &det)
Calculate determinant 2 by 2.
MoFEMErrorCode determinantTensor3by3(T1 &t, T2 &det)
Calculate determinant 3 by 3.
constexpr double t
plate stiffness
constexpr auto field_name
auto getMatHenckyStress()
auto getMatFirstPiolaStress()
boost::shared_ptr< MatrixDouble > matLogCPlastic
MatrixDouble matFirstPiolaStress
boost::shared_ptr< MatrixDouble > matDPtr
MatrixDouble matSecondPiolaStress
MatrixDouble matHenckyStress
boost::shared_ptr< MatrixDouble > matGradPtr
boost::shared_ptr< CommonData > commonDataPtr
MoFEMErrorCode doWork(int side, EntityType type, EntData &data)
Operator for linear form, usually to calculate values on right hand side.
OpCalculateEigenVals(const std::string field_name, boost::shared_ptr< CommonData > common_data)
OpCalculateHenckyPlasticStress(const std::string field_name, boost::shared_ptr< CommonData > common_data, boost::shared_ptr< MatrixDouble > mat_D_ptr, const double scale=1)
boost::shared_ptr< MatrixDouble > matDPtr
boost::shared_ptr< MatrixDouble > matLogCPlastic
boost::shared_ptr< CommonData > commonDataPtr
MoFEMErrorCode doWork(int side, EntityType type, EntData &data)
Operator for linear form, usually to calculate values on right hand side.
OpCalculateHenckyStress(const std::string field_name, boost::shared_ptr< CommonData > common_data)
boost::shared_ptr< CommonData > commonDataPtr
MoFEMErrorCode doWork(int side, EntityType type, EntData &data)
Operator for linear form, usually to calculate values on right hand side.
OpCalculateLogC_dC(const std::string field_name, boost::shared_ptr< CommonData > common_data)
MoFEMErrorCode doWork(int side, EntityType type, EntData &data)
Operator for linear form, usually to calculate values on right hand side.
boost::shared_ptr< CommonData > commonDataPtr
boost::shared_ptr< CommonData > commonDataPtr
MoFEMErrorCode doWork(int side, EntityType type, EntData &data)
Operator for linear form, usually to calculate values on right hand side.
OpCalculateLogC(const std::string field_name, boost::shared_ptr< CommonData > common_data)
MoFEMErrorCode doWork(int side, EntityType type, EntData &data)
Operator for linear form, usually to calculate values on right hand side.
boost::shared_ptr< CommonData > commonDataPtr
OpCalculatePiolaStress(const std::string field_name, boost::shared_ptr< CommonData > common_data)
boost::shared_ptr< CommonData > commonDataPtr
MoFEMErrorCode doWork(int side, EntityType type, EntData &data)
Operator for linear form, usually to calculate values on right hand side.
boost::shared_ptr< MatrixDouble > matDPtr
OpHenckyTangent(const std::string field_name, boost::shared_ptr< CommonData > common_data, boost::shared_ptr< MatrixDouble > mat_D_ptr=nullptr)
MoFEMErrorCode doWork(int side, EntityType type, EntData &data)
[Postprocessing]
OpPostProcHencky(const std::string field_name, moab::Interface &post_proc_mesh, std::vector< EntityHandle > &map_gauss_pts, boost::shared_ptr< CommonData > common_data_ptr)
std::vector< EntityHandle > & mapGaussPts
moab::Interface & postProcMesh
boost::shared_ptr< CommonData > commonDataPtr
static auto check(const double &a, const double &b)
std::array< bool, MBMAXTYPE > doEntities
If true operator is executed for entity.
@ OPROW
operator doWork function is executed on FE rows
MatrixDouble & getGaussPts()
matrix of integration (Gauss) points for Volume Element