#ifndef __USER_DATA_OPERATORS_HPP__
#define __USER_DATA_OPERATORS_HPP__
template <class T, class A>
      boost::shared_ptr<ublas::vector<T, A>> data_ptr,
      const EntityType zero_type = MBVERTEX)
      boost::shared_ptr<ublas::vector<T, A>> data_ptr,
  boost::shared_ptr<ublas::vector<T, A>> dataPtr;
template <class T, class A>
    vec.resize(nb_gauss_pts, false);
    for (int i = 0; i != local_indices.size(); ++i)
      if (local_indices[i] != -1)
    const size_t nb_base_functions = data.getN().size2();
    for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
      for (; bb != nb_dofs; ++bb) {
        values_at_gauss_pts += field_data * base_function;
      for (; bb < nb_base_functions; ++bb)
      ++values_at_gauss_pts;
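// Usage sketch (illustrative only, not part of this header): an operator of
// this kind is normally pushed to an element pipeline so that the evaluated
// values are available to operators added after it. The operator name
// OpCalculateScalarFieldValues and the pipeline variable `pip` are assumed
// here for illustration.
//
//   auto vals_ptr = boost::make_shared<VectorDouble>();
//   pip.push_back(new OpCalculateScalarFieldValues("U", vals_ptr));
//   // after evaluation, (*vals_ptr)[gg] holds the field value at
//   // integration point gg of the current element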
template <PetscData::DataContext CTX>
      const std::string field_name, boost::shared_ptr<VectorDouble> data_ptr,
      const EntityType zero_at_type = MBVERTEX)
    vec.resize(nb_gauss_pts, false);
    const size_t nb_dofs = local_indices.size();
    auto get_array = [&](const auto ctx, auto vec) {
          << "In this case field degrees of freedom are read from a vector. "
             "That usually happens when a time solver is used, and access to "
             "first or second rates is needed. You probably did not set ts_u, "
             "ts_u_t, or ts_u_tt and the associated data structure, i.e. "
             "data_ctx to CTX_SET_X, CTX_SET_X_T, or CTX_SET_X_TT "
      CHKERR VecGetArrayRead(vec, &array);
    auto restore_array = [&](auto vec) {
      return VecRestoreArrayRead(vec, &array);
          "That case is not implemented");
    std::array<double, MAX_DOFS_ON_ENTITY> dot_dofs_vector;
    for (int i = 0; i != local_indices.size(); ++i)
      if (local_indices[i] != -1)
        dot_dofs_vector[i] = array[local_indices[i]];
      else
        dot_dofs_vector[i] = 0;
          "That case is not implemented");
    const size_t nb_base_functions = data.getN().size2();
    for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
      for (; bb != nb_dofs; ++bb) {
        values_at_gauss_pts += dot_dofs_vector[bb] * base_function;
      for (; bb < nb_base_functions; ++bb)
      ++values_at_gauss_pts;
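// Note (added for clarity): the lambda above reads DOFs from one of the time
// solver vectors (ts_u, ts_u_t, ts_u_tt), so the finite-element instance must
// advertise which vectors are available through its data context flags
// (CTX_SET_X, CTX_SET_X_T, or CTX_SET_X_TT), as the error message explains.
// A minimal sketch, assuming a TS-driven element `fe`; the flag spellings are
// assumptions based on the message text, not a definitive API reference:
//
//   fe->data_ctx = PetscData::CtxSetX | PetscData::CtxSetX_T;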
template <int Tensor_Dim, class T, class L, class A>
      boost::shared_ptr<ublas::matrix<T, L, A>> data_ptr,
      const EntityType zero_type = MBVERTEX)
  boost::shared_ptr<ublas::matrix<T, L, A>> dataPtr;
template <int Tensor_Dim, class T, class L, class A>
             "Not implemented for T = %s and dim = %d",
template <int Tensor_Dim>
      boost::shared_ptr<MatrixDouble> data_ptr,
      const EntityType zero_type = MBVERTEX)
      boost::shared_ptr<MatrixDouble> data_ptr,
      const EntityType zero_type = MBVERTEX)
template <int Tensor_Dim>
    const size_t nb_gauss_pts = getGaussPts().size2();
    auto &mat = *dataPtr;
    if (type == zeroType || mat.size2() != nb_gauss_pts) {
      mat.resize(Tensor_Dim, nb_gauss_pts, false);
    if (dataVec.use_count()) {
      dotVector.resize(nb_dofs, false);
      CHKERR VecGetArrayRead(dataVec, &array);
      for (int i = 0; i != local_indices.size(); ++i)
        if (local_indices[i] != -1)
          dotVector[i] = array[local_indices[i]];
      CHKERR VecRestoreArrayRead(dataVec, &array);
    const size_t nb_base_functions = data.getN().size2();
    auto values_at_gauss_pts = getFTensor1FromMat<Tensor_Dim>(mat);
    const size_t size = nb_dofs / Tensor_Dim;
    if (nb_dofs % Tensor_Dim) {
          "Nb. of DOFs is inconsistent with Tensor_Dim");
    for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
        if (field_data.l2() != field_data.l2()) {
          MOFEM_LOG("SELF", Sev::error) << "field data: " << field_data;
              "Wrong number in coefficients");
      for (; bb != size; ++bb) {
          if (base_function != base_function) {
            MOFEM_LOG("SELF", Sev::error) << "base function: " << base_function;
                "Wrong number in base functions");
        values_at_gauss_pts(I) += field_data(I) * base_function;
      for (; bb < nb_base_functions; ++bb)
      ++values_at_gauss_pts;
    if (dataVec.use_count()) {
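// Usage sketch (illustrative only): for a vector-valued field the evaluated
// values are stored as a Tensor_Dim x nb_gauss_pts matrix, one column per
// integration point. The operator name OpCalculateVectorFieldValues<3> and
// the pipeline variable `pip` are assumed here for illustration.
//
//   auto vec_vals_ptr = boost::make_shared<MatrixDouble>();
//   pip.push_back(new OpCalculateVectorFieldValues<3>("U", vec_vals_ptr));
//   // column gg of *vec_vals_ptr holds the three components of U at
//   // integration point gg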
template <int Tensor_Dim>
        Tensor_Dim, double, ublas::row_major, DoubleAllocator> {
template <int Tensor_Dim, CoordinateTypes COORDINATE_SYSTEM = CARTESIAN>
      const std::string field_name, boost::shared_ptr<VectorDouble> data_ptr,
      const EntityType zero_type = MBVERTEX)
    if constexpr (COORDINATE_SYSTEM == POLAR || COORDINATE_SYSTEM == SPHERICAL)
               "%s coordinate not implemented",
    vec.resize(nb_gauss_pts, false);
    const size_t nb_base_functions = data.getN().size2();
    const size_t size = nb_dofs / Tensor_Dim;
    if (nb_dofs % Tensor_Dim) {
          "Number of DOFs should be a multiple of dimensions");
    if constexpr (COORDINATE_SYSTEM == CARTESIAN) {
      for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
        for (; bb != size; ++bb) {
          values_at_gauss_pts += field_data(I) * diff_base_function(I);
          ++diff_base_function;
        for (; bb < nb_base_functions; ++bb)
          ++diff_base_function;
        ++values_at_gauss_pts;
      for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
        for (; bb != size; ++bb) {
          values_at_gauss_pts += field_data(I) * diff_base_function(I);
          values_at_gauss_pts +=
              base_function * (field_data(0) / t_coords(0));
          ++diff_base_function;
        for (; bb < nb_base_functions; ++bb) {
          ++diff_base_function;
        ++values_at_gauss_pts;
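// Note (added for clarity): the extra base_function * (field_data(0) /
// t_coords(0)) term in the non-Cartesian branch above is the u_r / r
// contribution that appears when the divergence is written in cylindrical
// (axisymmetric) coordinates, div u = du_r/dr + u_r/r + du_z/dz, assuming the
// first coordinate is the radius. The Cartesian branch needs only the
// gradient contraction.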
template <int Tensor_Dim, PetscData::DataContext CTX>
      const std::string field_name, boost::shared_ptr<MatrixDouble> data_ptr,
      const EntityType zero_at_type = MBVERTEX)
    const size_t nb_dofs = local_indices.size();
    mat.resize(Tensor_Dim, nb_gauss_pts, false);
    auto get_array = [&](const auto ctx, auto vec) {
          << "In this case field degrees of freedom are read from a vector. "
             "That usually happens when a time solver is used, and access to "
             "first or second rates is needed. You probably did not set ts_u, "
             "ts_u_t, or ts_u_tt and the associated data structure, i.e. "
             "data_ctx to CTX_SET_X, CTX_SET_X_T, or CTX_SET_X_TT "
      CHKERR VecGetArrayRead(vec, &array);
    auto restore_array = [&](auto vec) {
      return VecRestoreArrayRead(vec, &array);
          "That case is not implemented");
    for (int i = 0; i != local_indices.size(); ++i)
      if (local_indices[i] != -1)
          "That case is not implemented");
    const size_t nb_base_functions = data.getN().size2();
    auto values_at_gauss_pts = getFTensor1FromMat<Tensor_Dim>(mat);
    const size_t size = nb_dofs / Tensor_Dim;
    if (nb_dofs % Tensor_Dim) {
    for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
      auto field_data = getFTensor1FromArray<Tensor_Dim, Tensor_Dim>(dotVector);
      for (; bb != size; ++bb) {
        values_at_gauss_pts(I) += field_data(I) * base_function;
      for (; bb < nb_base_functions; ++bb)
      ++values_at_gauss_pts;
template <int Tensor_Dim>
template <int Tensor_Dim>
template <int Tensor_Dim0, int Tensor_Dim1, class T, class L, class A>
      boost::shared_ptr<ublas::matrix<T, L, A>> data_ptr,
      const EntityType zero_type = MBVERTEX)
  boost::shared_ptr<ublas::matrix<T, L, A>> dataPtr;
template <int Tensor_Dim0, int Tensor_Dim1, class T, class L, class A>
             "Not implemented for T = %s, dim0 = %d and dim1 = %d",
             Tensor_Dim0, Tensor_Dim1);
template <int Tensor_Dim0, int Tensor_Dim1>
          ublas::matrix<double, ublas::row_major, DoubleAllocator>>
      const EntityType zero_type = MBVERTEX)
          ublas::matrix<double, ublas::row_major, DoubleAllocator>>
  boost::shared_ptr<ublas::matrix<double, ublas::row_major, DoubleAllocator>>
template <int Tensor_Dim0, int Tensor_Dim1>
    const size_t nb_gauss_pts = data.getN().size1();
    if (type == zeroType) {
      mat.resize(Tensor_Dim0 * Tensor_Dim1, nb_gauss_pts, false);
    if (dataVec.use_count()) {
      dotVector.resize(nb_dofs, false);
      CHKERR VecGetArrayRead(dataVec, &array);
      for (int i = 0; i != local_indices.size(); ++i)
        if (local_indices[i] != -1)
          dotVector[i] = array[local_indices[i]];
      CHKERR VecRestoreArrayRead(dataVec, &array);
    const size_t nb_base_functions = data.getN().size2();
    auto values_at_gauss_pts =
        getFTensor2FromMat<Tensor_Dim0, Tensor_Dim1>(mat);
    const size_t size = nb_dofs / (Tensor_Dim0 * Tensor_Dim1);
    for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
      for (; bb != size; ++bb) {
        values_at_gauss_pts(i, j) += field_data(i, j) * base_function;
      for (; bb < nb_base_functions; ++bb)
      ++values_at_gauss_pts;
    if (dataVec.use_count()) {
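// Note (added for clarity): for a tensor-valued field the result matrix has
// Tensor_Dim0 * Tensor_Dim1 rows and one column per integration point, and
// each base function carries Tensor_Dim0 * Tensor_Dim1 coefficients, which is
// why the DOF loop runs over nb_dofs / (Tensor_Dim0 * Tensor_Dim1) entries.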
template <int Tensor_Dim0, int Tensor_Dim1>
        Tensor_Dim0, Tensor_Dim1, double, ublas::row_major, DoubleAllocator> {
template <int Tensor_Dim0, int Tensor_Dim1>
      boost::shared_ptr<MatrixDouble> data_ptr,
      const EntityType zero_at_type = MBVERTEX)
    mat.resize(Tensor_Dim0 * Tensor_Dim1, nb_gauss_pts, false);
    const size_t nb_dofs = local_indices.size();
    for (size_t i = 0; i != local_indices.size(); ++i)
      if (local_indices[i] != -1)
    const size_t nb_base_functions = data.getN().size2();
    auto values_at_gauss_pts =
        getFTensor2FromMat<Tensor_Dim0, Tensor_Dim1>(mat);
    const size_t size = nb_dofs / (Tensor_Dim0 * Tensor_Dim1);
    for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
          getFTensor2FromPtr<Tensor_Dim0, Tensor_Dim1>(&*dotVector.begin());
      for (; bb != size; ++bb) {
        values_at_gauss_pts(i, j) += field_data(i, j) * base_function;
      for (; bb < nb_base_functions; ++bb)
      ++values_at_gauss_pts;
template <int Tensor_Dim>
      const std::string field_name, boost::shared_ptr<MatrixDouble> data_ptr,
      const EntityType zero_type = MBEDGE, const int zero_side = 0)
      const std::string field_name, boost::shared_ptr<MatrixDouble> data_ptr,
      const int zero_side = 0)
    mat.resize((Tensor_Dim * (Tensor_Dim + 1)) / 2, nb_gauss_pts, false);
      const double *array;
      for (int i = 0; i != local_indices.size(); ++i)
        if (local_indices[i] != -1)
    const int nb_base_functions = data.getN().size2();
    auto values_at_gauss_pts = getFTensor2SymmetricFromMat<Tensor_Dim>(mat);
    const int size = nb_dofs / ((Tensor_Dim * (Tensor_Dim + 1)) / 2);
    for (int gg = 0; gg != nb_gauss_pts; ++gg) {
      for (; bb != size; ++bb) {
        values_at_gauss_pts(i, j) += field_data(i, j) * base_function;
      for (; bb < nb_base_functions; ++bb)
      ++values_at_gauss_pts;
template <int Tensor_Dim>
      const std::string field_name, boost::shared_ptr<MatrixDouble> data_ptr,
      const EntityType zero_type = MBEDGE, const int zero_side = 0)
    constexpr auto symm_size = (Tensor_Dim * (Tensor_Dim + 1)) / 2;
    mat.resize(symm_size, nb_gauss_pts, false);
    const int nb_dofs = local_indices.size();
          << "In this case field degrees of freedom are read from a vector. "
             "That usually happens when a time solver is used, and access to "
             "first rates is needed. You probably did not set "
             "ts_u_t and the associated data structure data_ctx to CTX_SET_X_T "
    const double *array;
    for (int i = 0; i != local_indices.size(); ++i)
      if (local_indices[i] != -1)
    const int nb_base_functions = data.getN().size2();
    auto values_at_gauss_pts = getFTensor2SymmetricFromMat<Tensor_Dim>(mat);
    const int size = nb_dofs / symm_size;
    for (int gg = 0; gg != nb_gauss_pts; ++gg) {
      auto field_data = getFTensorDotData<Tensor_Dim>();
      for (; bb != size; ++bb) {
        values_at_gauss_pts(i, j) += field_data(i, j) * base_function;
      for (; bb < nb_base_functions; ++bb)
      ++values_at_gauss_pts;
  static_assert(Dim || !Dim, "not implemented");
      &dotVector[0], &dotVector[1], &dotVector[2], &dotVector[3], &dotVector[4],
      &dotVector[0], &dotVector[1], &dotVector[2]);
template <int Tensor_Dim, class T, class L, class A>
template <int Tensor_Dim>
        Tensor_Dim, double, ublas::row_major, DoubleAllocator> {
template <int Tensor_Dim>
    const size_t nb_gauss_pts = this->getGaussPts().size2();
    auto &mat = *this->dataPtr;
    if (type == this->zeroType) {
      mat.resize(Tensor_Dim, nb_gauss_pts, false);
    const int nb_base_functions = data.getN().size2();
    auto gradients_at_gauss_pts = getFTensor1FromMat<Tensor_Dim>(mat);
    if (nb_dofs > nb_base_functions)
               "Number of base functions inconsistent with number of DOFs "
               nb_dofs, nb_base_functions);
    if (data.getDiffN().size2() != nb_base_functions * Tensor_Dim)
               "Number of base functions inconsistent with number of derivatives "
               data.getDiffN().size2(), nb_base_functions);
    if (data.getDiffN().size1() != nb_gauss_pts)
               "Number of base functions inconsistent with number of integration "
               data.getDiffN().size2(), nb_gauss_pts);
    for (int gg = 0; gg < nb_gauss_pts; ++gg) {
      for (; bb != nb_dofs; ++bb) {
        gradients_at_gauss_pts(I) += field_data * diff_base_function(I);
        ++diff_base_function;
      for (; bb < nb_base_functions; ++bb)
        ++diff_base_function;
      ++gradients_at_gauss_pts;
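// Usage sketch (illustrative only): the gradient operator fills a
// Tensor_Dim x nb_gauss_pts matrix with grad(u) at each integration point,
// i.e. grad(u)_I = sum_b u_b * dN_b/dx_I. The operator name
// OpCalculateScalarFieldGradient<DIM> and the pipeline variable `pip` are
// assumed here for illustration.
//
//   auto grad_ptr = boost::make_shared<MatrixDouble>();
//   pip.push_back(new OpCalculateScalarFieldGradient<3>("U", grad_ptr));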
template <int Tensor_Dim>
        Tensor_Dim, double, ublas::row_major, DoubleAllocator> {
template <int Tensor_Dim>
        Tensor_Dim, double, ublas::row_major, DoubleAllocator> {
template <int Tensor_Dim>
    const size_t nb_gauss_pts = this->getGaussPts().size2();
    auto &mat = *this->dataPtr;
    if (type == this->zeroType) {
      mat.resize(Tensor_Dim * Tensor_Dim, nb_gauss_pts, false);
    const int nb_base_functions = data.getN().size2();
    auto &hessian_base = data.getN(BaseDerivatives::SecondDerivative);
    if (hessian_base.size1() != nb_gauss_pts) {
              "Wrong number of integration pts (%d != %d)",
              hessian_base.size1(), nb_gauss_pts);
    if (hessian_base.size2() != nb_base_functions * Tensor_Dim * Tensor_Dim) {
              "Wrong number of base functions (%d != %d)",
              hessian_base.size2() / (Tensor_Dim * Tensor_Dim),
    if (hessian_base.size2() < nb_dofs * Tensor_Dim * Tensor_Dim) {
              "Wrong number of base functions (%d < %d)",
              hessian_base.size2(), nb_dofs * Tensor_Dim * Tensor_Dim);
    auto t_diff2_base_function = getFTensor2FromPtr<Tensor_Dim, Tensor_Dim>(
        &*hessian_base.data().begin());
    auto hessian_at_gauss_pts =
        getFTensor2FromMat<Tensor_Dim, Tensor_Dim>(mat);
    for (int gg = 0; gg != nb_gauss_pts; ++gg) {
      for (; bb != nb_dofs; ++bb) {
        hessian_at_gauss_pts(I, J) +=
            field_data * t_diff2_base_function(I, J);
        ++t_diff2_base_function;
      for (; bb < nb_base_functions; ++bb) {
        ++t_diff2_base_function;
      ++hessian_at_gauss_pts;
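// Note (added for clarity): this operator consumes second derivatives of the
// base functions, data.getN(BaseDerivatives::SecondDerivative), so those
// derivatives must have been computed for the element before the operator
// runs; the size checks above guard against missing or mismatched hessian
// data.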
template <int Tensor_Dim0, int Tensor_Dim1, class T, class L, class A>
template <int Tensor_Dim0, int Tensor_Dim1>
        Tensor_Dim0, Tensor_Dim1, double, ublas::row_major, DoubleAllocator> {
template <int Tensor_Dim0, int Tensor_Dim1>
            "Data pointer not allocated");
    const size_t nb_gauss_pts = this->getGaussPts().size2();
    auto &mat = *this->dataPtr;
    if (type == this->zeroType) {
      mat.resize(Tensor_Dim0 * Tensor_Dim1, nb_gauss_pts, false);
    if (this->dataVec.use_count()) {
      this->dotVector.resize(nb_dofs, false);
      const double *array;
      CHKERR VecGetArrayRead(this->dataVec, &array);
      for (int i = 0; i != local_indices.size(); ++i)
        if (local_indices[i] != -1)
          this->dotVector[i] = array[local_indices[i]];
        else
          this->dotVector[i] = 0;
      CHKERR VecRestoreArrayRead(this->dataVec, &array);
    const int nb_base_functions = data.getN().size2();
    auto gradients_at_gauss_pts =
        getFTensor2FromMat<Tensor_Dim0, Tensor_Dim1>(mat);
    int size = nb_dofs / Tensor_Dim0;
    if (nb_dofs % Tensor_Dim0) {
              "Data inconsistency");
    for (int gg = 0; gg < nb_gauss_pts; ++gg) {
        if (field_data.l2() != field_data.l2()) {
          MOFEM_LOG("SELF", Sev::error) << "field data " << field_data;
              "Wrong number in coefficients");
      for (; bb < size; ++bb) {
          if (diff_base_function.l2() != diff_base_function.l2()) {
              << "diff_base_function: " << diff_base_function;
              "Wrong number in base functions");
        gradients_at_gauss_pts(I, J) += field_data(I) * diff_base_function(J);
        ++diff_base_function;
      for (; bb != nb_base_functions; ++bb)
        ++diff_base_function;
      ++gradients_at_gauss_pts;
    if (this->dataVec.use_count()) {
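// Note (added for clarity): for a vector field the gradient is stored as a
// Tensor_Dim0 x Tensor_Dim1 tensor per integration point,
// grad(u)_IJ = sum_b u_b(I) * dN_b/dx_J, which is what the contraction
// field_data(I) * diff_base_function(J) above accumulates. The checks of the
// form (x != x) flag NaN coefficients or base-function derivatives early.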
template <int Tensor_Dim0, int Tensor_Dim1>
        Tensor_Dim0, Tensor_Dim1, double, ublas::row_major, DoubleAllocator> {
template <int Tensor_Dim0, int Tensor_Dim1>
      boost::shared_ptr<MatrixDouble> data_ptr,
      const EntityType zero_at_type = MBVERTEX)
    const int nb_dofs = local_indices.size();
    const int nb_gauss_pts = this->getGaussPts().size2();
    mat.resize(Tensor_Dim0 * Tensor_Dim1, nb_gauss_pts, false);
    const double *array;
    for (int i = 0; i != local_indices.size(); ++i)
      if (local_indices[i] != -1)
    const int nb_base_functions = data.getN().size2();
    auto gradients_at_gauss_pts =
        getFTensor2FromMat<Tensor_Dim0, Tensor_Dim1>(mat);
    int size = nb_dofs / Tensor_Dim0;
    if (nb_dofs % Tensor_Dim0) {
    for (int gg = 0; gg < nb_gauss_pts; ++gg) {
      auto field_data = getFTensor1FromPtr<Tensor_Dim0>(&*dotVector.begin());
      for (; bb < size; ++bb) {
        gradients_at_gauss_pts(I, J) += field_data(I) * diff_base_function(J);
        ++diff_base_function;
      for (; bb != nb_base_functions; ++bb)
        ++diff_base_function;
      ++gradients_at_gauss_pts;
template <int Tensor_Dim0, int Tensor_Dim1, class T, class L, class A>
      const std::string field_name, boost::shared_ptr<MatrixDouble> data_ptr,
      const EntityType zero_type = MBVERTEX)
template <int Tensor_Dim0, int Tensor_Dim1>
        Tensor_Dim0, Tensor_Dim1, double, ublas::row_major, DoubleAllocator> {
      const std::string field_name, boost::shared_ptr<MatrixDouble> data_ptr,
      const EntityType zero_type = MBVERTEX)
template <int Tensor_Dim0, int Tensor_Dim1>
            "Data pointer not allocated");
    const size_t nb_gauss_pts = this->getGaussPts().size2();
    constexpr size_t msize = (Tensor_Dim0 * (Tensor_Dim0 + 1)) / 2;
    auto &mat = *this->dataPtr;
    if (type == this->zeroType) {
      mat.resize(msize * Tensor_Dim1, nb_gauss_pts, false);
    const int nb_base_functions = data.getN().size2();
    auto gradients_at_gauss_pts =
        getFTensor3DgFromMat<Tensor_Dim0, Tensor_Dim1>(mat);
    int size = nb_dofs / msize;
    if (nb_dofs % msize) {
              "Data inconsistency");
    for (int gg = 0; gg < nb_gauss_pts; ++gg) {
      for (; bb < size; ++bb) {
        gradients_at_gauss_pts(I, J, K) +=
            field_data(I, J) * diff_base_function(K);
        ++diff_base_function;
      for (; bb != nb_base_functions; ++bb)
        ++diff_base_function;
      ++gradients_at_gauss_pts;
template <int Tensor_Dim0, int Tensor_Dim1>
        Tensor_Dim0, Tensor_Dim1, double, ublas::row_major, DoubleAllocator> {
      const std::string field_name, boost::shared_ptr<MatrixDouble> data_ptr,
      const EntityType zero_type = MBVERTEX)
template <int Tensor_Dim0, int Tensor_Dim1>
        Tensor_Dim0, Tensor_Dim1, double, ublas::row_major, DoubleAllocator> {
template <int Tensor_Dim0, int Tensor_Dim1>
            "Data pointer not allocated");
    const size_t nb_gauss_pts = this->getGaussPts().size2();
    auto &mat = *this->dataPtr;
    if (type == this->zeroType) {
      mat.resize(Tensor_Dim0 * Tensor_Dim1 * Tensor_Dim1, nb_gauss_pts, false);
    if (this->dataVec.use_count()) {
      this->dotVector.resize(nb_dofs, false);
      const double *array;
      CHKERR VecGetArrayRead(this->dataVec, &array);
      for (int i = 0; i != local_indices.size(); ++i)
        if (local_indices[i] != -1)
          this->dotVector[i] = array[local_indices[i]];
        else
          this->dotVector[i] = 0;
      CHKERR VecRestoreArrayRead(this->dataVec, &array);
    const int nb_base_functions = data.getN().size2();
    auto &hessian_base = data.getN(BaseDerivatives::SecondDerivative);
    if (hessian_base.size1() != nb_gauss_pts) {
              "Wrong number of integration pts (%d != %d)",
              hessian_base.size1(), nb_gauss_pts);
    if (hessian_base.size2() !=
        nb_base_functions * Tensor_Dim1 * Tensor_Dim1) {
              "Wrong number of base functions (%d != %d)",
              hessian_base.size2() / (Tensor_Dim1 * Tensor_Dim1),
    if (hessian_base.size2() < nb_dofs * Tensor_Dim1 * Tensor_Dim1) {
              "Wrong number of base functions (%d < %d)",
              hessian_base.size2(), nb_dofs * Tensor_Dim1 * Tensor_Dim1);
    auto t_diff2_base_function = getFTensor2FromPtr<Tensor_Dim1, Tensor_Dim1>(
        &*hessian_base.data().begin());
    auto t_hessian_at_gauss_pts =
        getFTensor3FromMat<Tensor_Dim0, Tensor_Dim1, Tensor_Dim1>(mat);
    int size = nb_dofs / Tensor_Dim0;
    if (nb_dofs % Tensor_Dim0) {
              "Data inconsistency");
    for (int gg = 0; gg < nb_gauss_pts; ++gg) {
      for (; bb < size; ++bb) {
        t_hessian_at_gauss_pts(I, J, K) +=
            field_data(I) * t_diff2_base_function(J, K);
        ++t_diff2_base_function;
      for (; bb != nb_base_functions; ++bb)
        ++t_diff2_base_function;
      ++t_hessian_at_gauss_pts;
    if (this->dataVec.use_count()) {
template <int DIM_01, int DIM_23, int S = 0>
      boost::shared_ptr<MatrixDouble> in_mat,
      boost::shared_ptr<MatrixDouble> out_mat,
      boost::shared_ptr<MatrixDouble> d_mat)
      boost::shared_ptr<MatrixDouble> out_mat,
      boost::shared_ptr<MatrixDouble> d_mat)
    const size_t nb_gauss_pts = getGaussPts().size2();
    auto t_D = getFTensor4DdgFromMat<DIM_01, DIM_23, S>(*(dMat));
    auto t_in = getFTensor2SymmetricFromMat<DIM_01>(*(inMat));
    outMat->resize((DIM_23 * (DIM_23 + 1)) / 2, nb_gauss_pts, false);
    auto t_out = getFTensor2SymmetricFromMat<DIM_23>(*(outMat));
    for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
      t_out(i, j) = t_D(i, j, k, l) * t_in(k, l);
  boost::shared_ptr<MatrixDouble> dMat;
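// Note (added for clarity): at every integration point this computes the
// double contraction out_ij = D_ijkl * in_kl, e.g. mapping a symmetric strain
// to a symmetric stress through a fourth-order material tensor with Ddg
// symmetry; both input and output use the packed symmetric storage of
// (DIM * (DIM + 1)) / 2 rows per column.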
      boost::shared_ptr<MatrixDouble> in_mat,
      boost::shared_ptr<MatrixDouble> out_mat)
      boost::shared_ptr<MatrixDouble> out_mat)
    const size_t nb_gauss_pts = getGaussPts().size2();
    auto t_in = getFTensor2FromMat<DIM, DIM>(*(inMat));
    outMat->resize((DIM * (DIM + 1)) / 2, nb_gauss_pts, false);
    auto t_out = getFTensor2SymmetricFromMat<DIM>(*(outMat));
    for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
      t_out(i, j) = (t_in(i, j) || t_in(j, i)) / 2;
      boost::shared_ptr<MatrixDouble> in_mat,
      boost::shared_ptr<MatrixDouble> out_mat)
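// Note (added for clarity): the FTensor expression (t_in(i, j) || t_in(j, i))
// adds the tensor to its transpose and yields a symmetric tensor, so dividing
// by 2 gives the symmetric part out = (in + in^T) / 2, stored in packed
// symmetric form.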
template <int Base_Dim, int Field_Dim, class T, class L, class A>
template <int Field_Dim>
      boost::shared_ptr<MatrixDouble> data_ptr,
      const EntityType zero_type = MBEDGE,
      const int zero_side = 0)
        dataPtr(data_ptr), zeroType(zero_type), zeroSide(zero_side) {
template <int Field_Dim>
    const size_t nb_integration_points = this->getGaussPts().size2();
    if (type == zeroType && side == zeroSide) {
      dataPtr->resize(Field_Dim, nb_integration_points, false);
    const size_t nb_base_functions = data.getN().size2() / 3;
    auto t_data = getFTensor1FromMat<Field_Dim>(*dataPtr);
    for (size_t gg = 0; gg != nb_integration_points; ++gg) {
      for (; bb != nb_dofs; ++bb) {
        t_data(i) += t_n_hdiv(i) * t_dof;
      for (; bb != nb_base_functions; ++bb)
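// Note (added for clarity): for H-div/H-curl (vector-valued) bases each base
// function stores three Cartesian components, hence the division by 3 when
// computing nb_base_functions, and each scalar DOF multiplies a whole vector
// base function: t_data(i) += t_n_hdiv(i) * t_dof.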
template <int Base_Dim, int Field_Dim = Base_Dim>
        Base_Dim, Field_Dim, double, ublas::row_major, DoubleAllocator> {
template <int Base_Dim, int Field_Dim = Base_Dim>
template <int Field_Dim>
      boost::shared_ptr<MatrixDouble> data_ptr,
      const EntityType zero_type = MBEDGE,
      const int zero_side = 0)
        dataPtr(data_ptr), zeroType(zero_type), zeroSide(zero_side) {
template <int Field_Dim>
    const size_t nb_integration_points = this->getGaussPts().size2();
    if (type == zeroType && side == zeroSide) {
      dataPtr->resize(Field_Dim, nb_integration_points, false);
    const size_t nb_dofs = local_indices.size();
    std::array<double, MAX_DOFS_ON_ENTITY> dot_dofs_vector;
    const double *array;
    CHKERR VecGetArrayRead(getFEMethod()->ts_u_t, &array);
    for (size_t i = 0; i != nb_dofs; ++i)
      if (local_indices[i] != -1)
        dot_dofs_vector[i] = array[local_indices[i]];
      else
        dot_dofs_vector[i] = 0;
    CHKERR VecRestoreArrayRead(getFEMethod()->ts_u_t, &array);
    const size_t nb_base_functions = data.getN().size2() / 3;
    auto t_data = getFTensor1FromMat<Field_Dim>(*dataPtr);
    for (size_t gg = 0; gg != nb_integration_points; ++gg) {
      for (; bb != nb_dofs; ++bb) {
        t_data(i) += t_n_hdiv(i) * dot_dofs_vector[bb];
      for (; bb != nb_base_functions; ++bb)
template <int BASE_DIM, int SPACE_DIM>
      boost::shared_ptr<VectorDouble> data_ptr,
      const EntityType zero_type = MBEDGE,
      const int zero_side = 0)
    const size_t nb_integration_points = getGaussPts().size2();
    dataPtr->resize(nb_integration_points, false);
    const size_t nb_base_functions = data.getN().size2() / BASE_DIM;
    for (size_t gg = 0; gg != nb_integration_points; ++gg) {
      for (; bb != nb_dofs; ++bb) {
        t_data += t_dof * t_n_diff_hdiv(j, j);
      for (; bb != nb_base_functions; ++bb)
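// Note (added for clarity): t_n_diff_hdiv(j, j) contracts the repeated index,
// i.e. it takes the trace of the base-function gradient, which is the
// divergence of that vector base function; the loop therefore accumulates
// div(v) = sum_b v_b * div(N_b) at each integration point.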
template <int BASE_DIM, int SPACE_DIM>
      boost::shared_ptr<MatrixDouble> data_ptr,
      const EntityType zero_type = MBEDGE,
      const int zero_side = 0)
    const size_t nb_integration_points = getGaussPts().size2();
    const size_t nb_base_functions = data.getN().size2() / BASE_DIM;
    auto t_data = getFTensor2FromMat<BASE_DIM, SPACE_DIM>(*dataPtr);
    for (size_t gg = 0; gg != nb_integration_points; ++gg) {
      for (; bb != nb_dofs; ++bb) {
        t_data(i, j) += t_dof * t_base_diff(i, j);
      for (; bb != nb_base_functions; ++bb)
template <int BASE_DIM, int SPACE_DIM>
      boost::shared_ptr<MatrixDouble> data_ptr,
      const EntityType zero_type = MBEDGE,
      const int zero_side = 0)
    const size_t nb_integration_points = getGaussPts().size2();
    const int nb_base_functions = data.getN().size2() / BASE_DIM;
    auto &hessian_base = data.getN(BaseDerivatives::SecondDerivative);
    if (hessian_base.size1() != nb_integration_points) {
              "Wrong number of integration pts (%d != %d)",
              hessian_base.size1(), nb_integration_points);
    if (hessian_base.size2() !=
              "Wrong number of base functions (%d != %d)",
              "Wrong number of base functions (%d < %d)", hessian_base.size2(),
    auto t_data = getFTensor3FromMat<BASE_DIM, SPACE_DIM, SPACE_DIM>(*dataPtr);
    for (size_t gg = 0; gg != nb_integration_points; ++gg) {
      for (; bb != nb_dofs; ++bb) {
        t_data(i, j, k) += t_dof * t_base_diff2(i, j, k);
      for (; bb != nb_base_functions; ++bb)
template <int Tensor_Dim1, int Tensor_Dim2>
      boost::shared_ptr<VectorDouble> data_ptr,
      const EntityType zero_type = MBEDGE,
      const int zero_side = 0)
    const size_t nb_integration_points = getGaussPts().size2();
    dataPtr->resize(nb_integration_points, false);
    const int nb_dofs = local_indices.size();
    std::array<double, MAX_DOFS_ON_ENTITY> dot_dofs_vector;
    const double *array;
    for (size_t i = 0; i != local_indices.size(); ++i)
      if (local_indices[i] != -1)
        dot_dofs_vector[i] = array[local_indices[i]];
      else
        dot_dofs_vector[i] = 0;
    const size_t nb_base_functions = data.getN().size2() / Tensor_Dim1;
    for (size_t gg = 0; gg != nb_integration_points; ++gg) {
      for (; bb != nb_dofs; ++bb) {
        for (auto ii = 0; ii != Tensor_Dim2; ++ii)
          div += t_n_diff_hdiv(ii, ii);
        t_data += dot_dofs_vector[bb] * div;
      for (; bb != nb_base_functions; ++bb)
      boost::shared_ptr<MatrixDouble> data_ptr,
      const EntityType zero_type = MBEDGE,
      const int zero_side = 0);
      boost::shared_ptr<MatrixDouble> data_ptr,
      const EntityType zero_type = MBVERTEX,
      const int zero_side = 0);
      boost::shared_ptr<MatrixDouble> data_ptr,
      const EntityType zero_type = MBVERTEX,
      const int zero_side = 0);
template <int Tensor_Dim0, int Tensor_Dim1>
      boost::shared_ptr<MatrixDouble> data_ptr,
      const EntityType zero_type = MBEDGE,
      const int zero_side = 0)
    const size_t nb_integration_points = getGaussPts().size2();
    dataPtr->resize(Tensor_Dim0 * Tensor_Dim1, nb_integration_points, false);
    const size_t nb_base_functions = data.getN().size2() / 3;
    auto t_data = getFTensor2FromMat<Tensor_Dim0, Tensor_Dim1>(*dataPtr);
    for (size_t gg = 0; gg != nb_integration_points; ++gg) {
      for (; bb != nb_dofs / Tensor_Dim0; ++bb) {
        t_data(i, j) += t_dof(i) * t_n_hvec(j);
      for (; bb < nb_base_functions; ++bb)
template <int Tensor_Dim0, int Tensor_Dim1>
      boost::shared_ptr<MatrixDouble> data_ptr,
      const EntityType zero_type = MBEDGE,
      const int zero_side = 0)
    const size_t nb_integration_points = getGaussPts().size2();
    dataPtr->resize(Tensor_Dim0 * Tensor_Dim1, nb_integration_points, false);
    const size_t nb_base_functions =
        data.getN().size2() / (Tensor_Dim0 * Tensor_Dim1);
    auto t_n_hten = data.getFTensor2N<Tensor_Dim0, Tensor_Dim1>();
    auto t_data = getFTensor2FromMat<Tensor_Dim0, Tensor_Dim1>(*dataPtr);
    for (size_t gg = 0; gg != nb_integration_points; ++gg) {
      for (; bb != nb_dofs; ++bb) {
        t_data(i, j) += t_dof * t_n_hten(i, j);
      for (; bb < nb_base_functions; ++bb)
template <int Tensor_Dim0, int Tensor_Dim1,
      boost::shared_ptr<MatrixDouble> data_ptr,
      const EntityType zero_type = MBEDGE,
      const int zero_side = 0)
    const size_t nb_integration_points = getGaussPts().size2();
    dataPtr->resize(Tensor_Dim0, nb_integration_points, false);
    const size_t nb_base_functions = data.getN().size2() / 3;
    auto t_data = getFTensor1FromMat<Tensor_Dim0>(*dataPtr);
    for (size_t gg = 0; gg != nb_integration_points; ++gg) {
      for (; bb != nb_dofs / Tensor_Dim0; ++bb) {
        double div = t_n_diff_hvec(j, j);
        t_data(i) += t_dof(i) * div;
        t_data(i) += t_base(0) * (t_dof(i) / t_coords(0));
      for (; bb < nb_base_functions; ++bb) {
template <int Tensor_Dim, typename OpBase>
      boost::shared_ptr<MatrixDouble> data_ptr,
      const EntityType zero_type = MBEDGE,
      const int zero_side = 0)
    const size_t nb_integration_points = OpBase::getGaussPts().size2();
    dataPtr->resize(Tensor_Dim, nb_integration_points, false);
    auto t_normal = OpBase::getFTensor1NormalsAtGaussPts();
    const size_t nb_base_functions = data.getN().size2() / 3;
    auto t_data = getFTensor1FromMat<Tensor_Dim>(*dataPtr);
    for (size_t gg = 0; gg != nb_integration_points; ++gg) {
      t_normalized_normal(j) = t_normal(j);
      for (; bb != nb_dofs / Tensor_Dim; ++bb) {
        t_data(i) += t_dof(i) * (t_base(j) * t_normalized_normal(j));
      for (; bb < nb_base_functions; ++bb) {
      boost::shared_ptr<MatrixDouble> inv_jac_ptr);
template <int D1, int D2, int J1, int J2>
  static_assert(D2 == J2, "Dimension of Jacobian and dimension of <out> "
                          "derivative do not match");
    size_t nb_functions = diff_n.size2() / D1;
    size_t nb_gauss_pts = diff_n.size1();
            "Wrong number of Gauss Pts");
    if (diff_n.size2() % D1)
            "Number of derivatives of base functions and D1 dimension does "
    diffNinvJac.resize(diff_n.size1(), D2 * nb_functions, false);
    auto t_diff_n = getFTensor1FromPtr<D2>(&*diffNinvJac.data().begin());
    auto t_diff_n_ref = getFTensor1FromPtr<D1>(&*diff_n.data().begin());
    auto t_inv_jac = getFTensor2FromMat<J1, J2>(*invJacPtr);
    for (size_t gg = 0; gg != nb_gauss_pts; ++gg, ++t_inv_jac) {
      for (size_t dd = 0; dd != nb_functions; ++dd) {
        t_diff_n(i) = t_inv_jac(k, i) * t_diff_n_ref(k);
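// Note (added for clarity): the loop applies the chain rule that pulls
// base-function derivatives back from the reference element to the physical
// element, dN/dx_i = (J^-1)_{k i} * dN/dxi_k, which is exactly the contraction
// t_inv_jac(k, i) * t_diff_n_ref(k) written with FTensor indices.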
template <int DERIVARIVE = 1>
template <int DERIVARIVE = 1>
template <int DERIVARIVE = 1>
      boost::shared_ptr<MatrixDouble> inv_jac_ptr)
template <int DERIVARIVE = 1>
      boost::shared_ptr<MatrixDouble> inv_jac_ptr)
        invJacPtr(inv_jac_ptr) {}
      boost::shared_ptr<MatrixDouble> inv_jac_ptr);
      boost::shared_ptr<MatrixDouble> jac_ptr)
      boost::shared_ptr<VectorDouble> det_ptr,
      boost::shared_ptr<MatrixDouble> out_ptr)
            "Pointer for inPtr matrix not allocated");
            "Pointer for detPtr matrix not allocated");
    const auto nb_rows = inPtr->size1();
    const auto nb_integration_pts = inPtr->size2();
    detPtr->resize(nb_integration_pts, false);
    auto t_in = getFTensor2FromMat<DIM, DIM>(*inPtr);
    for (size_t gg = 0; gg != nb_integration_pts; ++gg) {
    outPtr->resize(nb_rows, nb_integration_pts, false);
    auto t_in = getFTensor2FromMat<DIM, DIM>(*inPtr);
    auto t_out = getFTensor2FromMat<DIM, DIM>(*outPtr);
    for (size_t gg = 0; gg != nb_integration_pts; ++gg) {
      boost::shared_ptr<VectorDouble> out_ptr)
            "Pointer for inPtr matrix not allocated");
    const auto nb_integration_pts = inPtr->size2();
    outPtr->resize(nb_integration_pts, false);
    auto t_in = getFTensor2FromMat<DIM, DIM>(*inPtr);
    for (size_t gg = 0; gg != nb_integration_pts; ++gg) {
      boost::shared_ptr<VectorDouble> out_ptr)
            "Pointer for inPtr matrix not allocated");
    const auto nb_integration_pts = inPtr->size2();
    outPtr->resize(nb_integration_pts, false);
    auto t_in = getFTensor2SymmetricFromMat<DIM>(*inPtr);
    for (size_t gg = 0; gg != nb_integration_pts; ++gg) {

#endif // __USER_DATA_OPERATORS_HPP__