      doEntities{true, true, true, true, true, true,
                 true, true, true, true, true, true},
      doVertices(doEntities[MBVERTEX]), doEdges(doEntities[MBEDGE]),
      doQuads(doEntities[MBQUAD]), doTris(doEntities[MBTRI]),
      doTets(doEntities[MBTET]), doPrisms(doEntities[MBPRISM]) {
      [&](boost::ptr_vector<DataForcesAndSourcesCore::EntData> &row_ent_data,
          const int ss, const EntityType row_type, const EntityType low_type,
          const EntityType hi_type) {
        // ...
        for (EntityType col_type = low_type; col_type != hi_type; ++col_type) {
          // ...
          for (size_t SS = 0; SS != col_ent_data.size(); SS++) {
            if (col_ent_data[SS].getFieldData().size())
              CHKERR doWork(ss, SS, row_type, col_type, row_ent_data[ss],
  // ...
  auto do_row_entity = [&](const EntityType type) {
    // ...
    for (size_t ss = 0; ss != row_ent_data.size(); ++ss) {
      // ...
                          static_cast<EntityType>(type + 1), MBMAXTYPE);
  // ...
  for (EntityType row_type = MBVERTEX; row_type != MBENTITYSET; ++row_type) {
    // ...
      CHKERR do_row_entity(row_type);
  // ...
  for (unsigned int mm = 0; mm != row_data.dataOnEntities[MBENTITYSET].size();
       mm++) {
    if (!row_data.dataOnEntities[MBENTITYSET][mm].getFieldData().empty()) {
      CHKERR do_row_entity(MBENTITYSET);
  // ...
  if (getSymm())
    return opLhs<true>(row_data, col_data);
  else
    return opLhs<false>(row_data, col_data);
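// A minimal, standalone sketch of the dispatch pattern used above: a runtime
// symmetry flag selects a compile-time bool specialisation, and the symmetric
// variant can restrict the column loop (here: start at the row index) so each
// pair is assembled once, which is what the low_type/hi_type arguments of the
// do_col_entity lambda above appear to control. Names (Op, assembleLhs) are
// illustrative only; this is not the library implementation.
#include <cstdio>

struct Op {
  bool symm;
  template <bool Symm> void assembleLhs(int nb_row, int nb_col) {
    for (int r = 0; r != nb_row; ++r) {
      // Symmetric operators skip the lower triangle; doWork() would then be
      // responsible for mirroring the off-diagonal blocks.
      const int c0 = Symm ? r : 0;
      for (int c = c0; c != nb_col; ++c)
        std::printf("doWork(row=%d, col=%d)\n", r, c);
    }
  }
  void assembleLhs(int nb_row, int nb_col) {
    if (symm)
      assembleLhs<true>(nb_row, nb_col);
    else
      assembleLhs<false>(nb_row, nb_col);
  }
};

int main() {
  Op{true}.assembleLhs(3, 3);  // upper triangle only
  Op{false}.assembleLhs(3, 3); // all 9 pairs
}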
template <bool ErrorIfNoBase>
// ...
    const std::array<bool, MBMAXTYPE> &do_entities) {
  // ...
  auto do_entity = [&](auto type) {
    // ...
    const size_t size = ent_data.size();
    for (size_t ss = 0; ss != size; ++ss) {
      // ...
      auto &side_data = ent_data[ss];
      // ...
      if (side_data.getFieldData().size() &&
          (side_data.getBase() == NOBASE ||
        // ...
        for (VectorDofs::iterator it = side_data.getFieldDofs().begin();
             it != side_data.getFieldDofs().end(); it++)
          if ((*it) && (*it)->getActive())
  // ...
  for (EntityType row_type = MBVERTEX; row_type != MBENTITYSET; ++row_type) {
    if (do_entities[row_type]) {
      CHKERR do_entity(row_type);
  // ...
  if (do_entities[MBENTITYSET]) {
    // ...
    for (unsigned int mm = 0; mm != data.dataOnEntities[MBENTITYSET].size();
         mm++) {
      if (!data.dataOnEntities[MBENTITYSET][mm].getFieldData().empty()) {
  // ...
                               const bool error_if_no_base) {
  if (error_if_no_base)
  auto A = getFTensor2FromMat<3, 3>(jac_data);
  int nb_gauss_pts = jac_data.size2();
  det_data.resize(nb_gauss_pts, false);
  inv_jac_data.resize(3, nb_gauss_pts, false);
  // ...
  auto I = getFTensor2FromMat<3, 3>(inv_jac_data);
  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
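// The fragment above resizes the determinant and inverse-Jacobian storage per
// Gauss point before the inversion loop. As a self-contained reference, this
// is a plain-array sketch of the closed-form 3x3 determinant and inverse such
// a routine computes (not the library code itself, which works with FTensor
// objects).
#include <array>
#include <cstdio>

using Mat3 = std::array<std::array<double, 3>, 3>;

double det3(const Mat3 &a) {
  return a[0][0] * (a[1][1] * a[2][2] - a[1][2] * a[2][1]) -
         a[0][1] * (a[1][0] * a[2][2] - a[1][2] * a[2][0]) +
         a[0][2] * (a[1][0] * a[2][1] - a[1][1] * a[2][0]);
}

Mat3 inv3(const Mat3 &a, double det) {
  Mat3 r;
  r[0][0] = (a[1][1] * a[2][2] - a[1][2] * a[2][1]) / det;
  r[0][1] = (a[0][2] * a[2][1] - a[0][1] * a[2][2]) / det;
  r[0][2] = (a[0][1] * a[1][2] - a[0][2] * a[1][1]) / det;
  r[1][0] = (a[1][2] * a[2][0] - a[1][0] * a[2][2]) / det;
  r[1][1] = (a[0][0] * a[2][2] - a[0][2] * a[2][0]) / det;
  r[1][2] = (a[0][2] * a[1][0] - a[0][0] * a[1][2]) / det;
  r[2][0] = (a[1][0] * a[2][1] - a[1][1] * a[2][0]) / det;
  r[2][1] = (a[0][1] * a[2][0] - a[0][0] * a[2][1]) / det;
  r[2][2] = (a[0][0] * a[1][1] - a[0][1] * a[1][0]) / det;
  return r;
}

int main() {
  const Mat3 jac = {{{2, 0, 0}, {0, 3, 0}, {0, 0, 4}}};
  const double det = det3(jac);    // 24
  const Mat3 inv = inv3(jac, det); // diag(1/2, 1/3, 1/4)
  std::printf("det = %g, inv[0][0] = %g\n", det, inv[0][0]);
}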
                                const bool diff_at_gauss_ptr) {
  // ...
  const int nb_base_functions =
      (diff_at_gauss_ptr || type != MBVERTEX) ? diff_n.size2() / 3 : 4;
  const int nb_gauss_pts =
      (diff_at_gauss_ptr || type != MBVERTEX) ? diff_n.size1() : 1;
  diffNinvJac.resize(diff_n.size1(), diff_n.size2(), false);
  // ...
  double *t_diff_n_ptr = &*diff_n.data().begin();
  // ...
      t_diff_n_ptr, &t_diff_n_ptr[1], &t_diff_n_ptr[2]);
  double *t_inv_n_ptr = &*diffNinvJac.data().begin();
  // ...
      t_inv_n_ptr, &t_inv_n_ptr[1], &t_inv_n_ptr[2]);
  // ...
  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
    for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
  // ...
  CHKERR transform_base(*(m.second), true);
  // ...
  CHKERR transform_base(*ptr, true);
  const unsigned int nb_gauss_pts = data.getDiffN(base).size1();
  const unsigned int nb_base_functions = data.getDiffN(base).size2() / 9;
  if (!nb_base_functions)
  // ...
      t_inv_diff_n_ptr, &t_inv_diff_n_ptr[HVEC0_1],
  // ...
  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
    for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
  const unsigned int nb_base_functions = data.getN(base).size2() / 3;
  if (!nb_base_functions)
  // ...
  const double c = 1. / 6.;
  const unsigned int nb_gauss_pts = data.getN(base).size1();
  // ...
  piolaN.resize(nb_gauss_pts, data.getN(base).size2(), false);
  if (data.getN(base).size2() > 0) {
    // ...
    double *t_transformed_n_ptr = &*piolaN.data().begin();
    // ...
        &t_transformed_n_ptr[HVEC1], &t_transformed_n_ptr[HVEC2]);
    for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
      for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
        t_transformed_n(i) = a * tJac(i, k) * t_n(k);
  // ...
  if (data.getDiffN(base).size2() > 0) {
    // ...
    double *t_transformed_diff_n_ptr = &*piolaDiffN.data().begin();
    // ...
        t_transformed_diff_n(t_transformed_diff_n_ptr,
                             &t_transformed_diff_n_ptr[HVEC0_1],
                             &t_transformed_diff_n_ptr[HVEC0_2],
                             &t_transformed_diff_n_ptr[HVEC1_0],
                             &t_transformed_diff_n_ptr[HVEC1_1],
                             &t_transformed_diff_n_ptr[HVEC1_2],
                             &t_transformed_diff_n_ptr[HVEC2_0],
                             &t_transformed_diff_n_ptr[HVEC2_1],
                             &t_transformed_diff_n_ptr[HVEC2_2]);
    for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
      for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
        t_transformed_diff_n(i, k) = a * tJac(i, j) * t_diff_n(j, k);
        // ...
        ++t_transformed_diff_n;
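// The loops above apply the contravariant (Piola) map used for H(div) bases,
// v(x) = (1/det J) * J * v_ref(xi), to the base functions and their reference
// derivatives; the prefactor `a` carries the scaling (in the higher-order
// variant further down it is set explicitly to 1/det J). A standalone sketch
// of the vector part, with plain arrays instead of FTensor:
#include <array>
#include <cstdio>

using Vec3 = std::array<double, 3>;
using Mat3 = std::array<std::array<double, 3>, 3>;

Vec3 contravariantPiola(const Mat3 &jac, double det_jac, const Vec3 &v_ref) {
  Vec3 v{};
  for (int i = 0; i != 3; ++i)
    for (int k = 0; k != 3; ++k)
      v[i] += (1. / det_jac) * jac[i][k] * v_ref[k];
  return v;
}

int main() {
  const Mat3 jac = {{{2, 0, 0}, {0, 1, 0}, {0, 0, 1}}}; // element stretched in x
  const Vec3 v_ref = {1., 0., 0.};
  const Vec3 v = contravariantPiola(jac, 2. /* det J */, v_ref);
  std::printf("%g %g %g\n", v[0], v[1], v[2]); // 1 0 0
}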
  const unsigned int nb_base_functions = data.getN(base).size2() / 3;
  if (!nb_base_functions)
  // ...
  const unsigned int nb_gauss_pts = data.getN(base).size1();
  piolaN.resize(nb_gauss_pts, data.getN(base).size2(), false);
  // ...
  double *t_transformed_n_ptr = &*piolaN.data().begin();
  // ...
      t_transformed_n_ptr, &t_transformed_n_ptr[HVEC1],
      &t_transformed_n_ptr[HVEC2]);
  // ...
  double *t_transformed_diff_n_ptr = &*piolaDiffN.data().begin();
  // ...
      t_transformed_diff_n_ptr, &t_transformed_diff_n_ptr[HVEC0_1],
      &t_transformed_diff_n_ptr[HVEC0_2], &t_transformed_diff_n_ptr[HVEC1_0],
      &t_transformed_diff_n_ptr[HVEC1_1], &t_transformed_diff_n_ptr[HVEC1_2],
      &t_transformed_diff_n_ptr[HVEC2_0], &t_transformed_diff_n_ptr[HVEC2_1],
      &t_transformed_diff_n_ptr[HVEC2_2]);
  // ...
  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
    for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
      // ...
      ++t_transformed_diff_n;
439 "It looks that ho inverse of Jacobian is not calculated %d != 9",
445 unsigned int nb_gauss_pts = diff_n.size1();
446 if (nb_gauss_pts == 0)
449 if (
invHoJac.size1() == nb_gauss_pts) {
451 unsigned int nb_base_functions = diff_n.size2() / 3;
452 if (nb_base_functions == 0)
455 double *t_inv_jac_ptr = &*
invHoJac.data().begin();
457 t_inv_jac_ptr, &t_inv_jac_ptr[1], &t_inv_jac_ptr[2],
458 &t_inv_jac_ptr[3], &t_inv_jac_ptr[4], &t_inv_jac_ptr[5],
459 &t_inv_jac_ptr[6], &t_inv_jac_ptr[7], &t_inv_jac_ptr[8], 9);
461 diffNinvJac.resize(nb_gauss_pts, 3 * nb_base_functions,
false);
463 double *t_diff_n_ptr = &*diff_n.data().begin();
465 t_diff_n_ptr, &t_diff_n_ptr[1], &t_diff_n_ptr[2]);
466 double *t_inv_n_ptr = &*
diffNinvJac.data().begin();
475 for (
unsigned int gg = 0; gg < nb_gauss_pts; ++gg) {
476 for (
unsigned int bb = 0; bb != nb_base_functions; ++bb) {
477 t_inv_diff_n(
i) = t_diff_n(
j) * t_inv_jac(
j,
i);
501 CHKERR transform_base(*(
m.second));
506 CHKERR transform_base(*ptr);
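// The contraction t_inv_diff_n(i) = t_diff_n(j) * t_inv_jac(j, i) above is the
// standard pull-back of reference-element gradients to physical coordinates,
// dN/dx_i = dN/dxi_j * (J^{-1})_{j,i}, done per base function and Gauss point.
// A minimal plain-array sketch of that single contraction:
#include <array>
#include <cstdio>

using Vec3 = std::array<double, 3>;
using Mat3 = std::array<std::array<double, 3>, 3>;

// diff_n: gradient w.r.t. reference coordinates; inv_jac: inverse Jacobian.
Vec3 pullBackGradient(const Vec3 &diff_n, const Mat3 &inv_jac) {
  Vec3 out{};
  for (int i = 0; i != 3; ++i)
    for (int j = 0; j != 3; ++j)
      out[i] += diff_n[j] * inv_jac[j][i];
  return out;
}

int main() {
  // Element stretched by 2 in x: J = diag(2,1,1), J^{-1} = diag(1/2,1,1).
  const Mat3 inv_jac = {{{0.5, 0, 0}, {0, 1, 0}, {0, 0, 1}}};
  const Vec3 ref_grad = {1., 1., 1.};
  const Vec3 phys_grad = pullBackGradient(ref_grad, inv_jac);
  std::printf("%g %g %g\n", phys_grad[0], phys_grad[1], phys_grad[2]); // 0.5 1 1
}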
                     data.getDiffN(base).size2(), false);
  // ...
  unsigned int nb_gauss_pts = data.getDiffN(base).size1();
  unsigned int nb_base_functions = data.getDiffN(base).size2() / 9;
  if (nb_base_functions == 0)
  // ...
      t_inv_diff_n_ptr, &t_inv_diff_n_ptr[HVEC0_1],
  // ...
  double *t_inv_jac_ptr = &*invHoJac.data().begin();
  // ...
      t_inv_jac_ptr, &t_inv_jac_ptr[1], &t_inv_jac_ptr[2], &t_inv_jac_ptr[3],
      &t_inv_jac_ptr[4], &t_inv_jac_ptr[5], &t_inv_jac_ptr[6],
      &t_inv_jac_ptr[7], &t_inv_jac_ptr[8]);
  // ...
  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
    for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
      t_inv_diff_n(i, j) = t_inv_jac(k, j) * t_diff_n(i, k);
  unsigned int nb_gauss_pts = data.getN(base).size1();
  unsigned int nb_base_functions = data.getN(base).size2() / 3;
  piolaN.resize(nb_gauss_pts, data.getN(base).size2(), false);
  // ...
  double *t_transformed_n_ptr = &*piolaN.data().begin();
  // ...
      &t_transformed_n_ptr[HVEC1], &t_transformed_n_ptr[HVEC2]);
  // ...
  double *t_transformed_diff_n_ptr = &*piolaDiffN.data().begin();
  // ...
      t_transformed_diff_n_ptr, &t_transformed_diff_n_ptr[HVEC0_1],
      &t_transformed_diff_n_ptr[HVEC0_2], &t_transformed_diff_n_ptr[HVEC1_0],
      &t_transformed_diff_n_ptr[HVEC1_1], &t_transformed_diff_n_ptr[HVEC1_2],
      &t_transformed_diff_n_ptr[HVEC2_0], &t_transformed_diff_n_ptr[HVEC2_1],
      &t_transformed_diff_n_ptr[HVEC2_2]);
  // ...
  double *t_jac_ptr = &*hoJac.data().begin();
  // ...
      t_jac_ptr, &t_jac_ptr[1], &t_jac_ptr[2], &t_jac_ptr[3], &t_jac_ptr[4],
      &t_jac_ptr[5], &t_jac_ptr[6], &t_jac_ptr[7], &t_jac_ptr[8]);
  // ...
  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
    for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
      const double a = 1. / t_det;
      t_transformed_n(i) = a * t_jac(i, k) * t_n(k);
      t_transformed_diff_n(i, k) = a * t_jac(i, j) * t_diff_n(j, k);
      // ...
      ++t_transformed_diff_n;
  unsigned int nb_gauss_pts = data.getN(base).size1();
  unsigned int nb_base_functions = data.getN(base).size2() / 3;
  piolaN.resize(nb_gauss_pts, data.getN(base).size2(), false);
  // ...
  double *t_transformed_n_ptr = &*piolaN.data().begin();
  // ...
      &t_transformed_n_ptr[HVEC1], &t_transformed_n_ptr[HVEC2]);
  // ...
  double *t_transformed_diff_n_ptr = &*piolaDiffN.data().begin();
  // ...
      t_transformed_diff_n_ptr, &t_transformed_diff_n_ptr[HVEC0_1],
      &t_transformed_diff_n_ptr[HVEC0_2], &t_transformed_diff_n_ptr[HVEC1_0],
      &t_transformed_diff_n_ptr[HVEC1_1], &t_transformed_diff_n_ptr[HVEC1_2],
      &t_transformed_diff_n_ptr[HVEC2_0], &t_transformed_diff_n_ptr[HVEC2_1],
      &t_transformed_diff_n_ptr[HVEC2_2]);
  // ...
  double *t_inv_jac_ptr = &*hoInvJac.data().begin();
  // ...
      t_inv_jac_ptr, &t_inv_jac_ptr[1], &t_inv_jac_ptr[2], &t_inv_jac_ptr[3],
      &t_inv_jac_ptr[4], &t_inv_jac_ptr[5], &t_inv_jac_ptr[6],
      &t_inv_jac_ptr[7], &t_inv_jac_ptr[8], 9);
  // ...
  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
    for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
      t_transformed_n(i) = t_inv_jac(k, i) * t_n(k);
      t_transformed_diff_n(i, k) = t_inv_jac(j, i) * t_diff_n(j, k);
      // ...
      ++t_transformed_diff_n;
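// The contraction t_transformed_n(i) = t_inv_jac(k, i) * t_n(k) above is the
// covariant Piola map used for H(curl) bases, u(x) = J^{-T} * u_ref(xi), which
// preserves tangential traces across element boundaries. Standalone sketch:
#include <array>
#include <cstdio>

using Vec3 = std::array<double, 3>;
using Mat3 = std::array<std::array<double, 3>, 3>;

Vec3 covariantPiola(const Mat3 &inv_jac, const Vec3 &u_ref) {
  Vec3 u{};
  for (int i = 0; i != 3; ++i)
    for (int k = 0; k != 3; ++k)
      u[i] += inv_jac[k][i] * u_ref[k]; // (J^{-T})_{i,k} = (J^{-1})_{k,i}
  return u;
}

int main() {
  const Mat3 inv_jac = {{{0.5, 0, 0}, {0, 1, 0}, {0, 0, 1}}}; // J = diag(2,1,1)
  const Vec3 u_ref = {1., 0., 0.};
  const Vec3 u = covariantPiola(inv_jac, u_ref);
  std::printf("%g %g %g\n", u[0], u[1], u[2]); // 0.5 0 0
}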
  int nb_gauss_pts = data.getN().size1();
  // ...
      &m(0, 0), &m(0, 1), &m(0, 2));
  // ...
  if (2 * data.getN().size2() != data.getDiffN().size2()) {
  // ...
  if (nb_dofs % 3 != 0) {
  // ...
  if (nb_dofs > 3 * data.getN().size2()) {
  // ...
  for (; nn != nb_dofs; nn++) {
    // ...
    if (nn > 3 * data.getN().size2()) {
      // ...
          "data inconsistency for base %s",
  // ...
  const int nb_base_functions = data.getN().size2();
  // ...
  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
    // ...
    for (; bb != nb_dofs / 3; ++bb) {
      t_coords(i) += t_base * t_data(i);
      t_t1(i) += t_data(i) * t_diff_base(N0);
      t_t2(i) += t_data(i) * t_diff_base(N1);
    // ...
    for (; bb != nb_base_functions; ++bb) {
  // ...
      &m(0, 0), &m(0, 1), &m(0, 2));
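// The loop above interpolates, at each Gauss point, the coordinates and the
// two surface tangents t1 = sum_b x_b * dN_b/dxi0 and t2 = sum_b x_b * dN_b/dxi1.
// The face normal then follows, presumably in calculateNormals(), as the cross
// product n = t1 x t2, whose length carries the local area scaling. Minimal
// sketch of that last step:
#include <array>
#include <cstdio>

using Vec3 = std::array<double, 3>;

Vec3 cross(const Vec3 &a, const Vec3 &b) {
  return {a[1] * b[2] - a[2] * b[1],
          a[2] * b[0] - a[0] * b[2],
          a[0] * b[1] - a[1] * b[0]};
}

int main() {
  const Vec3 t1 = {1., 0., 0.};
  const Vec3 t2 = {0., 2., 0.};
  const Vec3 n = cross(t1, t2); // (0, 0, 2): normal with area weighting
  std::printf("%g %g %g\n", n[0], n[1], n[2]);
}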
  const int valid_edges3[] = {1, 1, 1, 0, 0, 0, 0, 0, 0};
  const int valid_faces3[] = {0, 0, 0, 1, 0, 0, 0, 0, 0};
  const int valid_edges4[] = {0, 0, 0, 0, 0, 0, 1, 1, 1};
  const int valid_faces4[] = {0, 0, 0, 0, 1, 0, 0, 0, 0};
  // ...
  if (type == MBEDGE) {
    if (!valid_edges3[side] || valid_edges4[side])
      // ...
  } else if (type == MBTRI) {
    if (!valid_faces3[side] || valid_faces4[side])
  // ...
  for (unsigned int gg = 0; gg < data.getN().size1(); ++gg) {
    for (int dd = 0; dd < 3; dd++) {
  // ...
  if (2 * data.getN().size2() != data.getDiffN().size2()) {
  // ...
  if (nb_dofs % 3 != 0) {
  // ...
  if (nb_dofs > 3 * data.getN().size2()) {
    // ...
        "data inconsistency, side %d type %d", side, type);
  // ...
  for (unsigned int gg = 0; gg < data.getN().size1(); ++gg) {
    for (int dd = 0; dd < 3; dd++) {
      if ((type == MBTRI && valid_faces3[side]) ||
          (type == MBEDGE && valid_edges3[side])) {
        // ...
      } else if ((type == MBTRI && valid_faces4[side]) ||
                 (type == MBEDGE && valid_edges4[side])) {
906 "Pointer to normal/normals not set");
909 if (normal_is_at_gauss_pts)
912 auto apply_transform_linear_geometry = [&](
auto base,
auto nb_gauss_pts,
913 auto nb_base_functions) {
919 const auto l02 = t_normal(
i) * t_normal(
i);
921 for (
int gg = 0; gg != nb_gauss_pts; ++gg) {
922 for (
int bb = 0; bb != nb_base_functions; ++bb) {
923 const auto v = t_base(0);
924 t_base(
i) = (v / l02) * t_normal(
i);
931 auto apply_transform_nonlinear_geometry = [&](
auto base,
auto nb_gauss_pts,
932 auto nb_base_functions) {
936 &normals_at_pts(0, 0), &normals_at_pts(0, 1), &normals_at_pts(0, 2));
938 for (
int gg = 0; gg != nb_gauss_pts; ++gg) {
939 for (
int bb = 0; bb != nb_base_functions; ++bb) {
940 const auto v = t_base(0);
941 const auto l2 = t_normal(
i) * t_normal(
i);
942 t_base(
i) = (v / l2) * t_normal(
i);
950 if (normal_is_at_gauss_pts) {
954 const auto &base_functions = data.
getN(base);
955 const auto nb_gauss_pts = base_functions.size1();
961 "normalsAtGaussPtsRawPtr has inconsistent number of "
965 const auto nb_base_functions = base_functions.size2() / 3;
966 CHKERR apply_transform_nonlinear_geometry(base, nb_gauss_pts,
974 const auto &base_functions = data.
getN(base);
975 const auto nb_gauss_pts = base_functions.size1();
978 const auto nb_base_functions = base_functions.size2() / 3;
979 CHKERR apply_transform_linear_geometry(base, nb_gauss_pts,
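// Both lambdas above take the first component of each base function as a
// scalar weight v = t_base(0) and replace the base vector by (v / |n|^2) * n,
// i.e. they align the face bases with the element normal (or the per-Gauss-
// point normal for curved geometry) and rescale by its squared length. A
// standalone sketch of that rescaling step:
#include <array>
#include <cstdio>

using Vec3 = std::array<double, 3>;

Vec3 alignWithNormal(double v, const Vec3 &normal) {
  const double l2 = normal[0] * normal[0] + normal[1] * normal[1] +
                    normal[2] * normal[2]; // squared length (l02 / l2 above)
  return {(v / l2) * normal[0], (v / l2) * normal[1], (v / l2) * normal[2]};
}

int main() {
  const Vec3 n = {0., 0., 2.};
  const Vec3 b = alignWithNormal(1., n); // (0, 0, 0.5)
  std::printf("%g %g %g\n", b[0], b[1], b[2]);
}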
  if (type != MBEDGE && type != MBTRI)
  // ...
  auto &baseN = data.getN(base);
  auto &diffBaseN = data.getDiffN(base);
  // ...
  int nb_dofs = baseN.size2() / 3;
  int nb_gauss_pts = baseN.size1();
  // ...
  MatrixDouble diff_piola_n(diffBaseN.size1(), diffBaseN.size2());
  // ...
  if (nb_dofs > 0 && nb_gauss_pts > 0) {
    // ...
        t_transformed_diff_h_curl(
    // ...
    for (int gg = 0; gg < nb_gauss_pts; ++gg) {
      // ...
      for (int ll = 0; ll != nb_dofs; ll++) {
        t_transformed_h_curl(i) = t_inv_m(j, i) * t_h_curl(j);
        t_transformed_diff_h_curl(i, k) =
            t_inv_m(j, i) * t_diff_h_curl(j, k);
        // ...
        ++t_transformed_h_curl;
        // ...
        ++t_transformed_diff_h_curl;
    // ...
    for (int gg = 0; gg < nb_gauss_pts; ++gg) {
      for (int ll = 0; ll != nb_dofs; ll++) {
        t_transformed_h_curl(i) = t_inv_m(j, i) * t_h_curl(j);
        t_transformed_diff_h_curl(i, k) =
            t_inv_m(j, i) * t_diff_h_curl(j, k);
        // ...
        ++t_transformed_h_curl;
        // ...
        ++t_transformed_diff_h_curl;
    // ...
    if (cc != nb_gauss_pts * nb_dofs)
    // ...
    baseN.data().swap(piola_n.data());
    diffBaseN.data().swap(diff_piola_n.data());
  int nb_gauss_pts = data.getN().size1();
  tAngent.resize(nb_gauss_pts, 3, false);
  // ...
  int nb_approx_fun = data.getN().size2();
  double *diff = &*data.getDiffN().data().begin();
  // ...
  tAngent.resize(nb_gauss_pts, 3, false);
  // ...
  for (int dd = 0; dd != 3; dd++) {
    for (int gg = 0; gg != nb_gauss_pts; ++gg) {
  // ...
      "Approximated field should be rank 3, i.e. vector in 3d space");
  // ...
  for (int dd = 0; dd != 3; dd++) {
    for (int gg = 0; gg != nb_gauss_pts; ++gg) {
      // ...
          cblas_ddot(nb_dofs / 3, &diff[gg * nb_approx_fun], 1, &dofs[dd], 3);
  // ...
      "This operator can calculate tangent vector only on edge");
  const double l0 = t_m(i) * t_m(i);
  // ...
  auto get_base_at_pts = [&](auto base) {
  // ...
  auto get_tangent_at_pts = [&]() {
  // ...
  auto calculate_squared_edge_length = [&]() {
    std::vector<double> l1;
    // ...
    l1.resize(nb_gauss_pts);
    auto t_m_at_pts = get_tangent_at_pts();
    for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
      l1[gg] = t_m_at_pts(i) * t_m_at_pts(i);
  // ...
  auto l1 = calculate_squared_edge_length();
  // ...
  const size_t nb_gauss_pts = data.getN(base).size1();
  const size_t nb_dofs = data.getN(base).size2() / 3;
  if (nb_gauss_pts && nb_dofs) {
    auto t_h_curl = get_base_at_pts(base);
    // ...
    auto t_m_at_pts = get_tangent_at_pts();
    for (int gg = 0; gg != nb_gauss_pts; ++gg) {
      const double l0 = l1[gg];
      for (int ll = 0; ll != nb_dofs; ll++) {
        const double val = t_h_curl(0);
        const double a = val / l0;
        t_h_curl(i) = t_m_at_pts(i) * a;
    // ...
    for (int gg = 0; gg != nb_gauss_pts; ++gg) {
      for (int ll = 0; ll != nb_dofs; ll++) {
        const double val = t_h_curl(0);
        const double a = val / l0;
        t_h_curl(i) = t_m(i) * a;
    // ...
    if (cc != nb_gauss_pts * nb_dofs)
  double *ptr = &*data.data().begin();
  // ...
OpGetDataAndGradient<3, 3>::getGradAtGaussPtsTensor<3, 3>(MatrixDouble &data) {
  double *ptr = &*data.data().begin();
  // ...
                                          &ptr[4], &ptr[5], &ptr[6], &ptr[7],
  // ...
  const unsigned int nb_gauss_pts = data.getN().size1();
  const unsigned int nb_base_functions = data.getN().size2();
  const unsigned int nb_dofs = data.getFieldData().size();
  // ...
  auto t_val = getValAtGaussPtsTensor<3>(dataAtGaussPts);
  auto t_grad = getGradAtGaussPtsTensor<3, 3>(dataGradAtGaussPts);
  // ...
  if (type == MBVERTEX &&
      data.getDiffN().data().size() == 3 * nb_base_functions) {
    for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
      // ...
      unsigned int bb = 0;
      for (; bb != nb_dofs / 3; ++bb) {
        t_val(i) += t_data(i) * t_n;
        t_grad(i, j) += t_data(i) * t_diff_n(j);
      // ...
      for (; bb != nb_base_functions; ++bb) {
  // ...
    for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
      // ...
      unsigned int bb = 0;
      for (; bb != nb_dofs / 3; ++bb) {
        t_val(i) += t_data(i) * t_n;
        t_grad(i, j) += t_data(i) * t_diff_n(j);
      // ...
      for (; bb != nb_base_functions; ++bb) {
  // ...
  const unsigned int nb_gauss_pts = data.getN().size1();
  const unsigned int nb_base_functions = data.getN().size2();
  // ...
  const unsigned int nb_dofs = data.getFieldData().size();
  // ...
  double *ptr = &*dataGradAtGaussPts.data().begin();
  // ...
  if (type == MBVERTEX &&
      data.getDiffN().data().size() == 3 * nb_base_functions) {
    for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
      // ...
      unsigned int bb = 0;
      for (; bb != nb_dofs / 3; ++bb) {
        t_val += t_data * t_n;
        t_grad(i) += t_data * t_diff_n(i);
      // ...
      for (; bb != nb_base_functions; ++bb) {
  // ...
    for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
      // ...
      unsigned int bb = 0;
      for (; bb != nb_dofs / 3; ++bb) {
        t_val += t_data * t_n; // accumulate, consistent with the other branches
        t_grad(i) += t_data * t_diff_n(i);
      // ...
      for (; bb != nb_base_functions; ++bb) {
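// The two specialisations above evaluate a field and its gradient at Gauss
// points by summing base functions against dof values: u(x_g) = sum_b N_b u_b
// and grad u(x_g) = sum_b u_b * grad N_b. A scalar, single-point sketch of the
// same accumulation (2D gradients for brevity):
#include <array>
#include <cstdio>
#include <vector>

int main() {
  // One Gauss point, three base functions (e.g. a linear triangle).
  const std::vector<double> n = {0.2, 0.3, 0.5};       // N_b at the point
  const std::vector<std::array<double, 2>> diff_n = {  // grad N_b
      {-1., -1.}, {1., 0.}, {0., 1.}};
  const std::vector<double> dofs = {1., 2., 4.};       // u_b
  double val = 0.;
  std::array<double, 2> grad = {0., 0.};
  for (std::size_t b = 0; b != dofs.size(); ++b) {
    val += n[b] * dofs[b];
    grad[0] += dofs[b] * diff_n[b][0];
    grad[1] += dofs[b] * diff_n[b][1];
  }
  std::printf("u = %g, grad = (%g, %g)\n", val, grad[0], grad[1]); // 2.8, (1, 3)
}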