v0.9.2
DataOperators.cpp
Go to the documentation of this file.
1 /** file DataOperators.cpp
2 
3  \brief implementation of Data Operators for Forces and Sources
4 
5 */
6 
7 /* This file is part of MoFEM.
8  * MoFEM is free software: you can redistribute it and/or modify it under
9  * the terms of the GNU Lesser General Public License as published by the
10  * Free Software Foundation, either version 3 of the License, or (at your
11  * option) any later version.
12  *
13  * MoFEM is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
16  * License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with MoFEM. If not, see <http://www.gnu.org/licenses/>. */
20 
21 #ifdef __cplusplus
22 extern "C" {
23 #endif
24 #include <cblas.h>
25 #include <lapack_wrap.h>
26 #include <gm_rule.h>
27 #ifdef __cplusplus
28 }
29 #endif
30 
31 namespace MoFEM {
32 
// DataOperator constructor body.
// NOTE(review): doxygen-extracted listing — the signature line (listing line
// 33, presumably `DataOperator::DataOperator(const bool symm)`) is missing
// from this dump; confirm against the repository source.
// Initializes the symmetry flag, enables processing on all basic entity
// types, and binds named convenience references (doVertices, doEdges, ...)
// to slots of the doEntities array.
34  :
35 
36  sYmm(symm),
37 
38  doEntities{true, true, true, true, true, true,
39  true, true, true, true, true, true},
40 
41  doVertices(doEntities[MBVERTEX]), doEdges(doEntities[MBEDGE]),
42  doQuads(doEntities[MBQUAD]), doTris(doEntities[MBTRI]),
43  doTets(doEntities[MBTET]), doPrisms(doEntities[MBPRISM]) {
44 
45  /// Processing of these entity types is not yet implemented, switch it off.
46  doEntities[MBPOLYGON] = false;
47  doEntities[MBPYRAMID] = false;
48  doEntities[MBKNIFE] = false;
49  doEntities[MBHEX] = false;
50  doEntities[MBPOLYHEDRON] = false;
51 }
52 
// Left-hand-side (matrix) loop: visits all row/column entity pairs and calls
// the user-provided doWork() for each pair of entity data.
// When Symm is true, only same-type column entries with SS >= ss plus
// higher-ranked types are visited (upper triangle); when false, all pairs.
// NOTE(review): doxygen-extracted listing — the signature line (presumably
// `MoFEMErrorCode DataOperator::opLhs(DataForcesAndSourcesCore &row_data,`)
// and the MoFEMFunctionBegin/Return macro lines are missing from this dump;
// confirm against the repository source.
53 template <bool Symm>
55  DataForcesAndSourcesCore &col_data) {
57 
// Calls doWork for one row entity against all column entities whose type is
// in [low_type, hi_type); skips column entries without field data.
58  auto do_col_entity =
59  [&](boost::ptr_vector<DataForcesAndSourcesCore::EntData> &row_ent_data,
60  const int ss, const EntityType row_type, const EntityType low_type,
61  const EntityType hi_type) {
63  for (EntityType col_type = low_type; col_type != hi_type; ++col_type) {
64  auto &col_ent_data = col_data.dataOnEntities[col_type];
65  for (size_t SS = 0; SS != col_ent_data.size(); SS++) {
66  if (col_ent_data[SS].getFieldData().size())
67  CHKERR doWork(ss, SS, row_type, col_type, row_ent_data[ss],
68  col_ent_data[SS]);
69  }
70  }
72  };
73 
// Processes all row entities of a given type: same-type pairs first (from
// the diagonal when Symm), then lower types only when not symmetric, then
// all higher types.
74  auto do_row_entity = [&](const EntityType type) {
76  auto &row_ent_data = row_data.dataOnEntities[type];
77  for (size_t ss = 0; ss != row_ent_data.size(); ++ss) {
78  size_t SS = 0;
79  if (Symm)
80  SS = ss;
81  for (; SS < col_data.dataOnEntities[type].size(); SS++) {
82  CHKERR doWork(ss, SS, type, type, row_ent_data[ss],
83  col_data.dataOnEntities[type][SS]);
84  }
85  if (!Symm)
86  CHKERR do_col_entity(row_ent_data, ss, type, MBVERTEX, type);
87  CHKERR do_col_entity(row_ent_data, ss, type,
88  static_cast<EntityType>(type + 1), MBMAXTYPE);
89  }
91  };
92 
93  for (EntityType row_type = MBVERTEX; row_type != MBENTITYSET; ++row_type) {
94  if (doEntities[row_type]) {
95  CHKERR do_row_entity(row_type);
96  }
97  }
98 
// Meshsets are handled separately: only processed when their field data is
// non-empty.
99  if (doEntities[MBENTITYSET]) {
100  for (unsigned int mm = 0; mm != row_data.dataOnEntities[MBENTITYSET].size();
101  ++mm) {
102  if (!row_data.dataOnEntities[MBENTITYSET][mm].getFieldData().empty()) {
103  CHKERR do_row_entity(MBENTITYSET);
104  }
105  }
106  }
107 
109 }
110 
// Runtime dispatcher: selects the symmetric or non-symmetric compile-time
// instantiation of opLhs based on the operator's symmetry flag.
// NOTE(review): the first signature line (listing lines 110-111) is missing
// from this extracted dump; confirm against the repository source.
112  DataForcesAndSourcesCore &col_data) {
113  if (getSymm())
114  return opLhs<true>(row_data, col_data);
115  else
116  return opLhs<false>(row_data, col_data);
117 }
118 
// Right-hand-side (vector) loop: calls the user doWork() once per entity of
// each enabled type. When ErrorIfNoBase is set, an entity that has field
// data on active dofs but no valid approximation base raises an error.
// NOTE(review): doxygen-extracted listing — the signature line(s) and the
// MoFEMFunctionBegin/Return macro lines are missing from this dump; confirm
// against the repository source.
119 template <bool ErrorIfNoBase>
122  const std::array<bool, MBMAXTYPE> &do_entities) {
124 
125  auto do_entity = [&](auto type) {
127 
128  auto &ent_data = data.dataOnEntities[type];
129  const size_t size = ent_data.size();
130  for (size_t ss = 0; ss != size; ++ss) {
131 
132  auto &side_data = ent_data[ss];
133 
// Guard: error out only if the entity actually carries active dofs but the
// base was never evaluated (NOBASE/LASTBASE).
134  if (ErrorIfNoBase) {
135  if (side_data.getFieldData().size() &&
136  (side_data.getBase() == NOBASE ||
137  side_data.getBase() == LASTBASE)) {
138  for (VectorDofs::iterator it = side_data.getFieldDofs().begin();
139  it != side_data.getFieldDofs().end(); it++)
140  if ((*it) && (*it)->getActive())
141  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "No base on");
142  }
143  }
144 
145  CHKERR doWork(ss, type, side_data);
146  }
147 
149  };
150 
151  for (EntityType row_type = MBVERTEX; row_type != MBENTITYSET; ++row_type) {
152  if (do_entities[row_type]) {
153  CHKERR do_entity(row_type);
154  }
155  }
156 
157  if (do_entities[MBENTITYSET]) {
158  // This is odd behaviour, different from that of other entity types.
159  // It should be changed so that the behaviour is consistent.
160  for (unsigned int mm = 0; mm != data.dataOnEntities[MBENTITYSET].size();
161  ++mm) {
162  if (!data.dataOnEntities[MBENTITYSET][mm].getFieldData().empty()) {
163  CHKERR doWork(mm, MBENTITYSET, data.dataOnEntities[MBENTITYSET][mm]);
164  }
165  }
166  }
167 
169 }
170 
// Runtime dispatcher: selects the strict (error-if-no-base) or permissive
// compile-time instantiation of opRhs.
// NOTE(review): the first signature line (listing lines 170-171) is missing
// from this extracted dump; confirm against the repository source.
172  const bool error_if_no_base) {
173  if (error_if_no_base)
174  return opRhs<true>(data, doEntities);
175  else
176  return opRhs<false>(data, doEntities);
177 }
178 
// Specialization: invert a 3x3 Jacobian stored column-per-gauss-point,
// writing determinants to det_data and inverses to inv_jac_data, by calling
// the per-point invertTensor3by3 overload at each gauss point.
// NOTE(review): MoFEMFunctionBegin/Return macro lines (listing 183/191/197)
// are missing from this extracted dump.
179 template <>
180 MoFEMErrorCode invertTensor3by3<3, double, ublas::row_major, DoubleAllocator>(
181  MatrixDouble &jac_data, VectorDouble &det_data,
182  MatrixDouble &inv_jac_data) {
184  auto A = getFTensor2FromMat<3, 3>(jac_data);
// Number of gauss points is the number of columns of jac_data.
185  int nb_gauss_pts = jac_data.size2();
186  det_data.resize(nb_gauss_pts, false);
187  inv_jac_data.resize(3, nb_gauss_pts, false);
188  auto det = getFTensor0FromVec(det_data);
189  auto I = getFTensor2FromMat<3, 3>(inv_jac_data);
190  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
192  CHKERR invertTensor3by3(A, det, I);
193  ++A;
194  ++det;
195  ++I;
196  }
198 }
199 
// Applies the (constant) inverse Jacobian tInvJac to H1 base-function
// derivatives, mapping reference-element gradients to physical gradients:
// diffN <- diffN * invJac. Also processes Bernstein-Bezier derivative
// matrices. NOTE(review): signature lines (listing 200-202, presumably
// `OpSetInvJacH1::doWork(int side, EntityType type, ... &data)`), the FTensor
// declaration lines (220/223) and MoFEMFunctionBegin/Return lines are missing
// from this extracted dump; confirm against the repository source.
203 
204  auto transform_base = [&](MatrixDouble &diff_n,
205  const bool diff_at_gauss_ptr) {
207 
208  if (!diff_n.size1())
210  if (!diff_n.size2())
212 
// For vertex data the derivatives are constant (4 tet shape functions, one
// "row"); otherwise they are stored per gauss point.
213  const int nb_base_functions =
214  (diff_at_gauss_ptr || type != MBVERTEX) ? diff_n.size2() / 3 : 4;
215  const int nb_gauss_pts =
216  (diff_at_gauss_ptr || type != MBVERTEX) ? diff_n.size1() : 1;
217  diffNinvJac.resize(diff_n.size1(), diff_n.size2(), false);
218 
219  double *t_diff_n_ptr = &*diff_n.data().begin();
221  t_diff_n_ptr, &t_diff_n_ptr[1], &t_diff_n_ptr[2]);
222  double *t_inv_n_ptr = &*diffNinvJac.data().begin();
224  t_inv_n_ptr, &t_inv_n_ptr[1], &t_inv_n_ptr[2]);
225 
226  switch (type) {
227 
228  case MBVERTEX:
229  case MBEDGE:
230  case MBTRI:
231  case MBTET: {
232  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
233  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
// Chain rule: physical gradient = reference gradient times inverse Jacobian.
234  t_inv_diff_n(i) = t_diff_n(j) * tInvJac(j, i);
235  ++t_diff_n;
236  ++t_inv_diff_n;
237  }
238  }
239 
240  } break;
241  default:
242  SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED, "not implemented");
243  }
244 
// Swap transformed derivatives back into the entity data (avoids a copy).
245  diff_n.data().swap(diffNinvJac.data());
246 
248  };
249 
250  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
251  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
252  CHKERR transform_base(data.getDiffN(base), false);
253  }
254 
// Bernstein-Bezier bases are stored differently on vertices vs other types.
255  switch (type) {
256  case MBVERTEX:
257  for (auto &m : data.getBBDiffNMap())
258  CHKERR transform_base(*(m.second), true);
259  break;
260  default:
261  for (auto &ptr : data.getBBDiffNByOrderArray())
262  if (ptr)
263  CHKERR transform_base(*ptr, true);
264  }
265 
267 }
268 
// Applies the inverse Jacobian to derivatives of vector-valued (Hdiv/Hcurl)
// base functions: for each 3x3 derivative block, the second index is
// transformed, t_inv_diff_n(k,i) = t_diff_n(k,j) * invJac(j,i).
// NOTE(review): signature lines (listing ~269-272, presumably
// `OpSetInvJacHdivAndHcurl::doWork(...)`), the FTensor declaration line
// (290) and MoFEMFunctionBegin/Return lines are missing from this extracted
// dump; confirm against the repository source.
273 
274  if (type != MBEDGE && type != MBTRI && type != MBTET)
276 
277  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
278 
279  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
280 
281  const unsigned int nb_gauss_pts = data.getDiffN(base).size1();
// Each base function derivative occupies 9 contiguous doubles (3x3 block).
282  const unsigned int nb_base_functions = data.getDiffN(base).size2() / 9;
283  if (!nb_base_functions)
284  continue;
285 
286  diffHdivInvJac.resize(nb_gauss_pts, data.getDiffN(base).size2(), false);
287 
288  auto t_diff_n = data.getFTensor2DiffN<3, 3>(base);
289  double *t_inv_diff_n_ptr = &*diffHdivInvJac.data().begin();
291  t_inv_diff_n_ptr, &t_inv_diff_n_ptr[HVEC0_1],
292  &t_inv_diff_n_ptr[HVEC0_2],
293 
294  &t_inv_diff_n_ptr[HVEC1_0], &t_inv_diff_n_ptr[HVEC1_1],
295  &t_inv_diff_n_ptr[HVEC1_2],
296 
297  &t_inv_diff_n_ptr[HVEC2_0], &t_inv_diff_n_ptr[HVEC2_1],
298  &t_inv_diff_n_ptr[HVEC2_2]);
299 
300  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
301  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
302  t_inv_diff_n(k, i) = t_diff_n(k, j) * tInvJac(j, i);
303  ++t_diff_n;
304  ++t_inv_diff_n;
305  }
306  }
307 
// Swap transformed derivatives back into the entity data.
308  data.getDiffN(base).data().swap(diffHdivInvJac.data());
309  }
310 
312 }
313 
// Contravariant Piola transform on volume entities (tris/tets): maps Hdiv
// base functions and their derivatives to the physical element using
// phi <- (1/(6*V)) * J * phi_ref, where J is tJac and V is vOlume.
// NOTE(review): the class-name line of the signature (listing 314) and the
// FTensor declaration lines (338/355) plus MoFEMFunctionBegin/Return lines
// are missing from this extracted dump; confirm against the repository
// source.
315  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
317 
318  if (type != MBTRI && type != MBTET)
320 
321  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
322 
323  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
324 
325  const unsigned int nb_base_functions = data.getN(base).size2() / 3;
326  if (!nb_base_functions)
327  continue;
328 
// Scaling 1/(6*V): 6*V equals the Jacobian determinant of a linear tet.
329  const double c = 1. / 6.;
330  const unsigned int nb_gauss_pts = data.getN(base).size1();
331 
332  double const a = c / vOlume;
333 
334  piolaN.resize(nb_gauss_pts, data.getN(base).size2(), false);
335  if (data.getN(base).size2() > 0) {
336  auto t_n = data.getFTensor1N<3>(base);
337  double *t_transformed_n_ptr = &*piolaN.data().begin();
339  t_transformed_n_ptr, // HVEC0
340  &t_transformed_n_ptr[HVEC1], &t_transformed_n_ptr[HVEC2]);
341  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
342  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
343  t_transformed_n(i) = a * tJac(i, k) * t_n(k);
344  ++t_n;
345  ++t_transformed_n;
346  }
347  }
348  data.getN(base).data().swap(piolaN.data());
349  }
350 
// Same transform applied to the 3x3 derivative blocks.
351  piolaDiffN.resize(nb_gauss_pts, data.getDiffN(base).size2(), false);
352  if (data.getDiffN(base).size2() > 0) {
353  auto t_diff_n = data.getFTensor2DiffN<3, 3>(base);
354  double *t_transformed_diff_n_ptr = &*piolaDiffN.data().begin();
356  t_transformed_diff_n(t_transformed_diff_n_ptr,
357  &t_transformed_diff_n_ptr[HVEC0_1],
358  &t_transformed_diff_n_ptr[HVEC0_2],
359  &t_transformed_diff_n_ptr[HVEC1_0],
360  &t_transformed_diff_n_ptr[HVEC1_1],
361  &t_transformed_diff_n_ptr[HVEC1_2],
362  &t_transformed_diff_n_ptr[HVEC2_0],
363  &t_transformed_diff_n_ptr[HVEC2_1],
364  &t_transformed_diff_n_ptr[HVEC2_2]);
365  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
366  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
367  t_transformed_diff_n(i, k) = a * tJac(i, j) * t_diff_n(j, k);
368  ++t_diff_n;
369  ++t_transformed_diff_n;
370  }
371  }
372  data.getDiffN(base).data().swap(piolaDiffN.data());
373  }
374  }
375 
377 }
378 
// Covariant Piola transform (Hcurl-style) using the constant inverse
// Jacobian: phi <- invJac^T * phi_ref and the analogous map for the
// derivative blocks. NOTE(review): the signature lines (listing ~379-382)
// and the FTensor Tensor1 declaration line (401) plus
// MoFEMFunctionBegin/Return lines are missing from this extracted dump;
// confirm against the repository source.
383 
384  if (type != MBEDGE && type != MBTRI && type != MBTET)
386 
387  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
388 
389  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
390 
391  const unsigned int nb_base_functions = data.getN(base).size2() / 3;
392  if (!nb_base_functions)
393  continue;
394 
395  const unsigned int nb_gauss_pts = data.getN(base).size1();
396  piolaN.resize(nb_gauss_pts, data.getN(base).size2(), false);
397  piolaDiffN.resize(nb_gauss_pts, data.getDiffN(base).size2(), false);
398 
399  auto t_n = data.getFTensor1N<3>(base);
400  double *t_transformed_n_ptr = &*piolaN.data().begin();
402  t_transformed_n_ptr, &t_transformed_n_ptr[HVEC1],
403  &t_transformed_n_ptr[HVEC2]);
404  auto t_diff_n = data.getFTensor2DiffN<3, 3>(base);
405  double *t_transformed_diff_n_ptr = &*piolaDiffN.data().begin();
406  FTensor::Tensor2<FTensor::PackPtr<double *, 9>, 3, 3> t_transformed_diff_n(
407  t_transformed_diff_n_ptr, &t_transformed_diff_n_ptr[HVEC0_1],
408  &t_transformed_diff_n_ptr[HVEC0_2], &t_transformed_diff_n_ptr[HVEC1_0],
409  &t_transformed_diff_n_ptr[HVEC1_1], &t_transformed_diff_n_ptr[HVEC1_2],
410  &t_transformed_diff_n_ptr[HVEC2_0], &t_transformed_diff_n_ptr[HVEC2_1],
411  &t_transformed_diff_n_ptr[HVEC2_2]);
412 
413  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
414  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
// Note the transposed index order: multiplication by invJac^T.
415  t_transformed_n(i) = tInvJac(k, i) * t_n(k);
416  t_transformed_diff_n(i, k) = tInvJac(j, i) * t_diff_n(j, k);
417  ++t_n;
418  ++t_transformed_n;
419  ++t_diff_n;
420  ++t_transformed_diff_n;
421  }
422  }
423  data.getN(base).data().swap(piolaN.data());
424  data.getDiffN(base).data().swap(piolaDiffN.data());
425  }
426 
427  // data.getBase() = base;
428 
430 }
431 
// Higher-order-geometry variant of the H1 inverse-Jacobian transform: the
// inverse Jacobian (invHoJac, one 3x3 block per gauss point) varies per
// gauss point, so t_inv_jac is advanced inside the gauss loop.
// NOTE(review): doxygen-extracted listing — parts of the signature
// (listing 432/434-435), FTensor declarations (456/464) and
// MoFEMFunctionBegin/Return lines are missing; confirm against the
// repository source.
433 OpSetHoInvJacH1::doWork(int side, EntityType type,
436 
// Sanity check: each row of invHoJac must hold a full 3x3 block.
437  if (invHoJac.size2() != 9)
438  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
439  "It looks that ho inverse of Jacobian is not calculated %d != 9",
440  invHoJac.size2());
441 
442  auto transform_base = [&](MatrixDouble &diff_n) {
444 
445  unsigned int nb_gauss_pts = diff_n.size1();
446  if (nb_gauss_pts == 0)
448 
// Transform only when the per-point Jacobians match this derivative table.
449  if (invHoJac.size1() == nb_gauss_pts) {
450 
451  unsigned int nb_base_functions = diff_n.size2() / 3;
452  if (nb_base_functions == 0)
454 
455  double *t_inv_jac_ptr = &*invHoJac.data().begin();
457  t_inv_jac_ptr, &t_inv_jac_ptr[1], &t_inv_jac_ptr[2],
458  &t_inv_jac_ptr[3], &t_inv_jac_ptr[4], &t_inv_jac_ptr[5],
459  &t_inv_jac_ptr[6], &t_inv_jac_ptr[7], &t_inv_jac_ptr[8], 9);
460 
461  diffNinvJac.resize(nb_gauss_pts, 3 * nb_base_functions, false);
462 
463  double *t_diff_n_ptr = &*diff_n.data().begin();
465  t_diff_n_ptr, &t_diff_n_ptr[1], &t_diff_n_ptr[2]);
466  double *t_inv_n_ptr = &*diffNinvJac.data().begin();
467  FTensor::Tensor1<double *, 3> t_inv_diff_n(t_inv_n_ptr, &t_inv_n_ptr[1],
468  &t_inv_n_ptr[2], 3);
469 
470  switch (type) {
471  case MBVERTEX:
472  case MBEDGE:
473  case MBTRI:
474  case MBTET: {
475  for (unsigned int gg = 0; gg < nb_gauss_pts; ++gg) {
476  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
477  t_inv_diff_n(i) = t_diff_n(j) * t_inv_jac(j, i);
478  ++t_diff_n;
479  ++t_inv_diff_n;
480  }
// Advance to the inverse Jacobian of the next gauss point.
481  ++t_inv_jac;
482  }
483  } break;
484  default:
485  SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED, "not implemented");
486  }
487 
488  diff_n.data().swap(diffNinvJac.data());
489  }
491  };
492 
493  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
494  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
495  CHKERR transform_base(data.getDiffN(base));
496  }
497 
// Bernstein-Bezier derivative tables handled per storage layout.
498  switch (type) {
499  case MBVERTEX:
500  for (auto &m : data.getBBDiffNMap())
501  CHKERR transform_base(*(m.second));
502  break;
503  default:
504  for (auto &ptr : data.getBBDiffNByOrderArray())
505  if (ptr)
506  CHKERR transform_base(*ptr);
507  }
508 
510 }
511 
// Higher-order-geometry inverse-Jacobian transform for Hdiv/Hcurl
// derivatives: invHoJac holds one 3x3 inverse Jacobian per gauss point, and
// each 3x3 derivative block is mapped via t_inv_jac. NOTE(review): the
// signature lines (listing ~512-515), FTensor declarations (534/541) and
// MoFEMFunctionBegin/Return lines are missing from this extracted dump;
// confirm against the repository source.
516 
517  if (type != MBEDGE && type != MBTRI && type != MBTET)
519 
520  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
521 
522  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
523 
524  diffHdivInvJac.resize(data.getDiffN(base).size1(),
525  data.getDiffN(base).size2(), false);
526 
527  unsigned int nb_gauss_pts = data.getDiffN(base).size1();
528  unsigned int nb_base_functions = data.getDiffN(base).size2() / 9;
529  if (nb_base_functions == 0)
530  continue;
531 
532  auto t_diff_n = data.getFTensor2DiffN<3, 3>(base);
533  double *t_inv_diff_n_ptr = &*diffHdivInvJac.data().begin();
535  t_inv_diff_n_ptr, &t_inv_diff_n_ptr[HVEC0_1],
536  &t_inv_diff_n_ptr[HVEC0_2], &t_inv_diff_n_ptr[HVEC1_0],
537  &t_inv_diff_n_ptr[HVEC1_1], &t_inv_diff_n_ptr[HVEC1_2],
538  &t_inv_diff_n_ptr[HVEC2_0], &t_inv_diff_n_ptr[HVEC2_1],
539  &t_inv_diff_n_ptr[HVEC2_2]);
540  double *t_inv_jac_ptr = &*invHoJac.data().begin();
542  t_inv_jac_ptr, &t_inv_jac_ptr[1], &t_inv_jac_ptr[2], &t_inv_jac_ptr[3],
543  &t_inv_jac_ptr[4], &t_inv_jac_ptr[5], &t_inv_jac_ptr[6],
544  &t_inv_jac_ptr[7], &t_inv_jac_ptr[8]);
545 
546  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
547  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
548  t_inv_diff_n(i, j) = t_inv_jac(k, j) * t_diff_n(i, k);
549  ++t_diff_n;
550  ++t_inv_diff_n;
551  }
// Per-gauss-point Jacobian: advance once per point.
552  ++t_inv_jac;
553  }
554 
555  data.getDiffN(base).data().swap(diffHdivInvJac.data());
556  }
557 
559 }
560 
// Higher-order-geometry contravariant Piola transform: per-gauss-point
// Jacobian hoJac and determinant detHoJac give
// phi <- (1/det(J)) * J * phi_ref, applied to base values and derivatives.
// NOTE(review): the class-name signature line (listing ~561), FTensor
// declarations (579/593) and MoFEMFunctionBegin/Return lines are missing
// from this extracted dump; confirm against the repository source.
562  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
564 
565  if (type != MBTRI && type != MBTET)
567 
568  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
569 
570  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
571 
572  unsigned int nb_gauss_pts = data.getN(base).size1();
573  unsigned int nb_base_functions = data.getN(base).size2() / 3;
574  piolaN.resize(nb_gauss_pts, data.getN(base).size2(), false);
575  piolaDiffN.resize(nb_gauss_pts, data.getDiffN(base).size2(), false);
576 
577  auto t_n = data.getFTensor1N<3>(base);
578  double *t_transformed_n_ptr = &*piolaN.data().begin();
580  t_transformed_n_ptr, // HVEC0
581  &t_transformed_n_ptr[HVEC1], &t_transformed_n_ptr[HVEC2]);
582  auto t_diff_n = data.getFTensor2DiffN<3, 3>(base);
583  double *t_transformed_diff_n_ptr = &*piolaDiffN.data().begin();
584  FTensor::Tensor2<FTensor::PackPtr<double *, 9>, 3, 3> t_transformed_diff_n(
585  t_transformed_diff_n_ptr, &t_transformed_diff_n_ptr[HVEC0_1],
586  &t_transformed_diff_n_ptr[HVEC0_2], &t_transformed_diff_n_ptr[HVEC1_0],
587  &t_transformed_diff_n_ptr[HVEC1_1], &t_transformed_diff_n_ptr[HVEC1_2],
588  &t_transformed_diff_n_ptr[HVEC2_0], &t_transformed_diff_n_ptr[HVEC2_1],
589  &t_transformed_diff_n_ptr[HVEC2_2]);
590 
591  FTensor::Tensor0<double *> t_det(&*detHoJac.data().begin());
592  double *t_jac_ptr = &*hoJac.data().begin();
594  t_jac_ptr, &t_jac_ptr[1], &t_jac_ptr[2], &t_jac_ptr[3], &t_jac_ptr[4],
595  &t_jac_ptr[5], &t_jac_ptr[6], &t_jac_ptr[7], &t_jac_ptr[8]);
596 
597  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
598  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
599  const double a = 1. / t_det;
600  t_transformed_n(i) = a * t_jac(i, k) * t_n(k);
601  t_transformed_diff_n(i, k) = a * t_jac(i, j) * t_diff_n(j, k);
602  ++t_n;
603  ++t_transformed_n;
604  ++t_diff_n;
605  ++t_transformed_diff_n;
606  }
// Jacobian and determinant change at each gauss point.
607  ++t_det;
608  ++t_jac;
609  }
610 
611  data.getN(base).data().swap(piolaN.data());
612  data.getDiffN(base).data().swap(piolaDiffN.data());
613  }
614 
616 }
617 
// Higher-order-geometry covariant Piola transform: per-gauss-point inverse
// Jacobian hoInvJac (stride 9) maps base values via invJac^T and the
// derivative blocks likewise. NOTE(review): the class-name signature line
// (listing ~618), FTensor declarations (636/649) and
// MoFEMFunctionBegin/Return lines are missing from this extracted dump;
// confirm against the repository source.
619  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
621 
622  if (type != MBEDGE && type != MBTRI && type != MBTET)
624 
625  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
626 
627  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
628 
629  unsigned int nb_gauss_pts = data.getN(base).size1();
630  unsigned int nb_base_functions = data.getN(base).size2() / 3;
631  piolaN.resize(nb_gauss_pts, data.getN(base).size2(), false);
632  piolaDiffN.resize(nb_gauss_pts, data.getDiffN(base).size2(), false);
633 
634  auto t_n = data.getFTensor1N<3>(base);
635  double *t_transformed_n_ptr = &*piolaN.data().begin();
637  t_transformed_n_ptr, // HVEC0
638  &t_transformed_n_ptr[HVEC1], &t_transformed_n_ptr[HVEC2]);
639  auto t_diff_n = data.getFTensor2DiffN<3, 3>(base);
640  double *t_transformed_diff_n_ptr = &*piolaDiffN.data().begin();
641  FTensor::Tensor2<FTensor::PackPtr<double *, 9>, 3, 3> t_transformed_diff_n(
642  t_transformed_diff_n_ptr, &t_transformed_diff_n_ptr[HVEC0_1],
643  &t_transformed_diff_n_ptr[HVEC0_2], &t_transformed_diff_n_ptr[HVEC1_0],
644  &t_transformed_diff_n_ptr[HVEC1_1], &t_transformed_diff_n_ptr[HVEC1_2],
645  &t_transformed_diff_n_ptr[HVEC2_0], &t_transformed_diff_n_ptr[HVEC2_1],
646  &t_transformed_diff_n_ptr[HVEC2_2]);
647 
648  double *t_inv_jac_ptr = &*hoInvJac.data().begin();
650  t_inv_jac_ptr, &t_inv_jac_ptr[1], &t_inv_jac_ptr[2], &t_inv_jac_ptr[3],
651  &t_inv_jac_ptr[4], &t_inv_jac_ptr[5], &t_inv_jac_ptr[6],
652  &t_inv_jac_ptr[7], &t_inv_jac_ptr[8], 9);
653 
654  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
655  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
// Transposed index order: multiplication by invJac^T (covariant map).
656  t_transformed_n(i) = t_inv_jac(k, i) * t_n(k);
657  t_transformed_diff_n(i, k) = t_inv_jac(j, i) * t_diff_n(j, k);
658  ++t_n;
659  ++t_transformed_n;
660  ++t_diff_n;
661  ++t_transformed_diff_n;
662  }
663  ++t_inv_jac;
664  }
665 
666  data.getN(base).data().swap(piolaN.data());
667  data.getDiffN(base).data().swap(piolaDiffN.data());
668  }
669 
671 }
672 
// Accumulates, at every gauss point of a face, the interpolated coordinates
// and the two surface tangents (derivatives w.r.t. the two reference
// directions N0/N1) from the nodal field data. NOTE(review): the signature
// lines (listing ~673-676, presumably a face coords/normals operator
// doWork), FTensor declaration (688), index declarations (694-696) and
// early-return/MoFEMFunction lines are missing from this extracted dump;
// confirm against the repository source.
677 
678  unsigned int nb_dofs = data.getFieldData().size();
679  if (nb_dofs == 0)
681 
682  int nb_gauss_pts = data.getN().size1();
683  cOords_at_GaussPt.resize(nb_gauss_pts, 3, false);
684  tAngent1_at_GaussPt.resize(nb_gauss_pts, 3, false);
685  tAngent2_at_GaussPt.resize(nb_gauss_pts, 3, false);
686 
687  auto get_ftensor1 = [](MatrixDouble &m) {
689  &m(0, 0), &m(0, 1), &m(0, 2));
690  };
691  auto t_coords = get_ftensor1(cOords_at_GaussPt);
692  auto t_t1 = get_ftensor1(tAngent1_at_GaussPt);
693  auto t_t2 = get_ftensor1(tAngent2_at_GaussPt);
697 
698  switch (type) {
// MBVERTEX clears the accumulators and intentionally falls through to the
// accumulation code shared with edges/tris/quads (no break here).
699  case MBVERTEX: {
700  cOords_at_GaussPt.clear();
701  tAngent1_at_GaussPt.clear();
702  tAngent2_at_GaussPt.clear();
703  }
704  case MBEDGE:
705  case MBTRI:
706  case MBQUAD: {
707  if (2 * data.getN().size2() != data.getDiffN().size2()) {
708  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "data inconsistency");
709  }
710  if (nb_dofs % 3 != 0) {
711  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "data inconsistency");
712  }
// More dofs than base functions: only the leading run of active dofs is
// used; an inconsistency beyond the base size is an error.
713  if (nb_dofs > 3 * data.getN().size2()) {
714  unsigned int nn = 0;
715  for (; nn != nb_dofs; nn++) {
716  if (!data.getFieldDofs()[nn]->getActive())
717  break;
718  }
719  if (nn > 3 * data.getN().size2()) {
720  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
721  "data inconsistency for base %s",
723  } else {
724  nb_dofs = nn;
725  if (!nb_dofs)
727  }
728  }
729  const int nb_base_functions = data.getN().size2();
730  auto t_base = data.getFTensor0N();
731  auto t_diff_base = data.getFTensor1DiffN<2>();
732  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
733  auto t_data = data.getFTensor1FieldData<3>();
734  int bb = 0;
735  for (; bb != nb_dofs / 3; ++bb) {
736  t_coords(i) += t_base * t_data(i);
737  t_t1(i) += t_data(i) * t_diff_base(N0);
738  t_t2(i) += t_data(i) * t_diff_base(N1);
739  ++t_data;
740  ++t_base;
741  ++t_diff_base;
742  }
// Skip base functions without corresponding dofs to keep iterators aligned.
743  for (; bb != nb_base_functions; ++bb) {
744  ++t_base;
745  ++t_diff_base;
746  }
747  ++t_coords;
748  ++t_t1;
749  ++t_t2;
750  }
751  } break;
752  default:
753  SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED, "not implemented");
754  }
755 
757 }
758 
// Computes the face normal at every gauss point as the cross product of the
// two previously accumulated tangents, via the Levi-Civita symbol.
// NOTE(review): the signature line (listing ~759-760), FTensor declaration
// (765), index declarations (772-774) and MoFEMFunction lines are missing
// from this extracted dump; confirm against the repository source.
761 
762  nOrmals_at_GaussPt.resize(tAngent1_at_GaussPt.size1(), 3, false);
763 
764  auto get_ftensor1 = [](MatrixDouble &m) {
766  &m(0, 0), &m(0, 1), &m(0, 2));
767  };
768  auto t_normal = get_ftensor1(nOrmals_at_GaussPt);
769  auto t_t1 = get_ftensor1(tAngent1_at_GaussPt);
770  auto t_t2 = get_ftensor1(tAngent2_at_GaussPt);
771 
775 
776  for (unsigned int gg = 0; gg != tAngent1_at_GaussPt.size1(); ++gg) {
// normal_j = eps_ijk * t1_k * t2_i (cross product of tangents).
777  t_normal(j) = FTensor::levi_civita(i, j, k) * t_t1(k) * t_t2(i);
778  ++t_normal;
779  ++t_t1;
780  ++t_t2;
781  }
782 
784 }
785 
// Prism variant of the face coords/tangents accumulation: the prism has two
// triangular faces (F3 and F4), and lookup tables map edge/face side numbers
// to the face they belong to. Uses cblas_ddot to interpolate field data.
// NOTE(review): the signature lines (listing ~786-789), several assignment
// left-hand sides (812/814/816/818/820/842/852) and early-return /
// MoFEMFunction lines are missing from this extracted dump; confirm against
// the repository source.
790 
791  if (data.getFieldData().size() == 0)
// Side-number membership tables: which edges/faces belong to face 3 vs 4.
793  const int valid_edges3[] = {1, 1, 1, 0, 0, 0, 0, 0, 0};
794  const int valid_faces3[] = {0, 0, 0, 1, 0, 0, 0, 0, 0};
795  const int valid_edges4[] = {0, 0, 0, 0, 0, 0, 1, 1, 1};
796  const int valid_faces4[] = {0, 0, 0, 0, 1, 0, 0, 0, 0};
797 
798  if (type == MBEDGE) {
799  if (!valid_edges3[side] || valid_edges4[side])
801  } else if (type == MBTRI) {
802  if (!valid_faces3[side] || valid_faces4[side])
804  }
805 
806  switch (type) {
// Vertex contribution: first 9 values feed face F3, next 9 feed face F4.
807  case MBVERTEX: {
808  for (unsigned int gg = 0; gg < data.getN().size1(); ++gg) {
809  for (int dd = 0; dd < 3; dd++) {
810  cOords_at_GaussPtF3(gg, dd) =
811  cblas_ddot(3, &data.getN(gg)[0], 1, &data.getFieldData()[dd], 3);
813  3, &data.getDiffN()(gg, 0), 2, &data.getFieldData()[dd], 3);
815  3, &data.getDiffN()(gg, 1), 2, &data.getFieldData()[dd], 3);
817  3, &data.getN(gg)[0], 1, &data.getFieldData()[9 + dd], 3);
819  3, &data.getDiffN()(gg, 6 + 0), 2, &data.getFieldData()[9 + dd], 3);
821  3, &data.getDiffN()(gg, 6 + 1), 2, &data.getFieldData()[9 + dd], 3);
822  }
823  }
824  } break;
825  case MBEDGE:
826  case MBTRI: {
827  if (2 * data.getN().size2() != data.getDiffN().size2()) {
828  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "data inconsistency");
829  }
830  unsigned int nb_dofs = data.getFieldData().size();
831  if (nb_dofs % 3 != 0) {
832  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "data inconsistency");
833  }
834  if (nb_dofs > 3 * data.getN().size2()) {
835  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
836  "data inconsistency, side %d type %d", side, type);
837  }
838  for (unsigned int gg = 0; gg < data.getN().size1(); ++gg) {
839  for (int dd = 0; dd < 3; dd++) {
840  if ((type == MBTRI && valid_faces3[side]) ||
841  (type == MBEDGE && valid_edges3[side])) {
843  nb_dofs / 3, &data.getN(gg)[0], 1, &data.getFieldData()[dd], 3);
844  tAngent1_at_GaussPtF3(gg, dd) +=
845  cblas_ddot(nb_dofs / 3, &data.getDiffN()(gg, 0), 2,
846  &data.getFieldData()[dd], 3);
847  tAngent2_at_GaussPtF3(gg, dd) +=
848  cblas_ddot(nb_dofs / 3, &data.getDiffN()(gg, 1), 2,
849  &data.getFieldData()[dd], 3);
850  } else if ((type == MBTRI && valid_faces4[side]) ||
851  (type == MBEDGE && valid_edges4[side])) {
853  nb_dofs / 3, &data.getN(gg)[0], 1, &data.getFieldData()[dd], 3);
854  tAngent1_at_GaussPtF4(gg, dd) +=
855  cblas_ddot(nb_dofs / 3, &data.getDiffN()(gg, 0), 2,
856  &data.getFieldData()[dd], 3);
857  tAngent2_at_GaussPtF4(gg, dd) +=
858  cblas_ddot(nb_dofs / 3, &data.getDiffN()(gg, 1), 2,
859  &data.getFieldData()[dd], 3);
860  }
861  }
862  }
863  } break;
864  default:
865  SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED, "not implemented");
866  }
867 
869 }
870 
// Computes normals for both prism triangular faces (F3 and F4) as the cross
// product of the stored tangents, implemented as a spin (skew-symmetric)
// matrix built by Spin() applied to the second tangent via cblas_dgemv.
// NOTE(review): the signature line (listing ~871-872) and
// MoFEMFunctionBegin/Return lines are missing from this extracted dump;
// confirm against the repository source.
873 
874  sPin.resize(3, 3);
875  sPin.clear();
876  nOrmals_at_GaussPtF3.resize(tAngent1_at_GaussPtF3.size1(), 3, false);
877  for (unsigned int gg = 0; gg < tAngent1_at_GaussPtF3.size1(); ++gg) {
// sPin = spin(t1); then normal = sPin * t2 == t1 x t2.
878  ierr = Spin(&*sPin.data().begin(), &tAngent1_at_GaussPtF3(gg, 0));
879  CHKERRG(ierr);
880  cblas_dgemv(CblasRowMajor, CblasNoTrans, 3, 3, 1., &*sPin.data().begin(), 3,
881  &tAngent2_at_GaussPtF3(gg, 0), 1, 0.,
882  &nOrmals_at_GaussPtF3(gg, 0), 1);
883  }
884  sPin.clear();
885  nOrmals_at_GaussPtF4.resize(tAngent1_at_GaussPtF4.size1(), 3, false);
886  for (unsigned int gg = 0; gg < tAngent1_at_GaussPtF4.size1(); ++gg) {
887  ierr = Spin(&*sPin.data().begin(), &tAngent1_at_GaussPtF4(gg, 0));
888  CHKERRG(ierr);
889  cblas_dgemv(CblasRowMajor, CblasNoTrans, 3, 3, 1., &*sPin.data().begin(), 3,
890  &tAngent2_at_GaussPtF4(gg, 0), 1, 0.,
891  &nOrmals_at_GaussPtF4(gg, 0), 1);
892  }
894 }
895 
// Contravariant Piola transform for Hdiv bases on a triangle: scales each
// base vector by value/|n|^2 times the (element or per-gauss-point HO)
// normal. Uses per-point normals when normalsAtGaussPts matches the number
// of gauss points, otherwise the constant element normal. NOTE(review): the
// class-name signature line (listing ~896), the t_normal/t_ho_normal
// declaration lines (909/911/917) and MoFEMFunction lines are missing from
// this extracted dump; confirm against the repository source.
897  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
899 
900  if (type != MBTRI)
902 
903  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
904 
905  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
906 
907  int nb_gauss_pts = data.getN(base).size1();
908  if (nb_gauss_pts) {
910  auto t_normal =
// l02: squared length of the constant element normal.
912  const double l02 = t_normal(i) * t_normal(i);
913  int nb_base_functions = data.getN(base).size2() / 3;
914  auto t_n = data.getFTensor1N<3>(base);
915  if (normalsAtGaussPts.size1() == (unsigned int)nb_gauss_pts) {
916  auto t_ho_normal =
918  &normalsAtGaussPts(0, 0), &normalsAtGaussPts(0, 1),
919  &normalsAtGaussPts(0, 2));
920  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
921  for (int bb = 0; bb != nb_base_functions; ++bb) {
// Only the first component of the reference base carries the scalar value.
922  const double v = t_n(0);
923  const double l2 = t_ho_normal(i) * t_ho_normal(i);
924  t_n(i) = (v / l2) * t_ho_normal(i);
925  ++t_n;
926  }
927  ++t_ho_normal;
928  }
929  } else {
930  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
931  for (int bb = 0; bb != nb_base_functions; ++bb) {
932  const double v = t_n(0);
933  t_n(i) = (v / l02) * t_normal(i);
934  ++t_n;
935  }
936  }
937  }
938  }
939  }
940 
942 }
943 
// Covariant Piola transform for Hcurl bases on a face: builds a 3x3 frame
// matrix from the two tangents and the normal, inverts it, and multiplies
// base values and (2-column) derivative blocks by the inverse transpose.
// Uses a per-gauss-point frame when HO geometry data is present, otherwise
// the constant element frame. NOTE(review): the class-name signature line
// (listing ~944), several FTensor declaration lines (951-955/964/983/985/
// 991/1000) and MoFEMFunction lines are missing from this extracted dump;
// confirm against the repository source.
945  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
947 
948  if (type != MBEDGE && type != MBTRI)
950 
954 
// Constant frame matrix columns: tangent0, tangent1, normal.
956  &tAngent0[0], &tAngent1[0], &nOrmal[0],
957 
958  &tAngent0[1], &tAngent1[1], &nOrmal[1],
959 
960  &tAngent0[2], &tAngent1[2], &nOrmal[2],
961 
962  3);
963  double det;
965  CHKERR determinantTensor3by3(t_m, det);
966  CHKERR invertTensor3by3(t_m, det, t_inv_m);
967 
968  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; ++b) {
969 
970  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
971 
972  auto &baseN = data.getN(base);
973  auto &diffBaseN = data.getDiffN(base);
974 
975  int nb_dofs = baseN.size2() / 3;
976  int nb_gauss_pts = baseN.size1();
977 
978  MatrixDouble piola_n(baseN.size1(), baseN.size2());
979  MatrixDouble diff_piola_n(diffBaseN.size1(), diffBaseN.size2());
980 
981  if (nb_dofs > 0 && nb_gauss_pts > 0) {
982 
984  &baseN(0, HVEC0), &baseN(0, HVEC1), &baseN(0, HVEC2));
986  &diffBaseN(0, HVEC0_0), &diffBaseN(0, HVEC0_1),
987  &diffBaseN(0, HVEC1_0), &diffBaseN(0, HVEC1_1),
988  &diffBaseN(0, HVEC2_0), &diffBaseN(0, HVEC2_1));
989  FTensor::Tensor1<FTensor::PackPtr<double *, 3>, 3> t_transformed_h_curl(
990  &piola_n(0, HVEC0), &piola_n(0, HVEC1), &piola_n(0, HVEC2));
992  t_transformed_diff_h_curl(
993  &diff_piola_n(0, HVEC0_0), &diff_piola_n(0, HVEC0_1),
994  &diff_piola_n(0, HVEC1_0), &diff_piola_n(0, HVEC1_1),
995  &diff_piola_n(0, HVEC2_0), &diff_piola_n(0, HVEC2_1));
996 
// cc counts transformed dofs to cross-check against the expected total.
997  int cc = 0;
998  if (normalsAtGaussPts.size1() == (unsigned int)nb_gauss_pts) {
999  // HO geometry is set, so jacobian is different at each gauss point
1001  &tangent0AtGaussPt(0, 0), &tangent1AtGaussPt(0, 0),
1002  &normalsAtGaussPts(0, 0), &tangent0AtGaussPt(0, 1),
1003  &tangent1AtGaussPt(0, 1), &normalsAtGaussPts(0, 1),
1004  &tangent0AtGaussPt(0, 2), &tangent1AtGaussPt(0, 2),
1005  &normalsAtGaussPts(0, 2), 3);
1006  for (int gg = 0; gg < nb_gauss_pts; ++gg) {
1007  CHKERR determinantTensor3by3(t_m_at_pts, det);
1008  CHKERR invertTensor3by3(t_m_at_pts, det, t_inv_m);
1009  for (int ll = 0; ll != nb_dofs; ll++) {
1010  t_transformed_h_curl(i) = t_inv_m(j, i) * t_h_curl(j);
1011  t_transformed_diff_h_curl(i, k) =
1012  t_inv_m(j, i) * t_diff_h_curl(j, k);
1013  ++t_h_curl;
1014  ++t_transformed_h_curl;
1015  ++t_diff_h_curl;
1016  ++t_transformed_diff_h_curl;
1017  ++cc;
1018  }
1019  ++t_m_at_pts;
1020  }
1021  } else {
1022  for (int gg = 0; gg < nb_gauss_pts; ++gg) {
1023  for (int ll = 0; ll != nb_dofs; ll++) {
1024  t_transformed_h_curl(i) = t_inv_m(j, i) * t_h_curl(j);
1025  t_transformed_diff_h_curl(i, k) =
1026  t_inv_m(j, i) * t_diff_h_curl(j, k);
1027  ++t_h_curl;
1028  ++t_transformed_h_curl;
1029  ++t_diff_h_curl;
1030  ++t_transformed_diff_h_curl;
1031  ++cc;
1032  }
1033  }
1034  }
1035  if (cc != nb_gauss_pts * nb_dofs)
1036  SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSIBLE_CASE, "Data inconsistency");
1037 
1038  baseN.data().swap(piola_n.data());
1039  diffBaseN.data().swap(diff_piola_n.data());
1040  }
1041  }
1042 
1044 }
1045 
// Computes the (higher-order) tangent vector of an edge at each gauss point
// by dotting shape-function derivatives with nodal/edge field data.
// Vertex contribution initializes tAngent; edge contributions accumulate.
// NOTE(review): parts of the signature (listing 1046/1048-1049) and
// MoFEMFunctionBegin/Return lines are missing from this extracted dump;
// confirm against the repository source.
1047 OpGetHoTangentOnEdge::doWork(int side, EntityType type,
1050 
1051  int nb_dofs = data.getFieldData().size();
1052  if (nb_dofs == 0)
1054 
1055  int nb_gauss_pts = data.getN().size1();
1056  tAngent.resize(nb_gauss_pts, 3, false);
1057 
1058  int nb_approx_fun = data.getN().size2();
1059  double *diff = &*data.getDiffN().data().begin();
// Interleaved (x,y,z) field data: one pointer per component, stride 3.
1060  double *dofs[] = {&data.getFieldData()[0], &data.getFieldData()[1],
1061  &data.getFieldData()[2]};
1062 
1063  tAngent.resize(nb_gauss_pts, 3, false);
1064 
1065  switch (type) {
1066  case MBVERTEX:
1067  for (int dd = 0; dd != 3; dd++) {
1068  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
// Vertex derivatives are constant: two vertex functions, same at every gg.
1069  tAngent(gg, dd) = cblas_ddot(2, diff, 1, dofs[dd], 3);
1070  }
1071  }
1072  break;
1073  case MBEDGE:
1074  if (nb_dofs % 3) {
1075  SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSIBLE_CASE,
1076  "Approximated field should be rank 3, i.e. vector in 3d space");
1077  }
1078  for (int dd = 0; dd != 3; dd++) {
1079  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
1080  tAngent(gg, dd) +=
1081  cblas_ddot(nb_dofs / 3, &diff[gg * nb_approx_fun], 1, dofs[dd], 3);
1082  }
1083  }
1084  break;
1085  default:
1086  SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSIBLE_CASE,
1087  "This operator can calculate tangent vector only on edge");
1088  }
1089 
1091 }
1092 
// Apply the covariant Piola transform to Hcurl base functions on an edge:
// each base function value is projected onto the edge tangent and scaled by
// the squared edge length. Class name and the MoFEMErrorCode signature line
// (1093) were dropped by the listing scrape, as were the FTensor constructor
// lines (1095, 1098, 1100-1101, 1106, 1113).
1094  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
1096 
      // Transform only applies on edges; other entity types pass through.
1097  if (type != MBEDGE)
1099 
      // t_m: constant tangent of the (straight) edge; l0 its squared length,
      // used as fallback scaling when no per-Gauss-point tangents exist.
1102  &tAngent[0], &tAngent[1], &tAngent[2]);
1103  const double l0 = t_m(i) * t_m(i);
1104 
      // Cursor over the Hcurl base values (3 components per function) for a
      // given approximation base.
1105  auto get_base_at_pts = [&](auto base) {
1107  &data.getN(base)(0, HVEC0), &data.getN(base)(0, HVEC1),
1108  &data.getN(base)(0, HVEC2));
1109  return t_h_curl;
1110  };
1111 
      // Cursor over the higher-order tangent vectors, one per Gauss point.
1112  auto get_tangent_at_pts = [&]() {
1114  &tangentAtGaussPt(0, 0), &tangentAtGaussPt(0, 1),
1115  &tangentAtGaussPt(0, 2));
1116  return t_m_at_pts;
1117  };
1118 
      // Squared edge length at each Gauss point (empty if no HO geometry).
1119  auto calculate_squared_edge_length = [&]() {
1120  std::vector<double> l1;
1121  int nb_gauss_pts = tangentAtGaussPt.size1();
1122  if (nb_gauss_pts) {
1123  l1.resize(nb_gauss_pts);
1124  auto t_m_at_pts = get_tangent_at_pts();
1125  for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
1126  l1[gg] = t_m_at_pts(i) * t_m_at_pts(i);
1127  ++t_m_at_pts;
1128  }
1129  }
1130  return l1;
1131  };
1132 
1133  auto l1 = calculate_squared_edge_length();
1134 
      // Transform every approximation base that has data on this entity.
1135  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
1136 
1137  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
1138  const size_t nb_gauss_pts = data.getN(base).size1();
1139  const size_t nb_dofs = data.getN(base).size2() / 3;
1140  if (nb_gauss_pts && nb_dofs) {
1141  auto t_h_curl = get_base_at_pts(base);
      // cc counts processed (gg, dof) pairs for the consistency check below.
1142  int cc = 0;
      // Higher-order geometry: use the per-Gauss-point tangent and length.
1143  if (tangentAtGaussPt.size1() == nb_gauss_pts) {
1144  auto t_m_at_pts = get_tangent_at_pts();
1145  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
1146  const double l0 = l1[gg];
1147  for (int ll = 0; ll != nb_dofs; ll++) {
      // Only the first component of the reference base value is used —
      // presumably the reference edge base is 1D; TODO confirm.
1148  const double val = t_h_curl(0);
1149  const double a = val / l0;
1150  t_h_curl(i) = t_m_at_pts(i) * a;
1151  ++t_h_curl;
1152  ++cc;
1153  }
1154  ++t_m_at_pts;
1155  }
      // Straight edge: constant tangent t_m and squared length l0.
1156  } else {
1157  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
1158  for (int ll = 0; ll != nb_dofs; ll++) {
1159  const double val = t_h_curl(0);
1160  const double a = val / l0;
1161  t_h_curl(i) = t_m(i) * a;
1162  ++t_h_curl;
1163  ++cc;
1164  }
1165  }
1166  }
1167 
      // Sanity check: every (Gauss point, dof) pair must have been visited.
1168  if (cc != nb_gauss_pts * nb_dofs)
1169  SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSIBLE_CASE, "Data inconsistency");
1170  }
1171  }
1172 
1174 }
1175 
// Build a rank-1 FTensor cursor over the Gauss-point value matrix: the three
// pointers address components x/y/z of the first row, and the pack stride of
// 3 advances the cursor one Gauss point per ++. (The function signature line
// 1178-1179 was dropped by the listing scrape.)
1176 template <>
1177 template <>
1180  double *ptr = &*data.data().begin();
1181  return FTensor::Tensor1<double *, 3>(ptr, &ptr[1], &ptr[2], 3);
1182 }
1183 
// Build a rank-2 (3x3) FTensor cursor over the Gauss-point gradient matrix:
// nine row-major component pointers with pack stride 9, so ++ advances one
// Gauss point. (The function signature line was dropped by the listing
// scrape.)
1184 template <>
1185 template <>
1188  double *ptr = &*data.data().begin();
1189  return FTensor::Tensor2<double *, 3, 3>(ptr, &ptr[1], &ptr[2], &ptr[3],
1190  &ptr[4], &ptr[5], &ptr[6], &ptr[7],
1191  &ptr[8], 9);
1192 }
1193 
// Specialization for a 3-component (vector) field in 3D: accumulate field
// values (3) and gradients (3x3) at every Gauss point from this entity's
// DOFs and base functions. Values are += into dataAtGaussPts /
// dataGradAtGaussPts, so contributions from successive entities sum up.
// NOTE(review): lossy listing — the signature line (1195), MoFEMFunctionBegin
// (1197), early-return macros (1199, 1204), index declarations (1208-1209)
// and final return (1249) are not visible here.
1194 template <>
1196  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
      // Nothing to do when the entity has no approximation base set.
1198  if (data.getBase() == NOBASE)
1200  const unsigned int nb_gauss_pts = data.getN().size1();
1201  const unsigned int nb_base_functions = data.getN().size2();
1202  const unsigned int nb_dofs = data.getFieldData().size();
1203  if (!nb_dofs)
1205  auto t_n = data.getFTensor0N();
      // Packed cursors over the output matrices (see the tensor helpers above).
1206  auto t_val = getValAtGaussPtsTensor<3>(dataAtGaussPts);
1207  auto t_grad = getGradAtGaussPtsTensor<3, 3>(dataGradAtGaussPts);
      // Vertex fast path: diffN holds a single row of 3*nb_base_functions
      // entries, i.e. one derivative set shared by all Gauss points —
      // hence t_diff_n is re-created (rewound) inside the gg loop.
1210  if (type == MBVERTEX &&
1211  data.getDiffN().data().size() == 3 * nb_base_functions) {
1212  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
1213  auto t_data = data.getFTensor1FieldData<3>();
1214  auto t_diff_n = data.getFTensor1DiffN<3>();
1215  unsigned int bb = 0;
      // nb_dofs / 3 base functions actually carry DOFs on this entity.
1216  for (; bb != nb_dofs / 3; ++bb) {
1217  t_val(i) += t_data(i) * t_n;
1218  t_grad(i, j) += t_data(i) * t_diff_n(j);
1219  ++t_n;
1220  ++t_diff_n;
1221  ++t_data;
1222  }
1223  ++t_val;
1224  ++t_grad;
      // Skip base functions without DOFs so t_n stays aligned per Gauss point.
1225  for (; bb != nb_base_functions; ++bb) {
1226  ++t_n;
1227  }
1228  }
      // General path: diffN varies per Gauss point, so t_diff_n is advanced
      // through all base functions (including the DOF-less tail).
1229  } else {
1230  auto t_diff_n = data.getFTensor1DiffN<3>();
1231  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
1232  auto t_data = data.getFTensor1FieldData<3>();
1233  unsigned int bb = 0;
1234  for (; bb != nb_dofs / 3; ++bb) {
1235  t_val(i) += t_data(i) * t_n;
1236  t_grad(i, j) += t_data(i) * t_diff_n(j);
1237  ++t_n;
1238  ++t_diff_n;
1239  ++t_data;
1240  }
1241  ++t_val;
1242  ++t_grad;
1243  for (; bb != nb_base_functions; ++bb) {
1244  ++t_n;
1245  ++t_diff_n;
1246  }
1247  }
1248  }
1250 }
1251 
// Specialization for a scalar field in 3D: accumulate the field value and
// its 3-component gradient at every Gauss point.
// NOTE(review): lossy listing — the signature line (1253), the t_val
// constructor prefix (1261), index declaration (1266) and return macro
// (1306) are not visible here.
1252 template <>
1254  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
1256  const unsigned int nb_gauss_pts = data.getN().size1();
1257  const unsigned int nb_base_functions = data.getN().size2();
1258  // bool constant_diff = false;
1259  const unsigned int nb_dofs = data.getFieldData().size();
1260  auto t_n = data.getFTensor0N();
      // Scalar cursor (stride 1) over the value vector and a rank-1 cursor
      // (stride 3) over the gradient matrix.
1262  FTensor::Tensor0<double *>(&*dataAtGaussPts.data().begin(), 1);
1263  double *ptr = &*dataGradAtGaussPts.data().begin();
1264  FTensor::Tensor1<FTensor::PackPtr<double *, 3>, 3> t_grad(ptr, &ptr[1],
1265  &ptr[2]);
      // Vertex fast path: single shared derivative row (see the <3,3>
      // specialization above for the same pattern).
1267  if (type == MBVERTEX &&
1268  data.getDiffN().data().size() == 3 * nb_base_functions) {
1269  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
1270  auto t_data = data.getFTensor0FieldData();
1271  auto t_diff_n = data.getFTensor1DiffN<3>();
1272  unsigned int bb = 0;
      // NOTE(review): nb_dofs / 3 as the loop bound for a scalar field looks
      // suspicious (the vector specialization uses the same divisor for 3
      // components per DOF) — confirm against upstream MoFEM.
1273  for (; bb != nb_dofs / 3; ++bb) {
1274  t_val += t_data * t_n;
1275  t_grad(i) += t_data * t_diff_n(i);
1276  ++t_n;
1277  ++t_diff_n;
1278  ++t_data;
1279  }
1280  ++t_val;
1281  ++t_grad;
1282  for (; bb != nb_base_functions; ++bb) {
1283  ++t_n;
1284  }
1285  }
1286  } else {
1287  auto t_diff_n = data.getFTensor1DiffN<3>();
1288  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
1289  auto t_data = data.getFTensor0FieldData();
1290  unsigned int bb = 0;
1291  for (; bb != nb_dofs / 3; ++bb) {
      // NOTE(review): plain assignment here, while the MBVERTEX branch (line
      // 1274) and t_grad below use `+=`. If this operator accumulates across
      // entities, `=` would discard earlier contributions — probable bug;
      // verify against upstream MoFEM before changing.
1292  t_val = t_data * t_n;
1293  t_grad(i) += t_data * t_diff_n(i);
1294  ++t_n;
1295  ++t_diff_n;
1296  ++t_data;
1297  }
1298  ++t_val;
1299  ++t_grad;
1300  for (; bb != nb_base_functions; ++bb) {
1301  ++t_n;
1302  ++t_diff_n;
1303  }
1304  }
1305  }
1307 }
1308 } // namespace MoFEM
MatrixDouble diffNinvJac
static FTensor::Tensor0< FTensor::PackPtr< double *, 1 > > getFTensor0FromVec(ublas::vector< T, A > &data)
Get tensor rank 0 (scalar) from data vector.
Definition: Templates.hpp:141
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
FieldApproximationBase & getBase()
Get approximation base.
void cblas_dgemv(const enum CBLAS_ORDER order, const enum CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double *A, const int lda, const double *X, const int incX, const double beta, double *Y, const int incY)
Definition: cblas_dgemv.c:11
DataOperator(const bool symm=true)
MoFEMErrorCode determinantTensor3by3(T1 &t, T2 &det)
Calculate determinant 3 by 3.
Definition: Templates.hpp:482
FTensor::Index< 'j', 3 > j
virtual MoFEMErrorCode opRhs(DataForcesAndSourcesCore &data, const bool error_if_no_base=false)
Data structure for a finite element entity. It keeps data about indices of degrees of freedom,...
PetscErrorCode Spin(double *spinOmega, double *vecOmega)
calculate spin matrix from vector
Definition: fem_tools.c:558
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
FTensor::Index< 'i', 3 > i
FTensor::Index< 'i', 3 > i
std::array< boost::ptr_vector< EntData >, MBMAXTYPE > dataOnEntities
#define MoFEMFunctionBeginHot
First executable line of each MoFEM function, used for error handling. Final line of MoFEM functions ...
Definition: definitions.h:507
virtual MoFEMErrorCode opLhs(DataForcesAndSourcesCore &row_data, DataForcesAndSourcesCore &col_data)
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
ublas::matrix< double, ublas::row_major, DoubleAllocator > MatrixDouble
Definition: Types.hpp:75
#define MoFEMFunctionReturn(a)
Last executable line of each PETSc function used for error handling. Replaces return()
Definition: definitions.h:483
#define CHKERRG(n)
Check error code of MoFEM/MOAB/PETSc function.
Definition: definitions.h:550
std::array< bool, MBMAXTYPE > doEntities
If true operator is executed for entity.
FTensor::Index< 'I', 3 > I
Definition: PlasticOps.hpp:70
FTensor::Tensor2< double *, 3, 3 > tInvJac
FTensor::Tensor2< double *, 3, 3 > tInvJac
FTensor::Tensor1< double *, Tensor_Dim > getFTensor1DiffN(const FieldApproximationBase base)
Get derivatives of base functions.
virtual MoFEMErrorCode doWork(int row_side, int col_side, EntityType row_type, EntityType col_type, DataForcesAndSourcesCore::EntData &row_data, DataForcesAndSourcesCore::EntData &col_data)
Operator for bi-linear form, usually to calculate values on left hand side.
FTensor::Tensor2< double *, 3, 3 > tInvJac
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
#define MoFEMFunctionReturnHot(a)
Last executable line of each PETSc function used for error handling. Replaces return()
Definition: definitions.h:514
FTensor::Index< 'i', 3 > i
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
implementation of Data Operators for Forces and Sources
Definition: Common.hpp:21
FTensor::Tensor0< FTensor::PackPtr< double *, 1 > > getFTensor0FieldData()
Return scalar field as an FTensor of rank 0.
virtual std::map< std::string, boost::shared_ptr< MatrixDouble > > & getBBDiffNMap()
double cblas_ddot(const int N, const double *X, const int incX, const double *Y, const int incY)
Definition: cblas_ddot.c:12
FTensor::Index< 'i', 3 > i
static const char *const ApproximationBaseNames[]
Definition: definitions.h:164
MoFEMErrorCode invertTensor3by3(ublas::matrix< T, L, A > &jac_data, ublas::vector< T, A > &det_data, ublas::matrix< T, L, A > &inv_jac_data)
Calculate inverse of tensor rank 2 at integration points.
Definition: Templates.hpp:463
Get field values and gradients at Gauss points.
FTensor::Index< 'j', 3 > j
FTensor::Index< 'k', 3 > k
FTensor::Tensor1< FTensor::PackPtr< double *, Tensor_Dim >, Tensor_Dim > getFTensor1FieldData()
Return FTensor of rank 1, i.e. vector from field data coefficients.
bool getSymm() const
Get if operator uses symmetry of DOFs or not.
const Tensor2_symmetric_Expr< const ddTensor0< T, Dim, i, j >, typename promote< T, double >::V, Dim, i, j > dd(const Tensor0< T * > &a, const Index< i, Dim > index1, const Index< j, Dim > index2, const Tensor1< int, Dim > &d_ijk, const Tensor1< double, Dim > &d_xyz)
Definition: ddTensor0.hpp:33
FieldApproximationBase
approximation base
Definition: definitions.h:150
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
FTensor::Index< 'm', 2 > m
Definition: PlasticOps.hpp:67
PetscErrorCode MoFEMErrorCode
MoFEM/PETSc error code.
Definition: Exceptions.hpp:67
Ainsworth Cole (Legendre) approx. base .
Definition: definitions.h:152
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
FTensor::Index< 'k', 3 > k
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
#define CHKERR
Inline error check.
Definition: definitions.h:602
FTensor::Tensor2< double *, 3, 3 > tJac
FTensor::Index< 'k', 2 > k
Definition: ContactOps.hpp:28
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
const VectorDofs & getFieldDofs() const
get dofs data structure FEDofEntity
FTensor::Tensor2< FTensor::PackPtr< double *, Tensor_Dim0 *Tensor_Dim1 >, Tensor_Dim0, Tensor_Dim1 > getFTensor2DiffN(FieldApproximationBase base)
Get derivatives of base functions for Hdiv space.
virtual std::array< boost::shared_ptr< MatrixDouble >, MaxBernsteinBezierOrder > & getBBDiffNByOrderArray()
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
Data on single entity (This is passed as argument to DataOperator::doWork)
FTensor::Index< 'j', 2 > j
Definition: ContactOps.hpp:27
FTensor::Index< 'j', 3 > j
#define MoFEMFunctionBegin
First executable line of each MoFEM function, used for error handling. Final line of MoFEM functions ...
Definition: definitions.h:413
FTensor::Index< 'j', 3 > j
ublas::vector< double, DoubleAllocator > VectorDouble
Definition: Types.hpp:73
MatrixDouble & getN(const FieldApproximationBase base)
get base functions. This returns a matrix (nb. of rows is equal to nb. of Gauss pts, nb....
const VectorDouble & getFieldData() const
get dofs values
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
FTensor::Tensor1< FTensor::PackPtr< double *, Tensor_Dim >, Tensor_Dim > getFTensor1N(FieldApproximationBase base)
Get base functions for Hdiv/Hcurl spaces.
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
FTensor::Tensor0< FTensor::PackPtr< double *, 1 > > getFTensor0N(const FieldApproximationBase base)
Get base function as Tensor0.
MatrixDouble & getDiffN(const FieldApproximationBase base)
get derivatives of base functions
FTensor::Index< 'i', 2 > i
[Common data]
Definition: ContactOps.hpp:26
constexpr std::enable_if<(Dim0<=2 &&Dim1<=2), Tensor2_Expr< Levi_Civita< T >, T, Dim0, Dim1, i, j > >::type levi_civita(const Index< i, Dim0 > &, const Index< j, Dim1 > &)
levi_civita functions to make for easy adhoc use