v0.9.0
DataOperators.cpp
Go to the documentation of this file.
1 /** file DataOperators.cpp
2 
3  \brief implementation of Data Operators for Forces and Sources
4 
5 */
6 
7 /* This file is part of MoFEM.
8  * MoFEM is free software: you can redistribute it and/or modify it under
9  * the terms of the GNU Lesser General Public License as published by the
10  * Free Software Foundation, either version 3 of the License, or (at your
11  * option) any later version.
12  *
13  * MoFEM is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
16  * License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with MoFEM. If not, see <http://www.gnu.org/licenses/>. */
20 
21 #ifdef __cplusplus
22 extern "C" {
23 #endif
24 #include <cblas.h>
25 #include <lapack_wrap.h>
26 #include <gm_rule.h>
27 #ifdef __cplusplus
28 }
29 #endif
30 
31 namespace MoFEM {
32 
34  DataForcesAndSourcesCore &col_data,
35  bool symm) {
37 
38  // nodes
39  for (unsigned int nn = 0; nn != row_data.dataOnEntities[MBVERTEX].size();
40  nn++) {
41  unsigned int NN = 0;
42  if (symm)
43  NN = nn;
44  for (; NN != col_data.dataOnEntities[MBVERTEX].size(); NN++) {
45  ierr = doWork(nn, NN, MBVERTEX, MBVERTEX,
46  row_data.dataOnEntities[MBVERTEX][0],
47  col_data.dataOnEntities[MBVERTEX][0]);
48  CHKERRG(ierr);
49  }
50  if (!symm) {
51  for (unsigned int EE = 0; EE < col_data.dataOnEntities[MBEDGE].size();
52  EE++) {
53  if (col_data.dataOnEntities[MBEDGE][EE].getN().size1() == 0)
54  continue;
55  ierr = doWork(nn, EE, MBVERTEX, MBEDGE,
56  row_data.dataOnEntities[MBVERTEX][0],
57  col_data.dataOnEntities[MBEDGE][EE]);
58  CHKERRG(ierr);
59  }
60  for (unsigned int FF = 0; FF < col_data.dataOnEntities[MBTRI].size();
61  FF++) {
62  if (col_data.dataOnEntities[MBTRI][FF].getN().size1() == 0)
63  continue;
64  ierr = doWork(nn, FF, MBVERTEX, MBTRI,
65  row_data.dataOnEntities[MBVERTEX][0],
66  col_data.dataOnEntities[MBTRI][FF]);
67  CHKERRG(ierr);
68  }
69  for (unsigned int QQ = 0; QQ < col_data.dataOnEntities[MBQUAD].size();
70  QQ++) {
71  if (col_data.dataOnEntities[MBQUAD][QQ].getN().size1() == 0)
72  continue;
73  ierr = doWork(nn, QQ, MBVERTEX, MBQUAD,
74  row_data.dataOnEntities[MBVERTEX][0],
75  col_data.dataOnEntities[MBQUAD][QQ]);
76  CHKERRG(ierr);
77  }
78  }
79  for (unsigned int VV = 0; VV < col_data.dataOnEntities[MBTET].size();
80  VV++) {
81  if (col_data.dataOnEntities[MBTET][VV].getN().size1() == 0)
82  continue;
83  ierr =
84  doWork(nn, VV, MBVERTEX, MBTET, row_data.dataOnEntities[MBVERTEX][0],
85  col_data.dataOnEntities[MBTET][VV]);
86  CHKERRG(ierr);
87  }
88  for (unsigned int PP = 0; PP < col_data.dataOnEntities[MBPRISM].size();
89  PP++) {
90  if (col_data.dataOnEntities[MBPRISM][PP].getN().size1() == 0)
91  continue;
92  ierr = doWork(nn, PP, MBVERTEX, MBPRISM,
93  row_data.dataOnEntities[MBVERTEX][0],
94  col_data.dataOnEntities[MBPRISM][PP]);
95  CHKERRG(ierr);
96  }
97  for (unsigned int MM = 0; MM < col_data.dataOnEntities[MBENTITYSET].size();
98  MM++) {
99  if (row_data.dataOnEntities[MBENTITYSET][MM].getIndices().empty() &&
100  row_data.dataOnEntities[MBENTITYSET][MM].getFieldData().empty())
101  continue;
102  ierr = doWork(nn, MM, MBVERTEX, MBENTITYSET,
103  row_data.dataOnEntities[MBVERTEX][0],
104  col_data.dataOnEntities[MBENTITYSET][MM]);
105  CHKERRG(ierr);
106  }
107  }
108 
109  // edges
110  for (unsigned int ee = 0; ee < row_data.dataOnEntities[MBEDGE].size(); ee++) {
111  if (row_data.dataOnEntities[MBEDGE][ee].getN().size1() == 0)
112  continue;
113  for (unsigned int NN = 0; NN != col_data.dataOnEntities[MBVERTEX].size();
114  NN++) {
115  ierr =
116  doWork(ee, NN, MBEDGE, MBVERTEX, row_data.dataOnEntities[MBEDGE][ee],
117  col_data.dataOnEntities[MBVERTEX][0]);
118  CHKERRG(ierr);
119  }
120  unsigned int EE = 0;
121  if (symm)
122  EE = ee;
123  for (; EE < col_data.dataOnEntities[MBEDGE].size(); EE++) {
124  if (col_data.dataOnEntities[MBEDGE][EE].getN().size1() == 0)
125  continue;
126  ierr = doWork(ee, EE, MBEDGE, MBEDGE, row_data.dataOnEntities[MBEDGE][ee],
127  col_data.dataOnEntities[MBEDGE][EE]);
128  CHKERRG(ierr);
129  }
130  // tris
131  for (unsigned int FF = 0; FF < col_data.dataOnEntities[MBTRI].size();
132  FF++) {
133  if (col_data.dataOnEntities[MBTRI][FF].getN().size1() == 0)
134  continue;
135  ierr = doWork(ee, FF, MBEDGE, MBTRI, row_data.dataOnEntities[MBEDGE][ee],
136  col_data.dataOnEntities[MBTRI][FF]);
137  CHKERRG(ierr);
138  }
139  // quad
140  for (unsigned int QQ = 0; QQ < col_data.dataOnEntities[MBQUAD].size();
141  QQ++) {
142  if (col_data.dataOnEntities[MBQUAD][QQ].getN().size1() == 0)
143  continue;
144  ierr = doWork(ee, QQ, MBEDGE, MBQUAD, row_data.dataOnEntities[MBEDGE][ee],
145  col_data.dataOnEntities[MBQUAD][QQ]);
146  CHKERRG(ierr);
147  }
148  // tet
149  for (unsigned int VV = 0; VV < col_data.dataOnEntities[MBTET].size();
150  VV++) {
151  if (col_data.dataOnEntities[MBTET][VV].getN().size1() == 0)
152  continue;
153  ierr = doWork(ee, VV, MBEDGE, MBTET, row_data.dataOnEntities[MBEDGE][ee],
154  col_data.dataOnEntities[MBTET][VV]);
155  CHKERRG(ierr);
156  }
157  // prism
158  for (unsigned int PP = 0; PP < col_data.dataOnEntities[MBPRISM].size();
159  PP++) {
160  if (col_data.dataOnEntities[MBPRISM][PP].getN().size1() == 0)
161  continue;
162  ierr =
163  doWork(ee, PP, MBEDGE, MBPRISM, row_data.dataOnEntities[MBEDGE][ee],
164  col_data.dataOnEntities[MBPRISM][PP]);
165  CHKERRG(ierr);
166  }
167  for (unsigned int MM = 0; MM < col_data.dataOnEntities[MBENTITYSET].size();
168  MM++) {
169  if (col_data.dataOnEntities[MBENTITYSET][MM].getIndices().empty() &&
170  col_data.dataOnEntities[MBENTITYSET][MM].getFieldData().empty())
171  continue;
172  ierr = doWork(ee, MM, MBEDGE, MBENTITYSET,
173  row_data.dataOnEntities[MBEDGE][ee],
174  col_data.dataOnEntities[MBENTITYSET][MM]);
175  CHKERRG(ierr);
176  }
177  }
178 
179  // tris
180  for (unsigned int ff = 0; ff < row_data.dataOnEntities[MBTRI].size(); ff++) {
181  if (row_data.dataOnEntities[MBTRI][ff].getN().size1() == 0)
182  continue;
183  for (unsigned int NN = 0; NN != col_data.dataOnEntities[MBVERTEX].size();
184  NN++) {
185  ierr = doWork(ff, NN, MBTRI, MBVERTEX, row_data.dataOnEntities[MBTRI][ff],
186  col_data.dataOnEntities[MBVERTEX][0]);
187  CHKERRG(ierr);
188  }
189  if (!symm) {
190  unsigned int EE = 0;
191  for (; EE < col_data.dataOnEntities[MBEDGE].size(); EE++) {
192  if (col_data.dataOnEntities[MBEDGE][EE].getN().size1() == 0)
193  continue;
194  ierr = doWork(ff, EE, MBTRI, MBEDGE, row_data.dataOnEntities[MBTRI][ff],
195  col_data.dataOnEntities[MBEDGE][EE]);
196  CHKERRG(ierr);
197  }
198  }
199  unsigned int FF = 0;
200  if (symm)
201  FF = ff;
202  for (; FF < col_data.dataOnEntities[MBTRI].size(); FF++) {
203  if (col_data.dataOnEntities[MBTRI][FF].getN().size1() == 0)
204  continue;
205  ierr = doWork(ff, FF, MBTRI, MBTRI, row_data.dataOnEntities[MBTRI][ff],
206  col_data.dataOnEntities[MBTRI][FF]);
207  CHKERRG(ierr);
208  }
209  for (unsigned int QQ = 0; QQ < col_data.dataOnEntities[MBQUAD].size();
210  QQ++) {
211  if (col_data.dataOnEntities[MBQUAD][QQ].getN().size1() == 0)
212  continue;
213  ierr = doWork(ff, QQ, MBTRI, MBQUAD, row_data.dataOnEntities[MBTRI][ff],
214  col_data.dataOnEntities[MBQUAD][QQ]);
215  CHKERRG(ierr);
216  }
217  for (unsigned int VV = 0; VV < col_data.dataOnEntities[MBTET].size();
218  VV++) {
219  if (col_data.dataOnEntities[MBTET][VV].getN().size1() == 0)
220  continue;
221  ierr = doWork(ff, VV, MBTRI, MBTET, row_data.dataOnEntities[MBTRI][ff],
222  col_data.dataOnEntities[MBTET][VV]);
223  CHKERRG(ierr);
224  }
225  for (unsigned int PP = 0; PP < col_data.dataOnEntities[MBPRISM].size();
226  PP++) {
227  if (col_data.dataOnEntities[MBPRISM][PP].getN().size1() == 0)
228  continue;
229  ierr = doWork(ff, PP, MBTRI, MBPRISM, row_data.dataOnEntities[MBTRI][ff],
230  col_data.dataOnEntities[MBPRISM][PP]);
231  CHKERRG(ierr);
232  }
233  for (unsigned int MM = 0; MM < col_data.dataOnEntities[MBENTITYSET].size();
234  MM++) {
235  if (col_data.dataOnEntities[MBENTITYSET][MM].getIndices().empty() &&
236  col_data.dataOnEntities[MBENTITYSET][MM].getFieldData().empty())
237  continue;
238  ierr =
239  doWork(ff, MM, MBTRI, MBENTITYSET, row_data.dataOnEntities[MBTRI][ff],
240  col_data.dataOnEntities[MBENTITYSET][MM]);
241  CHKERRG(ierr);
242  }
243  }
244 
245  // quads
246  for (unsigned int qq = 0; qq < row_data.dataOnEntities[MBQUAD].size(); qq++) {
247  if (row_data.dataOnEntities[MBQUAD][qq].getN().size1() == 0)
248  continue;
249  for (unsigned int NN = 0; NN != col_data.dataOnEntities[MBVERTEX].size();
250  NN++) {
251  ierr =
252  doWork(qq, NN, MBQUAD, MBVERTEX, row_data.dataOnEntities[MBQUAD][qq],
253  col_data.dataOnEntities[MBVERTEX][0]);
254  CHKERRG(ierr);
255  }
256  if (!symm) {
257  unsigned int EE = 0;
258  for (; EE < col_data.dataOnEntities[MBEDGE].size(); EE++) {
259  if (col_data.dataOnEntities[MBEDGE][EE].getN().size1() == 0)
260  continue;
261  ierr =
262  doWork(qq, EE, MBQUAD, MBEDGE, row_data.dataOnEntities[MBQUAD][qq],
263  col_data.dataOnEntities[MBEDGE][EE]);
264  CHKERRG(ierr);
265  }
266  unsigned int FF = 0;
267  for (; FF < col_data.dataOnEntities[MBTRI].size(); FF++) {
268  if (col_data.dataOnEntities[MBTRI][FF].getN().size1() == 0)
269  continue;
270  ierr =
271  doWork(qq, FF, MBQUAD, MBTRI, row_data.dataOnEntities[MBQUAD][qq],
272  col_data.dataOnEntities[MBTRI][FF]);
273  CHKERRG(ierr);
274  }
275  }
276  unsigned int QQ = 0;
277  if (symm)
278  QQ = qq;
279  for (; QQ < col_data.dataOnEntities[MBQUAD].size(); QQ++) {
280  if (col_data.dataOnEntities[MBQUAD][QQ].getN().size1() == 0)
281  continue;
282  ierr = doWork(qq, QQ, MBQUAD, MBQUAD, row_data.dataOnEntities[MBQUAD][qq],
283  col_data.dataOnEntities[MBQUAD][QQ]);
284  CHKERRG(ierr);
285  }
286  for (unsigned int PP = 0; PP < col_data.dataOnEntities[MBPRISM].size();
287  PP++) {
288  if (col_data.dataOnEntities[MBPRISM][PP].getN().size1() == 0)
289  continue;
290  ierr = doWork(qq, PP, MBTRI, MBPRISM, row_data.dataOnEntities[MBQUAD][qq],
291  col_data.dataOnEntities[MBPRISM][PP]);
292  CHKERRG(ierr);
293  }
294  for (unsigned int MM = 0; MM < col_data.dataOnEntities[MBENTITYSET].size();
295  MM++) {
296  if (col_data.dataOnEntities[MBENTITYSET][MM].getIndices().empty() &&
297  col_data.dataOnEntities[MBENTITYSET][MM].getFieldData().empty())
298  continue;
299  ierr = doWork(qq, MM, MBQUAD, MBENTITYSET,
300  row_data.dataOnEntities[MBQUAD][qq],
301  col_data.dataOnEntities[MBENTITYSET][MM]);
302  CHKERRG(ierr);
303  }
304  }
305 
306  // volumes
307  for (unsigned int vv = 0; vv < row_data.dataOnEntities[MBTET].size(); vv++) {
308  if (row_data.dataOnEntities[MBTET][vv].getN().size1() == 0)
309  continue;
310  if (!symm) {
311  // vertex
312  for (unsigned int NN = 0; NN != col_data.dataOnEntities[MBVERTEX].size();
313  NN++) {
314  ierr =
315  doWork(vv, NN, MBTET, MBVERTEX, row_data.dataOnEntities[MBTET][vv],
316  col_data.dataOnEntities[MBVERTEX][0]);
317  CHKERRG(ierr);
318  }
319  // edges
320  for (unsigned int EE = 0; EE < col_data.dataOnEntities[MBEDGE].size();
321  EE++) {
322  if (col_data.dataOnEntities[MBEDGE][EE].getN().size1() == 0)
323  continue;
324  ierr = doWork(vv, EE, MBTET, MBEDGE, row_data.dataOnEntities[MBTET][vv],
325  col_data.dataOnEntities[MBEDGE][EE]);
326  CHKERRG(ierr);
327  }
328  // faces
329  for (unsigned int FF = 0; FF < col_data.dataOnEntities[MBTRI].size();
330  FF++) {
331  if (col_data.dataOnEntities[MBTRI][FF].getN().size1() == 0)
332  continue;
333  ierr = doWork(vv, FF, MBTET, MBTRI, row_data.dataOnEntities[MBTET][vv],
334  col_data.dataOnEntities[MBTRI][FF]);
335  CHKERRG(ierr);
336  }
337  }
338  unsigned int VV = 0;
339  if (symm)
340  VV = vv;
341  for (; VV < col_data.dataOnEntities[MBTET].size(); VV++) {
342  if (col_data.dataOnEntities[MBTET][VV].getN().size1() == 0)
343  continue;
344  ierr = doWork(vv, VV, MBTET, MBTET, row_data.dataOnEntities[MBTET][vv],
345  col_data.dataOnEntities[MBTET][VV]);
346  CHKERRG(ierr);
347  }
348  for (unsigned int MM = 0; MM < col_data.dataOnEntities[MBENTITYSET].size();
349  MM++) {
350  if (col_data.dataOnEntities[MBENTITYSET][MM].getIndices().empty() &&
351  col_data.dataOnEntities[MBENTITYSET][MM].getFieldData().empty())
352  continue;
353  ierr =
354  doWork(vv, MM, MBTET, MBENTITYSET, row_data.dataOnEntities[MBTET][vv],
355  col_data.dataOnEntities[MBENTITYSET][MM]);
356  CHKERRG(ierr);
357  }
358  }
359 
360  for (unsigned int pp = 0; pp < row_data.dataOnEntities[MBPRISM].size();
361  pp++) {
362  if (row_data.dataOnEntities[MBPRISM][pp].getN().size1() == 0)
363  continue;
364  if (!symm) {
365  // vertex
366  for (unsigned int NN = 0; NN != col_data.dataOnEntities[MBVERTEX].size();
367  NN++) {
368  ierr = doWork(pp, NN, MBPRISM, MBVERTEX,
369  row_data.dataOnEntities[MBPRISM][pp],
370  col_data.dataOnEntities[MBVERTEX][0]);
371  CHKERRG(ierr);
372  }
373  // edges
374  for (unsigned int EE = 0; EE < col_data.dataOnEntities[MBEDGE].size();
375  EE++) {
376  if (col_data.dataOnEntities[MBEDGE][EE].getN().size1() == 0)
377  continue;
378  ierr = doWork(pp, EE, MBPRISM, MBEDGE,
379  row_data.dataOnEntities[MBPRISM][pp],
380  col_data.dataOnEntities[MBEDGE][EE]);
381  CHKERRG(ierr);
382  }
383  // faces
384  for (unsigned int FF = 0; FF < col_data.dataOnEntities[MBTRI].size();
385  FF++) {
386  if (col_data.dataOnEntities[MBTRI][FF].getN().size1() == 0)
387  continue;
388  ierr =
389  doWork(pp, FF, MBPRISM, MBTRI, row_data.dataOnEntities[MBPRISM][pp],
390  col_data.dataOnEntities[MBTRI][FF]);
391  CHKERRG(ierr);
392  }
393  // quads
394  for (unsigned int QQ = 0; QQ < col_data.dataOnEntities[MBQUAD].size();
395  QQ++) {
396  if (col_data.dataOnEntities[MBQUAD][QQ].getN().size1() == 0)
397  continue;
398  ierr = doWork(pp, QQ, MBPRISM, MBQUAD,
399  row_data.dataOnEntities[MBPRISM][pp],
400  col_data.dataOnEntities[MBQUAD][QQ]);
401  CHKERRG(ierr);
402  }
403  }
404  unsigned int PP = 0;
405  if (symm)
406  PP = pp;
407  for (; PP < col_data.dataOnEntities[MBPRISM].size(); PP++) {
408  if (col_data.dataOnEntities[MBPRISM][PP].getN().size1() == 0)
409  continue;
410  ierr =
411  doWork(pp, PP, MBPRISM, MBPRISM, row_data.dataOnEntities[MBPRISM][pp],
412  col_data.dataOnEntities[MBPRISM][PP]);
413  CHKERRG(ierr);
414  }
415  for (unsigned int MM = 0; MM < col_data.dataOnEntities[MBENTITYSET].size();
416  MM++) {
417  if (col_data.dataOnEntities[MBENTITYSET][MM].getIndices().empty() &&
418  col_data.dataOnEntities[MBENTITYSET][MM].getFieldData().empty())
419  continue;
420  ierr = doWork(pp, MM, MBPRISM, MBENTITYSET,
421  row_data.dataOnEntities[MBPRISM][pp],
422  col_data.dataOnEntities[MBENTITYSET][MM]);
423  CHKERRG(ierr);
424  }
425  }
426 
427  // meshsets
428  for (unsigned int mm = 0; mm < row_data.dataOnEntities[MBENTITYSET].size();
429  mm++) {
430  if (row_data.dataOnEntities[MBENTITYSET][mm].getIndices().empty() &&
431  row_data.dataOnEntities[MBENTITYSET][mm].getFieldData().empty())
432  continue;
433  if (!symm) {
434  // vertex
435  for (unsigned int NN = 0; NN != col_data.dataOnEntities[MBVERTEX].size();
436  NN++) {
437  ierr = doWork(mm, NN, MBENTITYSET, MBVERTEX,
438  row_data.dataOnEntities[MBENTITYSET][mm],
439  col_data.dataOnEntities[MBVERTEX][0]);
440  CHKERRG(ierr);
441  }
442  // edges
443  for (unsigned int EE = 0; EE < col_data.dataOnEntities[MBEDGE].size();
444  EE++) {
445  if (col_data.dataOnEntities[MBEDGE][EE].getN().size1() == 0)
446  continue;
447  ierr = doWork(mm, EE, MBENTITYSET, MBEDGE,
448  row_data.dataOnEntities[MBENTITYSET][mm],
449  col_data.dataOnEntities[MBEDGE][EE]);
450  CHKERRG(ierr);
451  }
452  // faces
453  for (unsigned int FF = 0; FF < col_data.dataOnEntities[MBTRI].size();
454  FF++) {
455  if (col_data.dataOnEntities[MBTRI][FF].getN().size1() == 0)
456  continue;
457  ierr = doWork(mm, FF, MBENTITYSET, MBTRI,
458  row_data.dataOnEntities[MBENTITYSET][mm],
459  col_data.dataOnEntities[MBTRI][FF]);
460  CHKERRG(ierr);
461  }
462  // quad
463  for (unsigned int QQ = 0; QQ < col_data.dataOnEntities[MBQUAD].size();
464  QQ++) {
465  if (col_data.dataOnEntities[MBQUAD][QQ].getN().size1() == 0)
466  continue;
467  ierr = doWork(mm, QQ, MBENTITYSET, MBQUAD,
468  row_data.dataOnEntities[MBENTITYSET][mm],
469  col_data.dataOnEntities[MBQUAD][QQ]);
470  CHKERRG(ierr);
471  }
472  // volume
473  for (unsigned int VV = 0; VV < col_data.dataOnEntities[MBTET].size();
474  VV++) {
475  ierr = doWork(mm, VV, MBENTITYSET, MBTET,
476  row_data.dataOnEntities[MBENTITYSET][mm],
477  col_data.dataOnEntities[MBTET][VV]);
478  CHKERRG(ierr);
479  }
480  for (unsigned int PP = 0; PP < col_data.dataOnEntities[MBPRISM].size();
481  PP++) {
482  ierr = doWork(mm, PP, MBENTITYSET, MBPRISM,
483  row_data.dataOnEntities[MBENTITYSET][mm],
484  col_data.dataOnEntities[MBPRISM][PP]);
485  CHKERRG(ierr);
486  }
487  }
488  unsigned int MM = 0;
489  if (symm)
490  MM = mm;
491  for (; MM < col_data.dataOnEntities[MBENTITYSET].size(); MM++) {
492  if (row_data.dataOnEntities[MBENTITYSET][MM].getIndices().empty() &&
493  row_data.dataOnEntities[MBENTITYSET][MM].getFieldData().empty())
494  continue;
495  ierr = doWork(mm, MM, MBENTITYSET, MBENTITYSET,
496  row_data.dataOnEntities[MBENTITYSET][mm],
497  col_data.dataOnEntities[MBENTITYSET][MM]);
498  CHKERRG(ierr);
499  }
500  }
501 
503 }
504 
/** \brief Explicit specialization: batch 3x3 determinant + inverse.
 *
 * Treats each column of jac_data as one gauss point's Jacobian and fills
 * det_data / inv_jac_data point by point via determinantTensor3by3 /
 * invertTensor3by3 on FTensor views.
 *
 * NOTE(review): doxygen extract -- the MoFEMFunctionBegin/Return lines
 * (original lines 510/526) were dropped by the extraction.
 */
505 template <>
506 MoFEMErrorCode invertTensor3by3<3, double, ublas::row_major, DoubleAllocator>(
507  MatrixDouble &jac_data, VectorDouble &det_data,
508  MatrixDouble &inv_jac_data) {
509 
511  auto A = getFTensor2FromMat<3, 3>(jac_data);
// Gauss points are stored column-wise, one Jacobian per column.
512  int nb_gauss_pts = jac_data.size2();
513  det_data.resize(nb_gauss_pts, false);
// NOTE(review): a 3x3 tensor view over inv_jac_data is created below, which
// suggests 9 rows (one per component) rather than 3 -- confirm this resize
// against the upstream source.
514  inv_jac_data.resize(3, nb_gauss_pts, false);
515  auto det = getFTensor0FromVec(det_data);
516  auto I = getFTensor2FromMat<3, 3>(inv_jac_data);
517  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
518  ierr = determinantTensor3by3(A, det);
519  CHKERRG(ierr);
520  ierr = invertTensor3by3(A, det, I);
521  CHKERRG(ierr);
// Advance all three FTensor cursors to the next gauss point.
522  ++A;
523  ++det;
524  ++I;
525  }
527 }
528 
530  const bool do_vertices, const bool do_edges,
531  const bool do_quads, const bool do_tris,
532  const bool do_tets, const bool do_prisms,
533  const bool error_if_no_base) {
535 
536  if (do_vertices) {
537  for (unsigned int nn = 0; nn < data.dataOnEntities[MBVERTEX].size(); nn++) {
538  if (error_if_no_base &&
539  data.dataOnEntities[MBVERTEX][nn].getFieldData().size() &&
540  (data.dataOnEntities[MBVERTEX][nn].getBase() == NOBASE ||
541  data.dataOnEntities[MBVERTEX][nn].getBase() == LASTBASE)) {
542  for (VectorDofs::iterator it =
543  data.dataOnEntities[MBVERTEX][nn].getFieldDofs().begin();
544  it != data.dataOnEntities[MBVERTEX][nn].getFieldDofs().end(); it++)
545  if ((*it) && (*it)->getActive()) {
546  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
547  "No base on Vertex and side %d", nn);
548  }
549  }
550  ierr = doWork(nn, MBVERTEX, data.dataOnEntities[MBVERTEX][nn]);
551  CHKERRG(ierr);
552  }
553  }
554  if (do_edges) {
555  for (unsigned int ee = 0; ee < data.dataOnEntities[MBEDGE].size(); ee++) {
556  if (error_if_no_base &&
557  data.dataOnEntities[MBEDGE][ee].getFieldData().size() &&
558  (data.dataOnEntities[MBEDGE][ee].getBase() == NOBASE ||
559  data.dataOnEntities[MBEDGE][ee].getBase() == LASTBASE)) {
560  for (VectorDofs::iterator it =
561  data.dataOnEntities[MBEDGE][ee].getFieldDofs().begin();
562  it != data.dataOnEntities[MBEDGE][ee].getFieldDofs().end(); it++)
563  if ((*it) && (*it)->getActive()) {
564  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
565  "No base on Edge and side %d", ee);
566  }
567  }
568  ierr = doWork(ee, MBEDGE, data.dataOnEntities[MBEDGE][ee]);
569  CHKERRG(ierr);
570  }
571  }
572  if (do_tris) {
573  for (unsigned int ff = 0; ff < data.dataOnEntities[MBTRI].size(); ff++) {
574  if (error_if_no_base &&
575  data.dataOnEntities[MBTRI][ff].getFieldData().size() &&
576  (data.dataOnEntities[MBTRI][ff].getBase() == NOBASE ||
577  data.dataOnEntities[MBTRI][ff].getBase() == LASTBASE)) {
578  for (VectorDofs::iterator it =
579  data.dataOnEntities[MBTRI][ff].getFieldDofs().begin();
580  it != data.dataOnEntities[MBTRI][ff].getFieldDofs().end(); it++)
581  if ((*it) && (*it)->getActive()) {
582  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
583  "No base on Triangle and side %d", ff);
584  }
585  }
586  ierr = doWork(ff, MBTRI, data.dataOnEntities[MBTRI][ff]);
587  CHKERRG(ierr);
588  }
589  }
590  if (do_quads) {
591  for (unsigned int qq = 0; qq < data.dataOnEntities[MBQUAD].size(); qq++) {
592  if (error_if_no_base &&
593  data.dataOnEntities[MBQUAD][qq].getFieldData().size() &&
594  (data.dataOnEntities[MBQUAD][qq].getBase() == NOBASE ||
595  data.dataOnEntities[MBQUAD][qq].getBase() == LASTBASE)) {
596  for (VectorDofs::iterator it =
597  data.dataOnEntities[MBQUAD][qq].getFieldDofs().begin();
598  it != data.dataOnEntities[MBQUAD][qq].getFieldDofs().end(); it++)
599  if ((*it) && (*it)->getActive()) {
600  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
601  "No base on Quad and side %d", qq);
602  }
603  }
604  ierr = doWork(qq, MBQUAD, data.dataOnEntities[MBQUAD][qq]);
605  CHKERRG(ierr);
606  }
607  }
608  if (do_tets) {
609  for (unsigned int vv = 0; vv < data.dataOnEntities[MBTET].size(); vv++) {
610  if (error_if_no_base &&
611  data.dataOnEntities[MBTET][vv].getFieldData().size() &&
612  (data.dataOnEntities[MBTET][vv].getBase() == NOBASE &&
613  data.dataOnEntities[MBTET][vv].getBase() == LASTBASE)) {
614  for (VectorDofs::iterator it =
615  data.dataOnEntities[MBTET][vv].getFieldDofs().begin();
616  it != data.dataOnEntities[MBTET][vv].getFieldDofs().end(); it++)
617  if ((*it) && (*it)->getActive()) {
618  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
619  "No base on Tet and side %d", vv);
620  }
621  }
622  ierr = doWork(vv, MBTET, data.dataOnEntities[MBTET][vv]);
623  CHKERRG(ierr);
624  }
625  }
626  if (do_prisms) {
627  for (unsigned int pp = 0; pp < data.dataOnEntities[MBPRISM].size(); pp++) {
628  if (error_if_no_base &&
629  data.dataOnEntities[MBPRISM][pp].getFieldData().size() &&
630  (data.dataOnEntities[MBPRISM][pp].getBase() == NOBASE ||
631  data.dataOnEntities[MBPRISM][pp].getBase() == LASTBASE)) {
632  for (VectorDofs::iterator it =
633  data.dataOnEntities[MBPRISM][pp].getFieldDofs().begin();
634  it != data.dataOnEntities[MBPRISM][pp].getFieldDofs().end(); it++)
635  if ((*it) && (*it)->getActive()) {
636  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
637  "No base on Prism and side %d", pp);
638  }
639  }
640  ierr = doWork(pp, MBPRISM, data.dataOnEntities[MBPRISM][pp]);
641  CHKERRG(ierr);
642  }
643  }
644  for (unsigned int mm = 0; mm < data.dataOnEntities[MBENTITYSET].size();
645  mm++) {
646  if (data.dataOnEntities[MBENTITYSET][mm].getIndices().empty() &&
647  data.dataOnEntities[MBENTITYSET][mm].getFieldData().empty())
648  continue;
649  ierr = doWork(mm, MBENTITYSET, data.dataOnEntities[MBENTITYSET][mm]);
650  CHKERRG(ierr);
651  }
652 
654 }
655 
// \brief Map H1 base-function derivatives from the reference element to the
// physical element by contracting with the (constant) inverse Jacobian:
//   diff_n(i) <- diff_n(j) * tInvJac(j, i)
// Applied to every approximation base and also to Bernstein-Bezier data.
//
// NOTE(review): doxygen extract -- the EntData parameter line of the
// signature, the FTensor::Tensor1 declarations (original lines 676/679) and
// the MoFEMFunctionBegin/Return lines were dropped by the extraction;
// restore against the upstream source before compiling.
656 MoFEMErrorCode OpSetInvJacH1::doWork(int side, EntityType type,
659 
660  auto transform_base = [&](MatrixDouble &diff_n,
661  const bool diff_at_gauss_ptr) {
663 
// Nothing to do for an empty derivative matrix.
664  if (!diff_n.size1())
666  if (!diff_n.size2())
668 
// Vertex derivatives (unless evaluated per gauss point) are the four
// constant tet-vertex gradients stored as a single row.
669  const int nb_base_functions =
670  (diff_at_gauss_ptr || type != MBVERTEX) ? diff_n.size2() / 3 : 4;
671  const int nb_gauss_pts =
672  (diff_at_gauss_ptr || type != MBVERTEX) ? diff_n.size1() : 1;
673  diffNinvJac.resize(diff_n.size1(), diff_n.size2(), false);
674 
675  double *t_diff_n_ptr = &*diff_n.data().begin();
677  t_diff_n_ptr, &t_diff_n_ptr[1], &t_diff_n_ptr[2]);
678  double *t_inv_n_ptr = &*diffNinvJac.data().begin();
680  t_inv_n_ptr, &t_inv_n_ptr[1], &t_inv_n_ptr[2]);
681 
682  switch (type) {
683 
684  case MBVERTEX:
685  case MBEDGE:
686  case MBTRI:
687  case MBTET: {
688  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
689  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
// Chain rule: reference gradient times inverse Jacobian.
690  t_inv_diff_n(i) = t_diff_n(j) * tInvJac(j, i);
691  ++t_diff_n;
692  ++t_inv_diff_n;
693  }
694  }
695 
696  } break;
697  default:
698  SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED, "not implemented");
699  }
700 
// Move the transformed values back into the caller's matrix without a copy.
701  diff_n.data().swap(diffNinvJac.data());
702 
704  };
705 
// Apply to the derivatives of every approximation base ...
706  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
707  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
708  CHKERR transform_base(data.getDiffN(base), false);
709  }
710 
// ... and to the Bernstein-Bezier derivative containers.
711  switch (type) {
712  case MBVERTEX:
713  for (auto &m : data.getBBDiffNMap())
714  CHKERR transform_base(*(m.second), true);
715  break;
716  default:
717  for (auto &ptr : data.getBBDiffNByOrderArray())
718  if (ptr)
719  CHKERR transform_base(*ptr, true);
720  }
721 
723 }
724 
// \brief Map H(div)/H(curl) vector base-function derivatives (3x3 blocks,
// 9 doubles per base function) to the physical element:
//   t_inv_diff_n(k, i) <- t_diff_n(k, j) * tInvJac(j, i)
//
// NOTE(review): doxygen extract -- the 'MoFEMErrorCode' line of the
// signature, the EntData parameter line, the FTensor::Tensor2 declaration
// (original line 746) and the MoFEMFunctionBegin/Return lines were dropped
// by the extraction; restore against the upstream source before compiling.
726 OpSetInvJacHdivAndHcurl::doWork(int side, EntityType type,
729 
// Only edge/triangle/tet entities carry these vector bases here.
730  if (type != MBEDGE && type != MBTRI && type != MBTET)
732 
733  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
734 
735  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
736 
737  const unsigned int nb_gauss_pts = data.getDiffN(base).size1();
// 9 components (3 vector components x 3 derivative directions) per function.
738  const unsigned int nb_base_functions = data.getDiffN(base).size2() / 9;
739  if (!nb_base_functions)
740  continue;
741 
742  diffHdivInvJac.resize(nb_gauss_pts, data.getDiffN(base).size2(), false);
743 
744  auto t_diff_n = data.getFTensor2DiffN<3, 3>(base);
745  double *t_inv_diff_n_ptr = &*diffHdivInvJac.data().begin();
747  t_inv_diff_n_ptr, &t_inv_diff_n_ptr[HVEC0_1],
748  &t_inv_diff_n_ptr[HVEC0_2],
749 
750  &t_inv_diff_n_ptr[HVEC1_0], &t_inv_diff_n_ptr[HVEC1_1],
751  &t_inv_diff_n_ptr[HVEC1_2],
752 
753  &t_inv_diff_n_ptr[HVEC2_0], &t_inv_diff_n_ptr[HVEC2_1],
754  &t_inv_diff_n_ptr[HVEC2_2]);
755 
756  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
757  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
// Contract the derivative index with the inverse Jacobian.
758  t_inv_diff_n(k, i) = t_diff_n(k, j) * tInvJac(j, i);
759  ++t_diff_n;
760  ++t_inv_diff_n;
761  }
762  }
763 
// Swap results back into the stored derivative matrix (no copy).
764  data.getDiffN(base).data().swap(diffHdivInvJac.data());
765  }
766 
768 }
769 
// \brief Piola transform of vector base functions and their derivatives,
// scaling by a = (1/6) / vOlume and contracting with tJac:
//   t_n(i) <- a * tJac(i, k) * t_n(k)
//   t_diff_n(i, k) <- a * tJac(i, j) * t_diff_n(j, k)
// (This is the contravariant form -- name not visible in this extract.)
//
// NOTE(review): doxygen extract -- the first signature line (the function
// name, original line 770), the FTensor declarations (original lines
// 794/811) and the MoFEMFunctionBegin/Return lines were dropped by the
// extraction; restore against the upstream source before compiling.
771  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
773 
774  if (type != MBTRI && type != MBTET)
776 
777  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
778 
779  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
780 
// Three components per vector base function.
781  const unsigned int nb_base_functions = data.getN(base).size2() / 3;
782  if (!nb_base_functions)
783  continue;
784 
785  const double c = 1. / 6.;
786  const unsigned int nb_gauss_pts = data.getN(base).size1();
787 
// Scale by 1/(6*volume) -- presumably the reference-tet volume factor of
// the Piola map; confirm against upstream documentation.
788  double const a = c / vOlume;
789 
// Transform the base-function values ...
790  piolaN.resize(nb_gauss_pts, data.getN(base).size2(), false);
791  if (data.getN(base).size2() > 0) {
792  auto t_n = data.getFTensor1N<3>(base);
793  double *t_transformed_n_ptr = &*piolaN.data().begin();
795  t_transformed_n_ptr, // HVEC0
796  &t_transformed_n_ptr[HVEC1], &t_transformed_n_ptr[HVEC2]);
797  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
798  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
799  t_transformed_n(i) = a * tJac(i, k) * t_n(k);
800  ++t_n;
801  ++t_transformed_n;
802  }
803  }
804  data.getN(base).data().swap(piolaN.data());
805  }
806 
// ... and their derivatives (9 components per base function).
807  piolaDiffN.resize(nb_gauss_pts, data.getDiffN(base).size2(), false);
808  if (data.getDiffN(base).size2() > 0) {
809  auto t_diff_n = data.getFTensor2DiffN<3, 3>(base);
810  double *t_transformed_diff_n_ptr = &*piolaDiffN.data().begin();
812  t_transformed_diff_n(t_transformed_diff_n_ptr,
813  &t_transformed_diff_n_ptr[HVEC0_1],
814  &t_transformed_diff_n_ptr[HVEC0_2],
815  &t_transformed_diff_n_ptr[HVEC1_0],
816  &t_transformed_diff_n_ptr[HVEC1_1],
817  &t_transformed_diff_n_ptr[HVEC1_2],
818  &t_transformed_diff_n_ptr[HVEC2_0],
819  &t_transformed_diff_n_ptr[HVEC2_1],
820  &t_transformed_diff_n_ptr[HVEC2_2]);
821  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
822  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
823  t_transformed_diff_n(i, k) = a * tJac(i, j) * t_diff_n(j, k);
824  ++t_diff_n;
825  ++t_transformed_diff_n;
826  }
827  }
828  data.getDiffN(base).data().swap(piolaDiffN.data());
829  }
830  }
831 
833 }
834 
// \brief Covariant Piola transform: maps vector base functions and their
// derivatives with the transposed inverse Jacobian:
//   t_n(i) <- tInvJac(k, i) * t_n(k)
//   t_diff_n(i, k) <- tInvJac(j, i) * t_diff_n(j, k)
//
// NOTE(review): doxygen extract -- the 'MoFEMErrorCode' line, the EntData
// parameter line, the FTensor::Tensor1 declaration (original line 857) and
// the MoFEMFunctionBegin/Return lines were dropped by the extraction;
// restore against the upstream source before compiling.
836 OpSetCovariantPiolaTransform::doWork(int side, EntityType type,
839 
840  if (type != MBEDGE && type != MBTRI && type != MBTET)
842 
843  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
844 
845  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
846 
847  const unsigned int nb_base_functions = data.getN(base).size2() / 3;
848  if (!nb_base_functions)
849  continue;
850 
851  const unsigned int nb_gauss_pts = data.getN(base).size1();
852  piolaN.resize(nb_gauss_pts, data.getN(base).size2(), false);
853  piolaDiffN.resize(nb_gauss_pts, data.getDiffN(base).size2(), false);
854 
855  auto t_n = data.getFTensor1N<3>(base);
856  double *t_transformed_n_ptr = &*piolaN.data().begin();
858  t_transformed_n_ptr, &t_transformed_n_ptr[HVEC1],
859  &t_transformed_n_ptr[HVEC2]);
860  auto t_diff_n = data.getFTensor2DiffN<3, 3>(base);
861  double *t_transformed_diff_n_ptr = &*piolaDiffN.data().begin();
862  FTensor::Tensor2<FTensor::PackPtr<double *, 9>, 3, 3> t_transformed_diff_n(
863  t_transformed_diff_n_ptr, &t_transformed_diff_n_ptr[HVEC0_1],
864  &t_transformed_diff_n_ptr[HVEC0_2], &t_transformed_diff_n_ptr[HVEC1_0],
865  &t_transformed_diff_n_ptr[HVEC1_1], &t_transformed_diff_n_ptr[HVEC1_2],
866  &t_transformed_diff_n_ptr[HVEC2_0], &t_transformed_diff_n_ptr[HVEC2_1],
867  &t_transformed_diff_n_ptr[HVEC2_2]);
868 
869  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
870  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
// Note the transposed contraction (tInvJac(k, i), not tInvJac(i, k)).
871  t_transformed_n(i) = tInvJac(k, i) * t_n(k);
872  t_transformed_diff_n(i, k) = tInvJac(j, i) * t_diff_n(j, k);
873  ++t_n;
874  ++t_transformed_n;
875  ++t_diff_n;
876  ++t_transformed_diff_n;
877  }
878  }
// Swap transformed values/derivatives back into the stored matrices.
879  data.getN(base).data().swap(piolaN.data());
880  data.getDiffN(base).data().swap(piolaDiffN.data());
881  }
882 
883  // data.getBase() = base;
884 
886 }
887 
// \brief Like OpSetInvJacH1, but with a higher-order (per-gauss-point)
// inverse Jacobian stored row-wise (9 doubles per gauss point) in invHoJac:
//   t_inv_diff_n(i) <- t_diff_n(j) * t_inv_jac(j, i), advancing t_inv_jac
// once per gauss point.
//
// NOTE(review): doxygen extract -- the 'MoFEMErrorCode' line, the EntData
// parameter line, the FTensor::Tensor2 declaration (original line 912), the
// FTensor::Tensor1 declaration (original line 920) and the
// MoFEMFunctionBegin/Return lines were dropped by the extraction; restore
// against the upstream source before compiling.
889 OpSetHoInvJacH1::doWork(int side, EntityType type,
892 
// Sanity check: invHoJac must hold 9 components (a 3x3) per gauss point.
893  if (invHoJac.size2() != 9)
894  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
895  "It looks that ho inverse of Jacobian is not calculated %d != 9",
896  invHoJac.size2());
897 
898  auto transform_base = [&](MatrixDouble &diff_n) {
900 
901  unsigned int nb_gauss_pts = diff_n.size1();
902  if (nb_gauss_pts == 0)
904 
// Only transform when the Jacobian was evaluated at these gauss points.
905  if (invHoJac.size1() == nb_gauss_pts) {
906 
907  unsigned int nb_base_functions = diff_n.size2() / 3;
908  if (nb_base_functions == 0)
910 
911  double *t_inv_jac_ptr = &*invHoJac.data().begin();
913  t_inv_jac_ptr, &t_inv_jac_ptr[1], &t_inv_jac_ptr[2],
914  &t_inv_jac_ptr[3], &t_inv_jac_ptr[4], &t_inv_jac_ptr[5],
915  &t_inv_jac_ptr[6], &t_inv_jac_ptr[7], &t_inv_jac_ptr[8], 9);
916 
917  diffNinvJac.resize(nb_gauss_pts, 3 * nb_base_functions, false);
918 
919  double *t_diff_n_ptr = &*diff_n.data().begin();
921  t_diff_n_ptr, &t_diff_n_ptr[1], &t_diff_n_ptr[2]);
922  double *t_inv_n_ptr = &*diffNinvJac.data().begin();
923  FTensor::Tensor1<double *, 3> t_inv_diff_n(t_inv_n_ptr, &t_inv_n_ptr[1],
924  &t_inv_n_ptr[2], 3);
925 
926  switch (type) {
927  case MBVERTEX:
928  case MBEDGE:
929  case MBTRI:
930  case MBTET: {
931  for (unsigned int gg = 0; gg < nb_gauss_pts; ++gg) {
932  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
933  t_inv_diff_n(i) = t_diff_n(j) * t_inv_jac(j, i);
934  ++t_diff_n;
935  ++t_inv_diff_n;
936  }
// Higher-order map: the inverse Jacobian varies per gauss point.
937  ++t_inv_jac;
938  }
939  } break;
940  default:
941  SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED, "not implemented");
942  }
943 
944  diff_n.data().swap(diffNinvJac.data());
945  }
947  };
948 
// Apply to every approximation base ...
949  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
950  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
951  CHKERR transform_base(data.getDiffN(base));
952  }
953 
// ... and to the Bernstein-Bezier derivative containers.
954  switch (type) {
955  case MBVERTEX:
956  for (auto &m : data.getBBDiffNMap())
957  CHKERR transform_base(*(m.second));
958  break;
959  default:
960  for (auto &ptr : data.getBBDiffNByOrderArray())
961  if (ptr)
962  CHKERR transform_base(*ptr);
963  }
964 
966 }
967 
969 OpSetHoInvJacHdivAndHcurl::doWork(int side, EntityType type,
972 
  // Applies the higher-order inverse Jacobian to derivatives of vector-valued
  // (Hdiv/Hcurl) base functions: each base function has a 3x3 derivative
  // block (9 columns per function), hence size2() / 9 below.
  // NOTE(review): doxygen-extracted listing; some source lines are elided.
973  if (type != MBEDGE && type != MBTRI && type != MBTET)
975 
976  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
977 
978  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
979 
980  diffHdivInvJac.resize(data.getDiffN(base).size1(),
981  data.getDiffN(base).size2(), false);
982 
983  unsigned int nb_gauss_pts = data.getDiffN(base).size1();
984  unsigned int nb_base_functions = data.getDiffN(base).size2() / 9;
  // Base not present on this entity for this approximation family.
985  if (nb_base_functions == 0)
986  continue;
987 
988  auto t_diff_n = data.getFTensor2DiffN<3, 3>(base);
989  double *t_inv_diff_n_ptr = &*diffHdivInvJac.data().begin();
991  t_inv_diff_n_ptr, &t_inv_diff_n_ptr[HVEC0_1],
992  &t_inv_diff_n_ptr[HVEC0_2], &t_inv_diff_n_ptr[HVEC1_0],
993  &t_inv_diff_n_ptr[HVEC1_1], &t_inv_diff_n_ptr[HVEC1_2],
994  &t_inv_diff_n_ptr[HVEC2_0], &t_inv_diff_n_ptr[HVEC2_1],
995  &t_inv_diff_n_ptr[HVEC2_2]);
996  double *t_inv_jac_ptr = &*invHoJac.data().begin();
998  t_inv_jac_ptr, &t_inv_jac_ptr[1], &t_inv_jac_ptr[2], &t_inv_jac_ptr[3],
999  &t_inv_jac_ptr[4], &t_inv_jac_ptr[5], &t_inv_jac_ptr[6],
1000  &t_inv_jac_ptr[7], &t_inv_jac_ptr[8]);
1001 
  // dN_phys(i,j) = invJac(k,j) * dN_ref(i,k) — only the derivative index
  // is pulled back; the vector component index i is untouched here.
1002  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
1003  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
1004  t_inv_diff_n(i, j) = t_inv_jac(k, j) * t_diff_n(i, k);
1005  ++t_diff_n;
1006  ++t_inv_diff_n;
1007  }
1008  ++t_inv_jac;
1009  }
1010 
  // Swap transformed data into the entity storage (avoids a copy).
1011  data.getDiffN(base).data().swap(diffHdivInvJac.data());
1012  }
1013 
1015 }
1016 
  // Contravariant (Hdiv-style) Piola transform with higher-order geometry:
  // N_phys = (1/detJ) * J * N_ref, and likewise for derivatives, using the
  // per-gauss-point Jacobian (hoJac) and its determinant (detHoJac).
  // NOTE(review): the function name line is elided in this doxygen-extracted
  // listing — presumably the HO contravariant Piola operator; verify in repo.
1018  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
1020 
1021  if (type != MBTRI && type != MBTET)
1023 
1024  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
1025 
1026  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
1027 
  // Vector base: 3 columns per base function; derivatives use 9.
1028  unsigned int nb_gauss_pts = data.getN(base).size1();
1029  unsigned int nb_base_functions = data.getN(base).size2() / 3;
1030  piolaN.resize(nb_gauss_pts, data.getN(base).size2(), false);
1031  piolaDiffN.resize(nb_gauss_pts, data.getDiffN(base).size2(), false);
1032 
1033  auto t_n = data.getFTensor1N<3>(base);
1034  double *t_transformed_n_ptr = &*piolaN.data().begin();
1036  t_transformed_n_ptr, // HVEC0
1037  &t_transformed_n_ptr[HVEC1], &t_transformed_n_ptr[HVEC2]);
1038  auto t_diff_n = data.getFTensor2DiffN<3, 3>(base);
1039  double *t_transformed_diff_n_ptr = &*piolaDiffN.data().begin();
1040  FTensor::Tensor2<FTensor::PackPtr<double *, 9>, 3, 3> t_transformed_diff_n(
1041  t_transformed_diff_n_ptr, &t_transformed_diff_n_ptr[HVEC0_1],
1042  &t_transformed_diff_n_ptr[HVEC0_2], &t_transformed_diff_n_ptr[HVEC1_0],
1043  &t_transformed_diff_n_ptr[HVEC1_1], &t_transformed_diff_n_ptr[HVEC1_2],
1044  &t_transformed_diff_n_ptr[HVEC2_0], &t_transformed_diff_n_ptr[HVEC2_1],
1045  &t_transformed_diff_n_ptr[HVEC2_2]);
1046 
1047  FTensor::Tensor0<double *> t_det(&*detHoJac.data().begin());
1048  double *t_jac_ptr = &*hoJac.data().begin();
1050  t_jac_ptr, &t_jac_ptr[1], &t_jac_ptr[2], &t_jac_ptr[3], &t_jac_ptr[4],
1051  &t_jac_ptr[5], &t_jac_ptr[6], &t_jac_ptr[7], &t_jac_ptr[8]);
1052 
1053  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
1054  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
  // a = 1/detJ is loop-invariant per gauss point; recomputed per base
  // function here (t_det only advances in the outer loop).
1055  const double a = 1. / t_det;
1056  t_transformed_n(i) = a * t_jac(i, k) * t_n(k);
1057  t_transformed_diff_n(i, k) = a * t_jac(i, j) * t_diff_n(j, k);
1058  ++t_n;
1059  ++t_transformed_n;
1060  ++t_diff_n;
1061  ++t_transformed_diff_n;
1062  }
  // Jacobian and determinant advance once per gauss point.
1063  ++t_det;
1064  ++t_jac;
1065  }
1066 
1067  data.getN(base).data().swap(piolaN.data());
1068  data.getDiffN(base).data().swap(piolaDiffN.data());
1069  }
1070 
1072 }
1073 
  // Covariant (Hcurl-style) Piola transform with higher-order geometry:
  // N_phys(i) = invJac(k,i) * N_ref(k), using the per-gauss-point inverse
  // Jacobian (hoInvJac).
  // NOTE(review): function name line elided in this doxygen-extracted
  // listing — presumably the HO covariant Piola operator; verify in repo.
1075  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
1077 
1078  if (type != MBEDGE && type != MBTRI && type != MBTET)
1080 
1081  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
1082 
1083  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
1084 
  // Vector base: 3 columns per base function; derivatives use 9.
1085  unsigned int nb_gauss_pts = data.getN(base).size1();
1086  unsigned int nb_base_functions = data.getN(base).size2() / 3;
1087  piolaN.resize(nb_gauss_pts, data.getN(base).size2(), false);
1088  piolaDiffN.resize(nb_gauss_pts, data.getDiffN(base).size2(), false);
1089 
1090  auto t_n = data.getFTensor1N<3>(base);
1091  double *t_transformed_n_ptr = &*piolaN.data().begin();
1093  t_transformed_n_ptr, // HVEC0
1094  &t_transformed_n_ptr[HVEC1], &t_transformed_n_ptr[HVEC2]);
1095  auto t_diff_n = data.getFTensor2DiffN<3, 3>(base);
1096  double *t_transformed_diff_n_ptr = &*piolaDiffN.data().begin();
1097  FTensor::Tensor2<FTensor::PackPtr<double *, 9>, 3, 3> t_transformed_diff_n(
1098  t_transformed_diff_n_ptr, &t_transformed_diff_n_ptr[HVEC0_1],
1099  &t_transformed_diff_n_ptr[HVEC0_2], &t_transformed_diff_n_ptr[HVEC1_0],
1100  &t_transformed_diff_n_ptr[HVEC1_1], &t_transformed_diff_n_ptr[HVEC1_2],
1101  &t_transformed_diff_n_ptr[HVEC2_0], &t_transformed_diff_n_ptr[HVEC2_1],
1102  &t_transformed_diff_n_ptr[HVEC2_2]);
1103 
1104  double *t_inv_jac_ptr = &*hoInvJac.data().begin();
1106  t_inv_jac_ptr, &t_inv_jac_ptr[1], &t_inv_jac_ptr[2], &t_inv_jac_ptr[3],
1107  &t_inv_jac_ptr[4], &t_inv_jac_ptr[5], &t_inv_jac_ptr[6],
1108  &t_inv_jac_ptr[7], &t_inv_jac_ptr[8], 9);
1109 
1110  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
1111  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
  // Covariant map: transpose of inverse Jacobian applied to values
  // and to the vector index of the derivatives.
1112  t_transformed_n(i) = t_inv_jac(k, i) * t_n(k);
1113  t_transformed_diff_n(i, k) = t_inv_jac(j, i) * t_diff_n(j, k);
1114  ++t_n;
1115  ++t_transformed_n;
1116  ++t_diff_n;
1117  ++t_transformed_diff_n;
1118  }
  // Inverse Jacobian advances once per gauss point.
1119  ++t_inv_jac;
1120  }
1121 
1122  data.getN(base).data().swap(piolaN.data());
1123  data.getDiffN(base).data().swap(piolaDiffN.data());
1124  }
1125 
1127 }
1128 
1130 OpGetCoordsAndNormalsOnFace::doWork(int side, EntityType type,
1133 
  // Accumulates, per gauss point on a face: coordinates and the two surface
  // tangent vectors, from the (vector, 3-component) geometry field dofs on
  // vertices, edges, and the face itself.
  // NOTE(review): doxygen-extracted listing; some source lines are elided.
1134  unsigned int nb_dofs = data.getFieldData().size();
1135  if (nb_dofs == 0)
1137 
1138  int nb_gauss_pts = data.getN().size1();
1139  cOords_at_GaussPt.resize(nb_gauss_pts, 3, false);
1140  tAngent1_at_GaussPt.resize(nb_gauss_pts, 3, false);
1141  tAngent2_at_GaussPt.resize(nb_gauss_pts, 3, false);
1142 
1143  auto get_ftensor1 = [](MatrixDouble &m) {
1145  &m(0, 0), &m(0, 1), &m(0, 2));
1146  };
1147  auto t_coords = get_ftensor1(cOords_at_GaussPt);
1148  auto t_t1 = get_ftensor1(tAngent1_at_GaussPt);
1149  auto t_t2 = get_ftensor1(tAngent2_at_GaussPt);
  // N0/N1 pick the two reference-surface derivative directions.
1151  FTensor::Number<0> N0;
1152  FTensor::Number<1> N1;
1153 
1154  switch (type) {
  // Vertices are processed first: zero the accumulators, then fall
  // through (intentionally — no break) to the accumulation code below.
1155  case MBVERTEX: {
1156  cOords_at_GaussPt.clear();
1157  tAngent1_at_GaussPt.clear();
1158  tAngent2_at_GaussPt.clear();
1159  }
1160  case MBEDGE:
1161  case MBTRI:
1162  case MBQUAD: {
  // Surface base: 2 derivative components per base function.
1163  if (2 * data.getN().size2() != data.getDiffN().size2()) {
1164  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "data inconsistency");
1165  }
1166  if (nb_dofs % 3 != 0) {
1167  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "data inconsistency");
1168  }
  // More dofs than base functions can happen with partially-active
  // dofs; count the leading active ones and use only those.
1169  if (nb_dofs > 3 * data.getN().size2()) {
1170  unsigned int nn = 0;
1171  for (; nn != nb_dofs; nn++) {
1172  if (!data.getFieldDofs()[nn]->getActive())
1173  break;
1174  }
1175  if (nn > 3 * data.getN().size2()) {
1176  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
1177  "data inconsistency for base %s",
1179  } else {
1180  nb_dofs = nn;
1181  if (!nb_dofs)
1183  }
1184  }
1185  const int nb_base_functions = data.getN().size2();
1186  auto t_base = data.getFTensor0N();
1187  auto t_diff_base = data.getFTensor1DiffN<2>();
1188  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
1189  auto t_data = data.getFTensor1FieldData<3>();
1190  int bb = 0;
  // coords += N * x_dof; tangents += x_dof * dN/dxi{0,1}.
1191  for (; bb != nb_dofs / 3; ++bb) {
1192  t_coords(i) += t_base * t_data(i);
1193  t_t1(i) += t_data(i) * t_diff_base(N0);
1194  t_t2(i) += t_data(i) * t_diff_base(N1);
1195  ++t_data;
1196  ++t_base;
1197  ++t_diff_base;
1198  }
  // Skip remaining base functions so iterators stay aligned per point.
1199  for (; bb != nb_base_functions; ++bb) {
1200  ++t_base;
1201  ++t_diff_base;
1202  }
1203  ++t_coords;
1204  ++t_t1;
1205  ++t_t2;
1206  }
1207  } break;
1208  default:
1209  SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED, "not implemented");
1210  }
1211 
1213 }
1214 
1217 
  // Computes the (non-normalized) normal at each gauss point as the cross
  // product of the two tangent vectors: n_j = eps_ijk * t1_k * t2_i.
  // NOTE(review): the function signature is elided in this doxygen-extracted
  // listing — presumably the normals-calculation member of the face operator.
1218  nOrmals_at_GaussPt.resize(tAngent1_at_GaussPt.size1(), 3, false);
1219 
1220  auto get_ftensor1 = [](MatrixDouble &m) {
1222  &m(0, 0), &m(0, 1), &m(0, 2));
1223  };
1224  auto t_normal = get_ftensor1(nOrmals_at_GaussPt);
1225  auto t_t1 = get_ftensor1(tAngent1_at_GaussPt);
1226  auto t_t2 = get_ftensor1(tAngent2_at_GaussPt);
1227 
1231 
1232  for (unsigned int gg = 0; gg != tAngent1_at_GaussPt.size1(); ++gg) {
1233  t_normal(j) = FTensor::levi_civita(i, j, k) * t_t1(k) * t_t2(i);
1234  ++t_normal;
1235  ++t_t1;
1236  ++t_t2;
1237  }
1238 
1240 }
1241 
1243 OpGetCoordsAndNormalsOnPrism::doWork(int side, EntityType type,
1246 
  // Accumulates coordinates and tangents on the two triangular faces (F3 and
  // F4) of a prism. The valid_* tables select which edges/faces of the prism
  // belong to face 3 (indices 0-2 edges, face 3) vs face 4 (indices 6-8
  // edges, face 4); other sub-entities are skipped.
  // NOTE(review): doxygen-extracted listing; some source lines are elided.
1247  if (data.getFieldData().size() == 0)
1249  const int valid_edges3[] = {1, 1, 1, 0, 0, 0, 0, 0, 0};
1250  const int valid_faces3[] = {0, 0, 0, 1, 0, 0, 0, 0, 0};
1251  const int valid_edges4[] = {0, 0, 0, 0, 0, 0, 1, 1, 1};
1252  const int valid_faces4[] = {0, 0, 0, 0, 1, 0, 0, 0, 0};
1253 
1254  if (type == MBEDGE) {
1255  if (!valid_edges3[side] || valid_edges4[side])
1257  } else if (type == MBTRI) {
1258  if (!valid_faces3[side] || valid_faces4[side])
1260  }
1261 
1262  switch (type) {
1263  case MBVERTEX: {
  // Vertex contribution: first 9 field values belong to face 3's three
  // vertices, values [9..17] to face 4's; stride 3 walks one component.
1264  for (unsigned int gg = 0; gg < data.getN().size1(); ++gg) {
1265  for (int dd = 0; dd < 3; dd++) {
1266  cOords_at_GaussPtF3(gg, dd) =
1267  cblas_ddot(3, &data.getN(gg)[0], 1, &data.getFieldData()[dd], 3);
1269  3, &data.getDiffN()(gg, 0), 2, &data.getFieldData()[dd], 3);
1271  3, &data.getDiffN()(gg, 1), 2, &data.getFieldData()[dd], 3);
1273  3, &data.getN(gg)[0], 1, &data.getFieldData()[9 + dd], 3);
1275  3, &data.getDiffN()(gg, 6 + 0), 2, &data.getFieldData()[9 + dd], 3);
1277  3, &data.getDiffN()(gg, 6 + 1), 2, &data.getFieldData()[9 + dd], 3);
1278  }
1279  }
1280  } break;
1281  case MBEDGE:
1282  case MBTRI: {
  // Surface base: 2 derivative components per base function.
1283  if (2 * data.getN().size2() != data.getDiffN().size2()) {
1284  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "data inconsistency");
1285  }
1286  unsigned int nb_dofs = data.getFieldData().size();
1287  if (nb_dofs % 3 != 0) {
1288  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "data inconsistency");
1289  }
1290  if (nb_dofs > 3 * data.getN().size2()) {
1291  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
1292  "data inconsistency, side %d type %d", side, type);
1293  }
  // Accumulate into the F3 or F4 buffers depending on which face this
  // edge/triangle belongs to (per the valid_* tables above).
1294  for (unsigned int gg = 0; gg < data.getN().size1(); ++gg) {
1295  for (int dd = 0; dd < 3; dd++) {
1296  if ((type == MBTRI && valid_faces3[side]) ||
1297  (type == MBEDGE && valid_edges3[side])) {
1299  nb_dofs / 3, &data.getN(gg)[0], 1, &data.getFieldData()[dd], 3);
1300  tAngent1_at_GaussPtF3(gg, dd) +=
1301  cblas_ddot(nb_dofs / 3, &data.getDiffN()(gg, 0), 2,
1302  &data.getFieldData()[dd], 3);
1303  tAngent2_at_GaussPtF3(gg, dd) +=
1304  cblas_ddot(nb_dofs / 3, &data.getDiffN()(gg, 1), 2,
1305  &data.getFieldData()[dd], 3);
1306  } else if ((type == MBTRI && valid_faces4[side]) ||
1307  (type == MBEDGE && valid_edges4[side])) {
1309  nb_dofs / 3, &data.getN(gg)[0], 1, &data.getFieldData()[dd], 3);
1310  tAngent1_at_GaussPtF4(gg, dd) +=
1311  cblas_ddot(nb_dofs / 3, &data.getDiffN()(gg, 0), 2,
1312  &data.getFieldData()[dd], 3);
1313  tAngent2_at_GaussPtF4(gg, dd) +=
1314  cblas_ddot(nb_dofs / 3, &data.getDiffN()(gg, 1), 2,
1315  &data.getFieldData()[dd], 3);
1316  }
1317  }
1318  }
1319  } break;
1320  default:
1321  SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED, "not implemented");
1322  }
1323 
1325 }
1326 
1329 
  // Computes normals on both prism triangle faces as n = t1 x t2, realized
  // as n = Spin(t1) * t2 (Spin builds the skew-symmetric cross-product
  // matrix; cblas_dgemv applies it to the second tangent).
  // NOTE(review): the function signature is elided in this doxygen-extracted
  // listing — presumably the normals-calculation member of the prism operator.
1330  sPin.resize(3, 3);
1331  sPin.clear();
1332  nOrmals_at_GaussPtF3.resize(tAngent1_at_GaussPtF3.size1(), 3, false);
1333  for (unsigned int gg = 0; gg < tAngent1_at_GaussPtF3.size1(); ++gg) {
1334  ierr = Spin(&*sPin.data().begin(), &tAngent1_at_GaussPtF3(gg, 0));
1335  CHKERRG(ierr);
1336  cblas_dgemv(CblasRowMajor, CblasNoTrans, 3, 3, 1., &*sPin.data().begin(), 3,
1337  &tAngent2_at_GaussPtF3(gg, 0), 1, 0.,
1338  &nOrmals_at_GaussPtF3(gg, 0), 1);
1339  }
  // Same for face 4.
1340  sPin.clear();
1341  nOrmals_at_GaussPtF4.resize(tAngent1_at_GaussPtF4.size1(), 3, false);
1342  for (unsigned int gg = 0; gg < tAngent1_at_GaussPtF4.size1(); ++gg) {
1343  ierr = Spin(&*sPin.data().begin(), &tAngent1_at_GaussPtF4(gg, 0));
1344  CHKERRG(ierr);
1345  cblas_dgemv(CblasRowMajor, CblasNoTrans, 3, 3, 1., &*sPin.data().begin(), 3,
1346  &tAngent2_at_GaussPtF4(gg, 0), 1, 0.,
1347  &nOrmals_at_GaussPtF4(gg, 0), 1);
1348  }
1350 }
1351 
  // Rescales Hdiv base functions on a triangle so they follow the face
  // normal: each base value v is replaced by (v / |n|^2) * n, using the
  // per-gauss-point HO normal when available, else the flat-face normal.
  // NOTE(review): function name line elided in this doxygen-extracted
  // listing — presumably the contravariant Piola transform on a face.
1353  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
1355 
1356  if (type != MBTRI)
1358 
1359  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
1360 
1361  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
1362 
1363  int nb_gauss_pts = data.getN(base).size1();
1364  if (nb_gauss_pts) {
1366  auto t_normal =
  // Squared length of the constant (flat-geometry) face normal.
1368  const double l02 = t_normal(i) * t_normal(i);
1369  int nb_base_functions = data.getN(base).size2() / 3;
1370  auto t_n = data.getFTensor1N<3>(base);
  // HO geometry present: normal varies per gauss point.
1371  if (normalsAtGaussPts.size1() == (unsigned int)nb_gauss_pts) {
1372  auto t_ho_normal =
1374  &normalsAtGaussPts(0, 0), &normalsAtGaussPts(0, 1),
1375  &normalsAtGaussPts(0, 2));
1376  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
1377  for (int bb = 0; bb != nb_base_functions; ++bb) {
  // v is the first (scalar magnitude) component of the base.
1378  const double v = t_n(0);
1379  const double l2 = t_ho_normal(i) * t_ho_normal(i);
1380  t_n(i) = (v / l2) * t_ho_normal(i);
1381  ++t_n;
1382  }
1383  ++t_ho_normal;
1384  }
1385  } else {
  // Flat geometry: a single normal for the whole face.
1386  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
1387  for (int bb = 0; bb != nb_base_functions; ++bb) {
1388  const double v = t_n(0);
1389  t_n(i) = (v / l02) * t_normal(i);
1390  ++t_n;
1391  }
1392  }
1393  }
1394  }
1395  }
1396 
1398 }
1399 
  // Covariant Piola transform of Hcurl base functions on a face: builds a
  // 3x3 frame [t0 | t1 | n] (columns: two tangents and the normal), inverts
  // it, and applies the inverse-transpose to base values and derivatives.
  // Uses per-gauss-point frames when HO geometry data is present.
  // NOTE(review): function name and several declaration lines are elided in
  // this doxygen-extracted listing; comments describe only visible code.
1401  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
1403 
1404  if (type != MBEDGE && type != MBTRI)
1406 
1410 
  // Frame matrix from the flat-geometry tangents/normal (column-wise).
1412  &tAngent0[0], &tAngent1[0], &nOrmal[0],
1413 
1414  &tAngent0[1], &tAngent1[1], &nOrmal[1],
1415 
1416  &tAngent0[2], &tAngent1[2], &nOrmal[2],
1417 
1418  3);
1419  double det;
1421  CHKERR determinantTensor3by3(t_m, det);
1422  CHKERR invertTensor3by3(t_m, det, t_inv_m);
1423 
1424  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; ++b) {
1425 
1426  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
1427 
1428  auto &baseN = data.getN(base);
1429  auto &diffBaseN = data.getDiffN(base);
1430 
  // Vector base: 3 columns per base function.
1431  int nb_dofs = baseN.size2() / 3;
1432  int nb_gauss_pts = baseN.size1();
1433 
1434  MatrixDouble piola_n(baseN.size1(), baseN.size2());
1435  MatrixDouble diff_piola_n(diffBaseN.size1(), diffBaseN.size2());
1436 
1437  if (nb_dofs > 0 && nb_gauss_pts > 0) {
1438 
1440  &baseN(0, HVEC0), &baseN(0, HVEC1), &baseN(0, HVEC2));
1442  &diffBaseN(0, HVEC0_0), &diffBaseN(0, HVEC0_1),
1443  &diffBaseN(0, HVEC1_0), &diffBaseN(0, HVEC1_1),
1444  &diffBaseN(0, HVEC2_0), &diffBaseN(0, HVEC2_1));
1445  FTensor::Tensor1<FTensor::PackPtr<double *, 3>, 3> t_transformed_h_curl(
1446  &piola_n(0, HVEC0), &piola_n(0, HVEC1), &piola_n(0, HVEC2));
1448  t_transformed_diff_h_curl(
1449  &diff_piola_n(0, HVEC0_0), &diff_piola_n(0, HVEC0_1),
1450  &diff_piola_n(0, HVEC1_0), &diff_piola_n(0, HVEC1_1),
1451  &diff_piola_n(0, HVEC2_0), &diff_piola_n(0, HVEC2_1));
1452 
  // cc counts processed (gauss point, dof) pairs as a sanity check.
1453  int cc = 0;
1454  if (normalsAtGaussPts.size1() == (unsigned int)nb_gauss_pts) {
1455  // HO geometry is set, so jacobian is different at each gauss point
1457  &tangent0AtGaussPt(0, 0), &tangent1AtGaussPt(0, 0),
1458  &normalsAtGaussPts(0, 0), &tangent0AtGaussPt(0, 1),
1459  &tangent1AtGaussPt(0, 1), &normalsAtGaussPts(0, 1),
1460  &tangent0AtGaussPt(0, 2), &tangent1AtGaussPt(0, 2),
1461  &normalsAtGaussPts(0, 2), 3);
1462  for (int gg = 0; gg < nb_gauss_pts; ++gg) {
  // Re-invert the frame at each gauss point.
1463  CHKERR determinantTensor3by3(t_m_at_pts, det);
1464  CHKERR invertTensor3by3(t_m_at_pts, det, t_inv_m);
1465  for (int ll = 0; ll != nb_dofs; ll++) {
1466  t_transformed_h_curl(i) = t_inv_m(j, i) * t_h_curl(j);
1467  t_transformed_diff_h_curl(i, k) =
1468  t_inv_m(j, i) * t_diff_h_curl(j, k);
1469  ++t_h_curl;
1470  ++t_transformed_h_curl;
1471  ++t_diff_h_curl;
1472  ++t_transformed_diff_h_curl;
1473  ++cc;
1474  }
1475  ++t_m_at_pts;
1476  }
1477  } else {
  // Flat geometry: one inverse frame for all gauss points.
1478  for (int gg = 0; gg < nb_gauss_pts; ++gg) {
1479  for (int ll = 0; ll != nb_dofs; ll++) {
1480  t_transformed_h_curl(i) = t_inv_m(j, i) * t_h_curl(j);
1481  t_transformed_diff_h_curl(i, k) =
1482  t_inv_m(j, i) * t_diff_h_curl(j, k);
1483  ++t_h_curl;
1484  ++t_transformed_h_curl;
1485  ++t_diff_h_curl;
1486  ++t_transformed_diff_h_curl;
1487  ++cc;
1488  }
1489  }
1490  }
1491  if (cc != nb_gauss_pts * nb_dofs)
1492  SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSIBLE_CASE, "Data inconsistency");
1493 
  // Swap transformed data into the entity storage (avoids a copy).
1494  baseN.data().swap(piola_n.data());
1495  diffBaseN.data().swap(diff_piola_n.data());
1496  }
1497  }
1498 
1500 }
1501 
1503 OpGetHoTangentOnEdge::doWork(int side, EntityType type,
1506 
  // Accumulates the higher-order tangent vector along an edge at each gauss
  // point, by dotting base-function derivatives with the 3-component
  // geometry field dofs (stride 3 walks one component of the dof vector).
  // NOTE(review): doxygen-extracted listing; some source lines are elided.
1507  int nb_dofs = data.getFieldData().size();
1508  if (nb_dofs == 0)
1510 
1511  int nb_gauss_pts = data.getN().size1();
1512  tAngent.resize(nb_gauss_pts, 3, false);
1513 
1514  int nb_approx_fun = data.getN().size2();
1515  double *diff = &*data.getDiffN().data().begin();
1516  double *dofs[] = {&data.getFieldData()[0], &data.getFieldData()[1],
1517  &data.getFieldData()[2]};
1518 
  // NOTE(review): second resize to the same shape — redundant with the
  // resize above as far as the visible code shows.
1519  tAngent.resize(nb_gauss_pts, 3, false);
1520 
1521  switch (type) {
  // Vertex contribution initializes the tangent (assignment, not +=);
  // both vertex dofs use the same derivative pair.
1522  case MBVERTEX:
1523  for (int dd = 0; dd != 3; dd++) {
1524  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
1525  tAngent(gg, dd) = cblas_ddot(2, diff, 1, dofs[dd], 3);
1526  }
1527  }
1528  break;
  // Edge (higher-order) contribution accumulates on top of the vertex
  // part, using per-gauss-point rows of the derivative matrix.
1529  case MBEDGE:
1530  if (nb_dofs % 3) {
1531  SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSIBLE_CASE,
1532  "Approximated field should be rank 3, i.e. vector in 3d space");
1533  }
1534  for (int dd = 0; dd != 3; dd++) {
1535  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
1536  tAngent(gg, dd) +=
1537  cblas_ddot(nb_dofs / 3, &diff[gg * nb_approx_fun], 1, dofs[dd], 3);
1538  }
1539  }
1540  break;
1541  default:
1542  SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSIBLE_CASE,
1543  "This operator can calculate tangent vector only on edge");
1544  }
1545 
1547 }
1548 
  // Rescales Hcurl base functions on an edge to follow the edge tangent:
  // each base value v becomes (v / |t|^2) * t, with the per-gauss-point HO
  // tangent when available, else the constant edge tangent.
  // NOTE(review): function name and some declaration lines are elided in
  // this doxygen-extracted listing; comments describe only visible code.
1550  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
1552 
1553  if (type != MBEDGE)
1555 
1558  &tAngent[0], &tAngent[1], &tAngent[2]);
  // Squared length of the constant (flat-geometry) edge tangent.
1559  const double l0 = t_m(i) * t_m(i);
1560 
  // Tensor view over the base functions of the given base.
1561  auto get_base_at_pts = [&](auto base) {
1563  &data.getN(base)(0, HVEC0), &data.getN(base)(0, HVEC1),
1564  &data.getN(base)(0, HVEC2));
1565  return t_h_curl;
1566  };
1567 
  // Tensor view over the per-gauss-point HO tangents.
1568  auto get_tangent_at_pts = [&]() {
1570  &tangentAtGaussPt(0, 0), &tangentAtGaussPt(0, 1),
1571  &tangentAtGaussPt(0, 2));
1572  return t_m_at_pts;
1573  };
1574 
  // Precompute |t|^2 per gauss point (empty if no HO tangents are set).
1575  auto calculate_squared_edge_length = [&]() {
1576  std::vector<double> l1;
1577  int nb_gauss_pts = tangentAtGaussPt.size1();
1578  if (nb_gauss_pts) {
1579  l1.resize(nb_gauss_pts);
1580  auto t_m_at_pts = get_tangent_at_pts();
1581  for (size_t gg = 0; gg != nb_gauss_pts; ++gg) {
1582  l1[gg] = t_m_at_pts(i) * t_m_at_pts(i);
1583  ++t_m_at_pts;
1584  }
1585  }
1586  return l1;
1587  };
1588 
1589  auto l1 = calculate_squared_edge_length();
1590 
1591  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
1592 
1593  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
1594  const size_t nb_gauss_pts = data.getN(base).size1();
1595  const size_t nb_dofs = data.getN(base).size2() / 3;
1596  if (nb_gauss_pts && nb_dofs) {
1597  auto t_h_curl = get_base_at_pts(base);
  // cc counts processed (gauss point, dof) pairs as a sanity check.
1598  int cc = 0;
  // HO geometry present: tangent (and its length) vary per point.
1599  if (tangentAtGaussPt.size1() == nb_gauss_pts) {
1600  auto t_m_at_pts = get_tangent_at_pts();
1601  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
  // Shadows the outer l0 with this point's squared length.
1602  const double l0 = l1[gg];
1603  for (int ll = 0; ll != nb_dofs; ll++) {
  // val is the first (scalar magnitude) component of the base.
1604  const double val = t_h_curl(0);
1605  const double a = val / l0;
1606  t_h_curl(i) = t_m_at_pts(i) * a;
1607  ++t_h_curl;
1608  ++cc;
1609  }
1610  ++t_m_at_pts;
1611  }
1612  } else {
  // Flat geometry: one tangent for the whole edge.
1613  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
1614  for (int ll = 0; ll != nb_dofs; ll++) {
1615  const double val = t_h_curl(0);
1616  const double a = val / l0;
1617  t_h_curl(i) = t_m(i) * a;
1618  ++t_h_curl;
1619  ++cc;
1620  }
1621  }
1622  }
1623 
1624  if (cc != nb_gauss_pts * nb_dofs)
1625  SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSIBLE_CASE, "Data inconsistency");
1626  }
1627  }
1628 
1630 }
1631 
1632 template <>
1633 template <>
  // Specialization returning a rank-1 tensor view (3 components, stride 3)
  // over the values-at-gauss-points matrix.
  // NOTE(review): the declaration lines between 1633 and 1636 are elided in
  // this doxygen-extracted listing.
1636  double *ptr = &*data.data().begin();
1637  return FTensor::Tensor1<double *, 3>(ptr, &ptr[1], &ptr[2], 3);
1638 }
1639 
1640 template <>
1641 template <>
  // Specialization returning a rank-2 tensor view (3x3 row-major, stride 9)
  // over the gradients-at-gauss-points matrix.
  // NOTE(review): the declaration lines between 1641 and 1644 are elided in
  // this doxygen-extracted listing.
1644  double *ptr = &*data.data().begin();
1645  return FTensor::Tensor2<double *, 3, 3>(ptr, &ptr[1], &ptr[2], &ptr[3],
1646  &ptr[4], &ptr[5], &ptr[6], &ptr[7],
1647  &ptr[8], 9);
1648 }
1649 
1650 template <>
  // Rank-3 field specialization: accumulates field values (3-vector) and
  // gradients (3x3) at each gauss point from the dofs on this entity.
  // NOTE(review): doxygen-extracted listing; some source lines are elided.
1652  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
1654  if (data.getBase() == NOBASE)
1656  const unsigned int nb_gauss_pts = data.getN().size1();
1657  const unsigned int nb_base_functions = data.getN().size2();
1658  const unsigned int nb_dofs = data.getFieldData().size();
1659  if (!nb_dofs)
1661  auto t_n = data.getFTensor0N();
1662  auto t_val = getValAtGaussPtsTensor<3>(dataAtGaussPts);
1663  auto t_grad = getGradAtGaussPtsTensor<3, 3>(dataGradAtGaussPts);
  // Fast path: vertex data where derivatives are constant over the element
  // (only one set of 3 derivative components per base function is stored),
  // so the derivative iterator restarts for each gauss point.
1666  if (type == MBVERTEX &&
1667  data.getDiffN().data().size() == 3 * nb_base_functions) {
1668  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
1669  auto t_data = data.getFTensor1FieldData<3>();
1670  auto t_diff_n = data.getFTensor1DiffN<3>();
1671  unsigned int bb = 0;
1672  for (; bb != nb_dofs / 3; ++bb) {
1673  t_val(i) += t_data(i) * t_n;
1674  t_grad(i, j) += t_data(i) * t_diff_n(j);
1675  ++t_n;
1676  ++t_diff_n;
1677  ++t_data;
1678  }
1679  ++t_val;
1680  ++t_grad;
  // Skip base functions without dofs so t_n stays aligned per point.
1681  for (; bb != nb_base_functions; ++bb) {
1682  ++t_n;
1683  }
1684  }
1685  } else {
  // General path: derivatives stored per gauss point; the derivative
  // iterator runs continuously across all points.
1686  auto t_diff_n = data.getFTensor1DiffN<3>();
1687  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
1688  auto t_data = data.getFTensor1FieldData<3>();
1689  unsigned int bb = 0;
1690  for (; bb != nb_dofs / 3; ++bb) {
1691  t_val(i) += t_data(i) * t_n;
1692  t_grad(i, j) += t_data(i) * t_diff_n(j);
1693  ++t_n;
1694  ++t_diff_n;
1695  ++t_data;
1696  }
1697  ++t_val;
1698  ++t_grad;
1699  for (; bb != nb_base_functions; ++bb) {
1700  ++t_n;
1701  ++t_diff_n;
1702  }
1703  }
1704  }
1706 }
1707 
1708 template <>
  // Scalar (rank-1) field specialization: accumulates field value and its
  // 3-component gradient at each gauss point.
  // NOTE(review): doxygen-extracted listing; some source lines are elided.
1710  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
1712  const unsigned int nb_gauss_pts = data.getN().size1();
1713  const unsigned int nb_base_functions = data.getN().size2();
1714  // bool constant_diff = false;
1715  const unsigned int nb_dofs = data.getFieldData().size();
1716  auto t_n = data.getFTensor0N();
1718  FTensor::Tensor0<double *>(&*dataAtGaussPts.data().begin(), 1);
1719  double *ptr = &*dataGradAtGaussPts.data().begin();
1720  FTensor::Tensor1<FTensor::PackPtr<double *, 3>, 3> t_grad(ptr, &ptr[1],
1721  &ptr[2]);
  // Fast path: vertex data with element-constant derivatives (see the
  // rank-3 variant above for the layout rationale).
  // NOTE(review): `nb_dofs / 3` for a scalar field looks suspect — the
  // rank-3 variant divides by 3 because dofs come in 3-vectors; confirm
  // against the repository whether this should be plain `nb_dofs`.
1723  if (type == MBVERTEX &&
1724  data.getDiffN().data().size() == 3 * nb_base_functions) {
1725  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
1726  auto t_data = data.getFTensor0FieldData();
1727  auto t_diff_n = data.getFTensor1DiffN<3>();
1728  unsigned int bb = 0;
1729  for (; bb != nb_dofs / 3; ++bb) {
1730  t_val += t_data * t_n;
1731  t_grad(i) += t_data * t_diff_n(i);
1732  ++t_n;
1733  ++t_diff_n;
1734  ++t_data;
1735  }
1736  ++t_val;
1737  ++t_grad;
1738  for (; bb != nb_base_functions; ++bb) {
1739  ++t_n;
1740  }
1741  }
1742  } else {
1743  auto t_diff_n = data.getFTensor1DiffN<3>();
1744  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
1745  auto t_data = data.getFTensor0FieldData();
1746  unsigned int bb = 0;
1747  for (; bb != nb_dofs / 3; ++bb) {
  // NOTE(review): plain assignment here, while the MBVERTEX branch
  // (and the rank-3 variant) use `+=` — likely should accumulate;
  // verify against the upstream source before changing.
1748  t_val = t_data * t_n;
1749  t_grad(i) += t_data * t_diff_n(i);
1750  ++t_n;
1751  ++t_diff_n;
1752  ++t_data;
1753  }
1754  ++t_val;
1755  ++t_grad;
1756  for (; bb != nb_base_functions; ++bb) {
1757  ++t_n;
1758  ++t_diff_n;
1759  }
1760  }
1761  }
1763 }
1764 } // namespace MoFEM
MatrixDouble diffNinvJac
static FTensor::Tensor0< FTensor::PackPtr< double *, 1 > > getFTensor0FromVec(ublas::vector< T, A > &data)
Get tensor rank 0 (scalar) from data vector.
Definition: Templates.hpp:141
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
FieldApproximationBase & getBase()
Get approximation base.
void cblas_dgemv(const enum CBLAS_ORDER order, const enum CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double *A, const int lda, const double *X, const int incX, const double beta, double *Y, const int incY)
Definition: cblas_dgemv.c:11
MoFEMErrorCode determinantTensor3by3(T1 &t, T2 &det)
Calculate determinant 3 by 3.
Definition: Templates.hpp:461
FTensor::Index< 'j', 3 > j
Data structure for a finite element entity. It keeps information about indices of degrees of freedom,...
PetscErrorCode Spin(double *spinOmega, double *vecOmega)
calculate spin matrix from vector
Definition: fem_tools.c:558
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
FTensor::Index< 'i', 3 > i
FTensor::Index< 'i', 3 > i
std::array< boost::ptr_vector< EntData >, MBMAXTYPE > dataOnEntities
#define MoFEMFunctionBeginHot
First executable line of each MoFEM function, used for error handling. Final line of MoFEM functions ...
Definition: definitions.h:501
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
ublas::matrix< double, ublas::row_major, DoubleAllocator > MatrixDouble
Definition: Types.hpp:74
#define MoFEMFunctionReturn(a)
Last executable line of each PETSc function used for error handling. Replaces return()
Definition: definitions.h:477
#define CHKERRG(n)
Check error code of MoFEM/MOAB/PETSc function.
Definition: definitions.h:544
FTensor::Tensor2< double *, 3, 3 > tInvJac
FTensor::Tensor2< double *, 3, 3 > tInvJac
FTensor::Tensor1< double *, Tensor_Dim > getFTensor1DiffN(const FieldApproximationBase base)
Get derivatives of base functions.
virtual MoFEMErrorCode doWork(int row_side, int col_side, EntityType row_type, EntityType col_type, DataForcesAndSourcesCore::EntData &row_data, DataForcesAndSourcesCore::EntData &col_data)
Operator for bi-linear form, usually to calculate values on left hand side.
FTensor::Tensor2< double *, 3, 3 > tInvJac
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
#define MoFEMFunctionReturnHot(a)
Last executable line of each PETSc function used for error handling. Replaces return()
Definition: definitions.h:508
FTensor::Index< 'i', 3 > i
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
implementation of Data Operators for Forces and Sources
Definition: Common.hpp:21
FTensor::Tensor0< FTensor::PackPtr< double *, 1 > > getFTensor0FieldData()
Return scalar fields as a FTensor of rank 0.
virtual std::map< std::string, boost::shared_ptr< MatrixDouble > > & getBBDiffNMap()
double cblas_ddot(const int N, const double *X, const int incX, const double *Y, const int incY)
Definition: cblas_ddot.c:12
FTensor::Index< 'i', 3 > i
static const char *const ApproximationBaseNames[]
Definition: definitions.h:158
MoFEMErrorCode invertTensor3by3(ublas::matrix< T, L, A > &jac_data, ublas::vector< T, A > &det_data, ublas::matrix< T, L, A > &inv_jac_data)
Calculate inverse of tensor rank 2 at integration points.
Definition: Templates.hpp:442
Get field values and gradients at Gauss points.
FTensor::Index< 'j', 3 > j
FTensor::Index< 'k', 3 > k
FTensor::Tensor1< FTensor::PackPtr< double *, Tensor_Dim >, Tensor_Dim > getFTensor1FieldData()
Return FTensor of rank 1, i.e. vector from field data coefficients.
const Tensor2_symmetric_Expr< const ddTensor0< T, Dim, i, j >, typename promote< T, double >::V, Dim, i, j > dd(const Tensor0< T * > &a, const Index< i, Dim > index1, const Index< j, Dim > index2, const Tensor1< int, Dim > &d_ijk, const Tensor1< double, Dim > &d_xyz)
Definition: ddTensor0.hpp:33
FieldApproximationBase
approximation base
Definition: definitions.h:144
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
PetscErrorCode MoFEMErrorCode
MoFEM/PETSc error code.
Definition: Exceptions.hpp:66
Ainsworth Cole (Legendre) approx. base .
Definition: definitions.h:146
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
FTensor::Index< 'k', 3 > k
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
#define CHKERR
Inline error check.
Definition: definitions.h:596
FTensor::Tensor2< double *, 3, 3 > tJac
virtual MoFEMErrorCode opRhs(DataForcesAndSourcesCore &data, const bool do_vertices, const bool do_edges, const bool do_quads, const bool do_tris, const bool do_tets, const bool do_prisms, const bool error_if_no_base=true)
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
const VectorDofs & getFieldDofs() const
get dofs data stature FEDofEntity
FTensor::Tensor2< FTensor::PackPtr< double *, Tensor_Dim0 *Tensor_Dim1 >, Tensor_Dim0, Tensor_Dim1 > getFTensor2DiffN(FieldApproximationBase base)
Get derivatives of base functions for Hdiv space.
virtual std::array< boost::shared_ptr< MatrixDouble >, MaxBernsteinBezierOrder > & getBBDiffNByOrderArray()
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
Data on single entity (This is passed as argument to DataOperator::doWork)
FTensor::Index< 'j', 3 > j
#define MoFEMFunctionBegin
First executable line of each MoFEM function, used for error handling. Final line of MoFEM functions ...
Definition: definitions.h:407
virtual MoFEMErrorCode opLhs(DataForcesAndSourcesCore &row_data, DataForcesAndSourcesCore &col_data, bool symm=true)
FTensor::Index< 'j', 3 > j
ublas::vector< double, DoubleAllocator > VectorDouble
Definition: Types.hpp:72
MatrixDouble & getN(const FieldApproximationBase base)
get base functions this return matrix (nb. of rows is equal to nb. of Gauss pts, nb....
const VectorDouble & getFieldData() const
get dofs values
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
FTensor::Tensor1< FTensor::PackPtr< double *, Tensor_Dim >, Tensor_Dim > getFTensor1N(FieldApproximationBase base)
Get base functions for Hdiv/Hcurl spaces.
MoFEMErrorCode doWork(int side, EntityType type, DataForcesAndSourcesCore::EntData &data)
Operator for linear form, usually to calculate values on right hand side.
FTensor::Tensor0< FTensor::PackPtr< double *, 1 > > getFTensor0N(const FieldApproximationBase base)
Get base function as Tensor0.
MatrixDouble & getDiffN(const FieldApproximationBase base)
get derivatives of base functions
constexpr std::enable_if<(Dim0<=2 &&Dim1<=2), Tensor2_Expr< Levi_Civita< T >, T, Dim0, Dim1, i, j > >::type levi_civita(const Index< i, Dim0 > &, const Index< j, Dim1 > &)
levi_civita functions to make for easy adhoc use