DataOperators.cpp
1 /** \file DataOperators.cpp
2 
3  \brief implementation of Data Operators for Forces and Sources
4 
5 */
6 
7 /* This file is part of MoFEM.
8  * MoFEM is free software: you can redistribute it and/or modify it under
9  * the terms of the GNU Lesser General Public License as published by the
10  * Free Software Foundation, either version 3 of the License, or (at your
11  * option) any later version.
12  *
13  * MoFEM is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
16  * License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with MoFEM. If not, see <http://www.gnu.org/licenses/>. */
20 
21 #ifdef __cplusplus
22 extern "C" {
23 #endif
24 #include <cblas.h>
25 #include <lapack_wrap.h>
26 #include <gm_rule.h>
27 #ifdef __cplusplus
28 }
29 #endif
30 
31 namespace MoFEM {
32 
33 MoFEMErrorCode DataOperator::opLhs(DataForcesAndSourcesCore &row_data,
34  DataForcesAndSourcesCore &col_data,
35  bool symm) {
36  MoFEMFunctionBeginHot;
37 
38  // nodes
39  for (unsigned int nn = 0; nn != row_data.dataOnEntities[MBVERTEX].size();
40  nn++) {
41  unsigned int NN = 0;
42  if (symm)
43  NN = nn;
44  for (; NN != col_data.dataOnEntities[MBVERTEX].size(); NN++) {
45  ierr = doWork(nn, NN, MBVERTEX, MBVERTEX,
46  row_data.dataOnEntities[MBVERTEX][0],
47  col_data.dataOnEntities[MBVERTEX][0]);
48  CHKERRG(ierr);
49  }
50  if (!symm) {
51  for (unsigned int EE = 0; EE < col_data.dataOnEntities[MBEDGE].size();
52  EE++) {
53  if (col_data.dataOnEntities[MBEDGE][EE].getN().size1() == 0)
54  continue;
55  ierr = doWork(nn, EE, MBVERTEX, MBEDGE,
56  row_data.dataOnEntities[MBVERTEX][0],
57  col_data.dataOnEntities[MBEDGE][EE]);
58  CHKERRG(ierr);
59  }
60  for (unsigned int FF = 0; FF < col_data.dataOnEntities[MBTRI].size();
61  FF++) {
62  if (col_data.dataOnEntities[MBTRI][FF].getN().size1() == 0)
63  continue;
64  ierr = doWork(nn, FF, MBVERTEX, MBTRI,
65  row_data.dataOnEntities[MBVERTEX][0],
66  col_data.dataOnEntities[MBTRI][FF]);
67  CHKERRG(ierr);
68  }
69  for (unsigned int QQ = 0; QQ < col_data.dataOnEntities[MBQUAD].size();
70  QQ++) {
71  if (col_data.dataOnEntities[MBQUAD][QQ].getN().size1() == 0)
72  continue;
73  ierr = doWork(nn, QQ, MBVERTEX, MBQUAD,
74  row_data.dataOnEntities[MBVERTEX][0],
75  col_data.dataOnEntities[MBQUAD][QQ]);
76  CHKERRG(ierr);
77  }
78  }
79  for (unsigned int VV = 0; VV < col_data.dataOnEntities[MBTET].size();
80  VV++) {
81  if (col_data.dataOnEntities[MBTET][VV].getN().size1() == 0)
82  continue;
83  ierr =
84  doWork(nn, VV, MBVERTEX, MBTET, row_data.dataOnEntities[MBVERTEX][0],
85  col_data.dataOnEntities[MBTET][VV]);
86  CHKERRG(ierr);
87  }
88  for (unsigned int PP = 0; PP < col_data.dataOnEntities[MBPRISM].size();
89  PP++) {
90  if (col_data.dataOnEntities[MBPRISM][PP].getN().size1() == 0)
91  continue;
92  ierr = doWork(nn, PP, MBVERTEX, MBPRISM,
93  row_data.dataOnEntities[MBVERTEX][0],
94  col_data.dataOnEntities[MBPRISM][PP]);
95  CHKERRG(ierr);
96  }
97  for (unsigned int MM = 0; MM < col_data.dataOnEntities[MBENTITYSET].size();
98  MM++) {
 99  if (col_data.dataOnEntities[MBENTITYSET][MM].getIndices().empty() &&
 100  col_data.dataOnEntities[MBENTITYSET][MM].getFieldData().empty())
101  continue;
102  ierr = doWork(nn, MM, MBVERTEX, MBENTITYSET,
103  row_data.dataOnEntities[MBVERTEX][0],
104  col_data.dataOnEntities[MBENTITYSET][MM]);
105  CHKERRG(ierr);
106  }
107  }
108 
109  // edges
110  for (unsigned int ee = 0; ee < row_data.dataOnEntities[MBEDGE].size(); ee++) {
111  if (row_data.dataOnEntities[MBEDGE][ee].getN().size1() == 0)
112  continue;
113  for (unsigned int NN = 0; NN != col_data.dataOnEntities[MBVERTEX].size();
114  NN++) {
115  ierr =
116  doWork(ee, NN, MBEDGE, MBVERTEX, row_data.dataOnEntities[MBEDGE][ee],
117  col_data.dataOnEntities[MBVERTEX][0]);
118  CHKERRG(ierr);
119  }
120  unsigned int EE = 0;
121  if (symm)
122  EE = ee;
123  for (; EE < col_data.dataOnEntities[MBEDGE].size(); EE++) {
124  if (col_data.dataOnEntities[MBEDGE][EE].getN().size1() == 0)
125  continue;
126  ierr = doWork(ee, EE, MBEDGE, MBEDGE, row_data.dataOnEntities[MBEDGE][ee],
127  col_data.dataOnEntities[MBEDGE][EE]);
128  CHKERRG(ierr);
129  }
130  // tris
131  for (unsigned int FF = 0; FF < col_data.dataOnEntities[MBTRI].size();
132  FF++) {
133  if (col_data.dataOnEntities[MBTRI][FF].getN().size1() == 0)
134  continue;
135  ierr = doWork(ee, FF, MBEDGE, MBTRI, row_data.dataOnEntities[MBEDGE][ee],
136  col_data.dataOnEntities[MBTRI][FF]);
137  CHKERRG(ierr);
138  }
139  // quad
140  for (unsigned int QQ = 0; QQ < col_data.dataOnEntities[MBQUAD].size();
141  QQ++) {
142  if (col_data.dataOnEntities[MBQUAD][QQ].getN().size1() == 0)
143  continue;
144  ierr = doWork(ee, QQ, MBEDGE, MBQUAD, row_data.dataOnEntities[MBEDGE][ee],
145  col_data.dataOnEntities[MBQUAD][QQ]);
146  CHKERRG(ierr);
147  }
148  // tet
149  for (unsigned int VV = 0; VV < col_data.dataOnEntities[MBTET].size();
150  VV++) {
151  if (col_data.dataOnEntities[MBTET][VV].getN().size1() == 0)
152  continue;
153  ierr = doWork(ee, VV, MBEDGE, MBTET, row_data.dataOnEntities[MBEDGE][ee],
154  col_data.dataOnEntities[MBTET][VV]);
155  CHKERRG(ierr);
156  }
157  // prism
158  for (unsigned int PP = 0; PP < col_data.dataOnEntities[MBPRISM].size();
159  PP++) {
160  if (col_data.dataOnEntities[MBPRISM][PP].getN().size1() == 0)
161  continue;
162  ierr =
163  doWork(ee, PP, MBEDGE, MBPRISM, row_data.dataOnEntities[MBEDGE][ee],
164  col_data.dataOnEntities[MBPRISM][PP]);
165  CHKERRG(ierr);
166  }
167  for (unsigned int MM = 0; MM < col_data.dataOnEntities[MBENTITYSET].size();
168  MM++) {
169  if (col_data.dataOnEntities[MBENTITYSET][MM].getIndices().empty() &&
170  col_data.dataOnEntities[MBENTITYSET][MM].getFieldData().empty())
171  continue;
172  ierr = doWork(ee, MM, MBEDGE, MBENTITYSET,
173  row_data.dataOnEntities[MBEDGE][ee],
174  col_data.dataOnEntities[MBENTITYSET][MM]);
175  CHKERRG(ierr);
176  }
177  }
178 
179  // faces
180  for (unsigned int ff = 0; ff < row_data.dataOnEntities[MBTRI].size(); ff++) {
181  if (row_data.dataOnEntities[MBTRI][ff].getN().size1() == 0)
182  continue;
183  for (unsigned int NN = 0; NN != col_data.dataOnEntities[MBVERTEX].size();
184  NN++) {
185  ierr = doWork(ff, NN, MBTRI, MBVERTEX, row_data.dataOnEntities[MBTRI][ff],
186  col_data.dataOnEntities[MBVERTEX][0]);
187  CHKERRG(ierr);
188  }
189  if (!symm) {
190  unsigned int EE = 0;
191  for (; EE < col_data.dataOnEntities[MBEDGE].size(); EE++) {
192  if (col_data.dataOnEntities[MBEDGE][EE].getN().size1() == 0)
193  continue;
194  ierr = doWork(ff, EE, MBTRI, MBEDGE, row_data.dataOnEntities[MBTRI][ff],
195  col_data.dataOnEntities[MBEDGE][EE]);
196  CHKERRG(ierr);
197  }
198  }
199  unsigned int FF = 0;
200  if (symm)
201  FF = ff;
202  for (; FF < col_data.dataOnEntities[MBTRI].size(); FF++) {
203  if (col_data.dataOnEntities[MBTRI][FF].getN().size1() == 0)
204  continue;
205  ierr = doWork(ff, FF, MBTRI, MBTRI, row_data.dataOnEntities[MBTRI][ff],
206  col_data.dataOnEntities[MBTRI][FF]);
207  CHKERRG(ierr);
208  }
209  for (unsigned int QQ = 0; QQ < col_data.dataOnEntities[MBQUAD].size();
210  QQ++) {
211  if (col_data.dataOnEntities[MBQUAD][QQ].getN().size1() == 0)
212  continue;
213  ierr = doWork(ff, QQ, MBTRI, MBQUAD, row_data.dataOnEntities[MBTRI][ff],
214  col_data.dataOnEntities[MBQUAD][QQ]);
215  CHKERRG(ierr);
216  }
217  for (unsigned int VV = 0; VV < col_data.dataOnEntities[MBTET].size();
218  VV++) {
219  if (col_data.dataOnEntities[MBTET][VV].getN().size1() == 0)
220  continue;
221  ierr = doWork(ff, VV, MBTRI, MBTET, row_data.dataOnEntities[MBTRI][ff],
222  col_data.dataOnEntities[MBTET][VV]);
223  CHKERRG(ierr);
224  }
225  for (unsigned int PP = 0; PP < col_data.dataOnEntities[MBPRISM].size();
226  PP++) {
227  if (col_data.dataOnEntities[MBPRISM][PP].getN().size1() == 0)
228  continue;
229  ierr = doWork(ff, PP, MBTRI, MBPRISM, row_data.dataOnEntities[MBTRI][ff],
230  col_data.dataOnEntities[MBPRISM][PP]);
231  CHKERRG(ierr);
232  }
233  for (unsigned int MM = 0; MM < col_data.dataOnEntities[MBENTITYSET].size();
234  MM++) {
235  if (col_data.dataOnEntities[MBENTITYSET][MM].getIndices().empty() &&
236  col_data.dataOnEntities[MBENTITYSET][MM].getFieldData().empty())
237  continue;
238  ierr =
239  doWork(ff, MM, MBTRI, MBENTITYSET, row_data.dataOnEntities[MBTRI][ff],
240  col_data.dataOnEntities[MBENTITYSET][MM]);
241  CHKERRG(ierr);
242  }
243  }
244 
245  // quads
246  for (unsigned int qq = 0; qq < row_data.dataOnEntities[MBQUAD].size(); qq++) {
247  if (row_data.dataOnEntities[MBQUAD][qq].getN().size1() == 0)
248  continue;
249  for (unsigned int NN = 0; NN != col_data.dataOnEntities[MBVERTEX].size();
250  NN++) {
251  ierr =
252  doWork(qq, NN, MBQUAD, MBVERTEX, row_data.dataOnEntities[MBQUAD][qq],
253  col_data.dataOnEntities[MBVERTEX][0]);
254  CHKERRG(ierr);
255  }
256  if (!symm) {
257  unsigned int EE = 0;
258  for (; EE < col_data.dataOnEntities[MBEDGE].size(); EE++) {
259  if (col_data.dataOnEntities[MBEDGE][EE].getN().size1() == 0)
260  continue;
261  ierr =
262  doWork(qq, EE, MBQUAD, MBEDGE, row_data.dataOnEntities[MBQUAD][qq],
263  col_data.dataOnEntities[MBEDGE][EE]);
264  CHKERRG(ierr);
265  }
266  unsigned int FF = 0;
267  for (; FF < col_data.dataOnEntities[MBTRI].size(); FF++) {
268  if (col_data.dataOnEntities[MBTRI][FF].getN().size1() == 0)
269  continue;
270  ierr =
271  doWork(qq, FF, MBQUAD, MBTRI, row_data.dataOnEntities[MBQUAD][qq],
272  col_data.dataOnEntities[MBTRI][FF]);
273  CHKERRG(ierr);
274  }
275  }
276  unsigned int QQ = 0;
277  if (symm)
278  QQ = qq;
279  for (; QQ < col_data.dataOnEntities[MBQUAD].size(); QQ++) {
280  if (col_data.dataOnEntities[MBQUAD][QQ].getN().size1() == 0)
281  continue;
282  ierr = doWork(qq, QQ, MBQUAD, MBQUAD, row_data.dataOnEntities[MBQUAD][qq],
283  col_data.dataOnEntities[MBQUAD][QQ]);
284  CHKERRG(ierr);
285  }
286  for (unsigned int PP = 0; PP < col_data.dataOnEntities[MBPRISM].size();
287  PP++) {
288  if (col_data.dataOnEntities[MBPRISM][PP].getN().size1() == 0)
289  continue;
 290  ierr = doWork(qq, PP, MBQUAD, MBPRISM, row_data.dataOnEntities[MBQUAD][qq],
291  col_data.dataOnEntities[MBPRISM][PP]);
292  CHKERRG(ierr);
293  }
294  for (unsigned int MM = 0; MM < col_data.dataOnEntities[MBENTITYSET].size();
295  MM++) {
296  if (col_data.dataOnEntities[MBENTITYSET][MM].getIndices().empty() &&
297  col_data.dataOnEntities[MBENTITYSET][MM].getFieldData().empty())
298  continue;
299  ierr = doWork(qq, MM, MBQUAD, MBENTITYSET,
300  row_data.dataOnEntities[MBQUAD][qq],
301  col_data.dataOnEntities[MBENTITYSET][MM]);
302  CHKERRG(ierr);
303  }
304  }
305 
306  // volumes
307  for (unsigned int vv = 0; vv < row_data.dataOnEntities[MBTET].size(); vv++) {
308  if (row_data.dataOnEntities[MBTET][vv].getN().size1() == 0)
309  continue;
310  if (!symm) {
311  // vertex
312  for (unsigned int NN = 0; NN != col_data.dataOnEntities[MBVERTEX].size();
313  NN++) {
314  ierr =
315  doWork(vv, NN, MBTET, MBVERTEX, row_data.dataOnEntities[MBTET][vv],
316  col_data.dataOnEntities[MBVERTEX][0]);
317  CHKERRG(ierr);
318  }
319  // edges
320  for (unsigned int EE = 0; EE < col_data.dataOnEntities[MBEDGE].size();
321  EE++) {
322  if (col_data.dataOnEntities[MBEDGE][EE].getN().size1() == 0)
323  continue;
324  ierr = doWork(vv, EE, MBTET, MBEDGE, row_data.dataOnEntities[MBTET][vv],
325  col_data.dataOnEntities[MBEDGE][EE]);
326  CHKERRG(ierr);
327  }
328  // faces
329  for (unsigned int FF = 0; FF < col_data.dataOnEntities[MBTRI].size();
330  FF++) {
331  if (col_data.dataOnEntities[MBTRI][FF].getN().size1() == 0)
332  continue;
333  ierr = doWork(vv, FF, MBTET, MBTRI, row_data.dataOnEntities[MBTET][vv],
334  col_data.dataOnEntities[MBTRI][FF]);
335  CHKERRG(ierr);
336  }
337  }
338  unsigned int VV = 0;
339  if (symm)
340  VV = vv;
341  for (; VV < col_data.dataOnEntities[MBTET].size(); VV++) {
342  if (col_data.dataOnEntities[MBTET][VV].getN().size1() == 0)
343  continue;
344  ierr = doWork(vv, VV, MBTET, MBTET, row_data.dataOnEntities[MBTET][vv],
345  col_data.dataOnEntities[MBTET][VV]);
346  CHKERRG(ierr);
347  }
348  for (unsigned int MM = 0; MM < col_data.dataOnEntities[MBENTITYSET].size();
349  MM++) {
350  if (col_data.dataOnEntities[MBENTITYSET][MM].getIndices().empty() &&
351  col_data.dataOnEntities[MBENTITYSET][MM].getFieldData().empty())
352  continue;
353  ierr =
354  doWork(vv, MM, MBTET, MBENTITYSET, row_data.dataOnEntities[MBTET][vv],
355  col_data.dataOnEntities[MBENTITYSET][MM]);
356  CHKERRG(ierr);
357  }
358  }
359 
360  for (unsigned int pp = 0; pp < row_data.dataOnEntities[MBPRISM].size();
361  pp++) {
362  if (row_data.dataOnEntities[MBPRISM][pp].getN().size1() == 0)
363  continue;
364  if (!symm) {
365  // vertex
366  for (unsigned int NN = 0; NN != col_data.dataOnEntities[MBVERTEX].size();
367  NN++) {
368  ierr = doWork(pp, NN, MBPRISM, MBVERTEX,
369  row_data.dataOnEntities[MBPRISM][pp],
370  col_data.dataOnEntities[MBVERTEX][0]);
371  CHKERRG(ierr);
372  }
373  // edges
374  for (unsigned int EE = 0; EE < col_data.dataOnEntities[MBEDGE].size();
375  EE++) {
376  if (col_data.dataOnEntities[MBEDGE][EE].getN().size1() == 0)
377  continue;
378  ierr = doWork(pp, EE, MBPRISM, MBEDGE,
379  row_data.dataOnEntities[MBPRISM][pp],
380  col_data.dataOnEntities[MBEDGE][EE]);
381  CHKERRG(ierr);
382  }
383  // faces
384  for (unsigned int FF = 0; FF < col_data.dataOnEntities[MBTRI].size();
385  FF++) {
386  if (col_data.dataOnEntities[MBTRI][FF].getN().size1() == 0)
387  continue;
388  ierr =
389  doWork(pp, FF, MBPRISM, MBTRI, row_data.dataOnEntities[MBPRISM][pp],
390  col_data.dataOnEntities[MBTRI][FF]);
391  CHKERRG(ierr);
392  }
393  // quads
394  for (unsigned int QQ = 0; QQ < col_data.dataOnEntities[MBQUAD].size();
395  QQ++) {
396  if (col_data.dataOnEntities[MBQUAD][QQ].getN().size1() == 0)
397  continue;
398  ierr = doWork(pp, QQ, MBPRISM, MBQUAD,
399  row_data.dataOnEntities[MBPRISM][pp],
400  col_data.dataOnEntities[MBQUAD][QQ]);
401  CHKERRG(ierr);
402  }
403  }
404  unsigned int PP = 0;
405  if (symm)
406  PP = pp;
407  for (; PP < col_data.dataOnEntities[MBPRISM].size(); PP++) {
408  if (col_data.dataOnEntities[MBPRISM][PP].getN().size1() == 0)
409  continue;
410  ierr =
411  doWork(pp, PP, MBPRISM, MBPRISM, row_data.dataOnEntities[MBPRISM][pp],
412  col_data.dataOnEntities[MBPRISM][PP]);
413  CHKERRG(ierr);
414  }
415  for (unsigned int MM = 0; MM < col_data.dataOnEntities[MBENTITYSET].size();
416  MM++) {
417  if (col_data.dataOnEntities[MBENTITYSET][MM].getIndices().empty() &&
418  col_data.dataOnEntities[MBENTITYSET][MM].getFieldData().empty())
419  continue;
420  ierr = doWork(pp, MM, MBPRISM, MBENTITYSET,
421  row_data.dataOnEntities[MBPRISM][pp],
422  col_data.dataOnEntities[MBENTITYSET][MM]);
423  CHKERRG(ierr);
424  }
425  }
426 
427  // meshsets
428  for (unsigned int mm = 0; mm < row_data.dataOnEntities[MBENTITYSET].size();
429  mm++) {
430  if (row_data.dataOnEntities[MBENTITYSET][mm].getIndices().empty() &&
431  row_data.dataOnEntities[MBENTITYSET][mm].getFieldData().empty())
432  continue;
433  if (!symm) {
434  // vertex
435  for (unsigned int NN = 0; NN != col_data.dataOnEntities[MBVERTEX].size();
436  NN++) {
437  ierr = doWork(mm, NN, MBENTITYSET, MBVERTEX,
438  row_data.dataOnEntities[MBENTITYSET][mm],
439  col_data.dataOnEntities[MBVERTEX][0]);
440  CHKERRG(ierr);
441  }
442  // edges
443  for (unsigned int EE = 0; EE < col_data.dataOnEntities[MBEDGE].size();
444  EE++) {
445  if (col_data.dataOnEntities[MBEDGE][EE].getN().size1() == 0)
446  continue;
447  ierr = doWork(mm, EE, MBENTITYSET, MBEDGE,
448  row_data.dataOnEntities[MBENTITYSET][mm],
449  col_data.dataOnEntities[MBEDGE][EE]);
450  CHKERRG(ierr);
451  }
452  // faces
453  for (unsigned int FF = 0; FF < col_data.dataOnEntities[MBTRI].size();
454  FF++) {
455  if (col_data.dataOnEntities[MBTRI][FF].getN().size1() == 0)
456  continue;
457  ierr = doWork(mm, FF, MBENTITYSET, MBTRI,
458  row_data.dataOnEntities[MBENTITYSET][mm],
459  col_data.dataOnEntities[MBTRI][FF]);
460  CHKERRG(ierr);
461  }
462  // quad
463  for (unsigned int QQ = 0; QQ < col_data.dataOnEntities[MBQUAD].size();
464  QQ++) {
465  if (col_data.dataOnEntities[MBQUAD][QQ].getN().size1() == 0)
466  continue;
467  ierr = doWork(mm, QQ, MBENTITYSET, MBQUAD,
468  row_data.dataOnEntities[MBENTITYSET][mm],
469  col_data.dataOnEntities[MBQUAD][QQ]);
470  CHKERRG(ierr);
471  }
472  // volume
473  for (unsigned int VV = 0; VV < col_data.dataOnEntities[MBTET].size();
474  VV++) {
475  ierr = doWork(mm, VV, MBENTITYSET, MBTET,
476  row_data.dataOnEntities[MBENTITYSET][mm],
477  col_data.dataOnEntities[MBTET][VV]);
478  CHKERRG(ierr);
479  }
480  for (unsigned int PP = 0; PP < col_data.dataOnEntities[MBPRISM].size();
481  PP++) {
482  ierr = doWork(mm, PP, MBENTITYSET, MBPRISM,
483  row_data.dataOnEntities[MBENTITYSET][mm],
484  col_data.dataOnEntities[MBPRISM][PP]);
485  CHKERRG(ierr);
486  }
487  }
488  unsigned int MM = 0;
489  if (symm)
490  MM = mm;
491  for (; MM < col_data.dataOnEntities[MBENTITYSET].size(); MM++) {
 492  if (col_data.dataOnEntities[MBENTITYSET][MM].getIndices().empty() &&
 493  col_data.dataOnEntities[MBENTITYSET][MM].getFieldData().empty())
494  continue;
495  ierr = doWork(mm, MM, MBENTITYSET, MBENTITYSET,
496  row_data.dataOnEntities[MBENTITYSET][mm],
497  col_data.dataOnEntities[MBENTITYSET][MM]);
498  CHKERRG(ierr);
499  }
500  }
501 
503 }
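// A minimal, hypothetical sketch of a user operator driven by opLhs() above:
// the element matrix itself is assembled in an override of the bi-linear
// doWork(), which opLhs() calls once for every non-empty row/column entity
// pair. The class name and body are illustrative only.
struct ExampleLhsOperator : public DataOperator {
  MoFEMErrorCode doWork(int row_side, int col_side, EntityType row_type,
                        EntityType col_type,
                        DataForcesAndSourcesCore::EntData &row_data,
                        DataForcesAndSourcesCore::EntData &col_data) {
    MoFEMFunctionBeginHot;
    // opLhs() already skips most empty blocks, but a user operator is free
    // to add its own guards before assembling the local matrix for this pair.
    if (row_data.getIndices().empty() || col_data.getIndices().empty())
      MoFEMFunctionReturnHot(0);
    // ... integrate and assemble the (row_type, col_type) block here ...
    MoFEMFunctionReturnHot(0);
  }
};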
504 
505 template <>
506 MoFEMErrorCode invertTensor3by3<3, double, ublas::row_major, DoubleAllocator>(
507  MatrixDouble &jac_data, VectorDouble &det_data,
508  MatrixDouble &inv_jac_data) {
509 
511  auto A = getFTensor2FromMat<3, 3>(jac_data);
512  int nb_gauss_pts = jac_data.size2();
513  det_data.resize(nb_gauss_pts, false);
514  inv_jac_data.resize(3, nb_gauss_pts, false);
515  auto det = getFTensor0FromVec(det_data);
516  auto I = getFTensor2FromMat<3, 3>(inv_jac_data);
517  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
518  ierr = determinantTensor3by3(A, det);
519  CHKERRG(ierr);
520  ierr = invertTensor3by3(A, det, I);
521  CHKERRG(ierr);
522  ++A;
523  ++det;
524  ++I;
525  }
527 }
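// A minimal usage sketch of the specialisation above; the function name and
// local variables are illustrative only. The matrix jac is assumed to hold
// one 3x3 Jacobian per integration point (one column per point), the layout
// expected by getFTensor2FromMat<3, 3>; det and inv_jac are resized by the
// call.
inline MoFEMErrorCode exampleInvertJacobiansAtGaussPts(MatrixDouble &jac,
                                                       VectorDouble &det,
                                                       MatrixDouble &inv_jac) {
  MoFEMFunctionBeginHot;
  ierr = invertTensor3by3<3, double, ublas::row_major, DoubleAllocator>(
      jac, det, inv_jac);
  CHKERRG(ierr);
  MoFEMFunctionReturnHot(0);
}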
528 
529 MoFEMErrorCode DataOperator::opRhs(DataForcesAndSourcesCore &data,
530  const bool do_vertices, const bool do_edges,
531  const bool do_quads, const bool do_tris,
532  const bool do_tets, const bool do_prisms,
533  const bool error_if_no_base) {
534  MoFEMFunctionBeginHot;
535 
536  if (do_vertices) {
537  for (unsigned int nn = 0; nn < data.dataOnEntities[MBVERTEX].size(); nn++) {
538  if (error_if_no_base &&
539  data.dataOnEntities[MBVERTEX][nn].getFieldData().size() &&
540  (data.dataOnEntities[MBVERTEX][nn].getBase() == NOBASE ||
541  data.dataOnEntities[MBVERTEX][nn].getBase() == LASTBASE)) {
542  for (VectorDofs::iterator it =
543  data.dataOnEntities[MBVERTEX][nn].getFieldDofs().begin();
544  it != data.dataOnEntities[MBVERTEX][nn].getFieldDofs().end(); it++)
545  if ((*it) && (*it)->getActive()) {
546  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
547  "No base on Vertex and side %d", nn);
548  }
549  }
550  ierr = doWork(nn, MBVERTEX, data.dataOnEntities[MBVERTEX][nn]);
551  CHKERRG(ierr);
552  }
553  }
554  if (do_edges) {
555  for (unsigned int ee = 0; ee < data.dataOnEntities[MBEDGE].size(); ee++) {
556  if (error_if_no_base &&
557  data.dataOnEntities[MBEDGE][ee].getFieldData().size() &&
558  (data.dataOnEntities[MBEDGE][ee].getBase() == NOBASE ||
559  data.dataOnEntities[MBEDGE][ee].getBase() == LASTBASE)) {
560  for (VectorDofs::iterator it =
561  data.dataOnEntities[MBEDGE][ee].getFieldDofs().begin();
562  it != data.dataOnEntities[MBEDGE][ee].getFieldDofs().end(); it++)
563  if ((*it) && (*it)->getActive()) {
564  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
565  "No base on Edge and side %d", ee);
566  }
567  }
568  ierr = doWork(ee, MBEDGE, data.dataOnEntities[MBEDGE][ee]);
569  CHKERRG(ierr);
570  }
571  }
572  if (do_tris) {
573  for (unsigned int ff = 0; ff < data.dataOnEntities[MBTRI].size(); ff++) {
574  if (error_if_no_base &&
575  data.dataOnEntities[MBTRI][ff].getFieldData().size() &&
576  (data.dataOnEntities[MBTRI][ff].getBase() == NOBASE ||
577  data.dataOnEntities[MBTRI][ff].getBase() == LASTBASE)) {
578  for (VectorDofs::iterator it =
579  data.dataOnEntities[MBTRI][ff].getFieldDofs().begin();
580  it != data.dataOnEntities[MBTRI][ff].getFieldDofs().end(); it++)
581  if ((*it) && (*it)->getActive()) {
582  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
583  "No base on Triangle and side %d", ff);
584  }
585  }
586  ierr = doWork(ff, MBTRI, data.dataOnEntities[MBTRI][ff]);
587  CHKERRG(ierr);
588  }
589  }
590  if (do_quads) {
591  for (unsigned int qq = 0; qq < data.dataOnEntities[MBQUAD].size(); qq++) {
592  if (error_if_no_base &&
593  data.dataOnEntities[MBQUAD][qq].getFieldData().size() &&
594  (data.dataOnEntities[MBQUAD][qq].getBase() == NOBASE ||
595  data.dataOnEntities[MBQUAD][qq].getBase() == LASTBASE)) {
596  for (VectorDofs::iterator it =
597  data.dataOnEntities[MBQUAD][qq].getFieldDofs().begin();
598  it != data.dataOnEntities[MBQUAD][qq].getFieldDofs().end(); it++)
599  if ((*it) && (*it)->getActive()) {
600  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
601  "No base on Quad and side %d", qq);
602  }
603  }
604  ierr = doWork(qq, MBQUAD, data.dataOnEntities[MBQUAD][qq]);
605  CHKERRG(ierr);
606  }
607  }
608  if (do_tets) {
609  for (unsigned int vv = 0; vv < data.dataOnEntities[MBTET].size(); vv++) {
610  if (error_if_no_base &&
611  data.dataOnEntities[MBTET][vv].getFieldData().size() &&
 612  (data.dataOnEntities[MBTET][vv].getBase() == NOBASE ||
 613  data.dataOnEntities[MBTET][vv].getBase() == LASTBASE)) {
614  for (VectorDofs::iterator it =
615  data.dataOnEntities[MBTET][vv].getFieldDofs().begin();
616  it != data.dataOnEntities[MBTET][vv].getFieldDofs().end(); it++)
617  if ((*it) && (*it)->getActive()) {
618  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
619  "No base on Tet and side %d", vv);
620  }
621  }
622  ierr = doWork(vv, MBTET, data.dataOnEntities[MBTET][vv]);
623  CHKERRG(ierr);
624  }
625  }
626  if (do_prisms) {
627  for (unsigned int pp = 0; pp < data.dataOnEntities[MBPRISM].size(); pp++) {
628  if (error_if_no_base &&
629  data.dataOnEntities[MBPRISM][pp].getFieldData().size() &&
630  (data.dataOnEntities[MBPRISM][pp].getBase() == NOBASE ||
631  data.dataOnEntities[MBPRISM][pp].getBase() == LASTBASE)) {
632  for (VectorDofs::iterator it =
633  data.dataOnEntities[MBPRISM][pp].getFieldDofs().begin();
634  it != data.dataOnEntities[MBPRISM][pp].getFieldDofs().end(); it++)
635  if ((*it) && (*it)->getActive()) {
636  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
637  "No base on Prism and side %d", pp);
638  }
639  }
640  ierr = doWork(pp, MBPRISM, data.dataOnEntities[MBPRISM][pp]);
641  CHKERRG(ierr);
642  }
643  }
644  for (unsigned int mm = 0; mm < data.dataOnEntities[MBENTITYSET].size();
645  mm++) {
646  if (data.dataOnEntities[MBENTITYSET][mm].getIndices().empty() &&
647  data.dataOnEntities[MBENTITYSET][mm].getFieldData().empty())
648  continue;
649  ierr = doWork(mm, MBENTITYSET, data.dataOnEntities[MBENTITYSET][mm]);
650  CHKERRG(ierr);
651  }
652 
654 }
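// A minimal, hypothetical sketch of a user operator driven by opRhs() above:
// the linear-form doWork() is called once for every non-empty entity block of
// the field. Class name and body are illustrative only.
struct ExampleRhsOperator : public DataOperator {
  MoFEMErrorCode doWork(int side, EntityType type,
                        DataForcesAndSourcesCore::EntData &data) {
    MoFEMFunctionBeginHot;
    if (data.getIndices().empty())
      MoFEMFunctionReturnHot(0);
    // ... integrate and assemble the local right-hand-side contribution for
    // entity (side, type) here ...
    MoFEMFunctionReturnHot(0);
  }
};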
655 
656 MoFEMErrorCode OpSetInvJacH1::doWork(int side, EntityType type,
659 
660  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
661 
662  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
663  const unsigned int nb_base_functions = data.getN(base).size2();
664  if (!nb_base_functions)
665  continue;
666  const unsigned int nb_gauss_pts = data.getN(base).size1();
667  if (!nb_gauss_pts)
668  continue;
669 
670  if (type != MBVERTEX) {
671  if (nb_base_functions != data.getDiffN(base).size2() / 3) {
672  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
673  "Data inconsistency nb_base_functions != data.diffN.size2()/3 "
674  "( %u != %u/3 )",
675  nb_base_functions, data.getDiffN(base).size2());
676  }
677  } else {
678  if (data.getDiffN(base).size1() != 4 ||
679  data.getDiffN(base).size2() != 3) {
680  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
681  "Data inconsistency");
682  }
683  }
684 
685  diffNinvJac.resize(data.getDiffN(base).size1(), data.getDiffN(base).size2(),
686  false);
687 
688  double *t_diff_n_ptr = &*data.getDiffN(base).data().begin();
690  t_diff_n_ptr, &t_diff_n_ptr[1], &t_diff_n_ptr[2]);
691  double *t_inv_n_ptr = &*diffNinvJac.data().begin();
693  t_inv_n_ptr, &t_inv_n_ptr[1], &t_inv_n_ptr[2]);
694 
695  switch (type) {
696 
697  case MBVERTEX: {
698  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
699  t_inv_diff_n(i) = t_diff_n(j) * tInvJac(j, i);
700  ++t_diff_n;
701  ++t_inv_diff_n;
702  }
703  } break;
704  case MBEDGE:
705  case MBTRI:
706  case MBTET: {
707  for (unsigned int gg = 0; gg < nb_gauss_pts; ++gg) {
708  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
709  t_inv_diff_n(i) = t_diff_n(j) * tInvJac(j, i);
710  ++t_diff_n;
711  ++t_inv_diff_n;
712  }
713  }
714 
715  } break;
716  default:
717  SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED, "not implemented");
718  }
719 
720  data.getDiffN(base).data().swap(diffNinvJac.data());
721  }
722 
724 }
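// Stand-alone sketch of the contraction applied in the loops above: H1
// gradients are pulled from the reference to the physical element with the
// chain rule dN/dx_i = (dN/dxi_j) * invJac(j, i). The helper name and all
// variables below are local to this illustration.
inline void exampleTransformH1Gradient(FTensor::Tensor1<double, 3> &diff_n_ref,
                                       FTensor::Tensor2<double, 3, 3> &inv_jac,
                                       FTensor::Tensor1<double, 3> &diff_n_glob) {
  FTensor::Index<'i', 3> i;
  FTensor::Index<'j', 3> j;
  diff_n_glob(i) = diff_n_ref(j) * inv_jac(j, i);
}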
725 
727 OpSetInvJacHdivAndHcurl::doWork(int side, EntityType type,
730 
731  if (type != MBEDGE && type != MBTRI && type != MBTET)
733 
734  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
735 
736  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
737 
738  const unsigned int nb_gauss_pts = data.getDiffN(base).size1();
739  const unsigned int nb_base_functions = data.getDiffN(base).size2() / 9;
740  if (!nb_base_functions)
741  continue;
742 
743  diffHdivInvJac.resize(nb_gauss_pts, data.getDiffN(base).size2(), false);
744 
745  auto t_diff_n = data.getFTensor2DiffN<3, 3>(base);
746  double *t_inv_diff_n_ptr = &*diffHdivInvJac.data().begin();
748  t_inv_diff_n_ptr, &t_inv_diff_n_ptr[HVEC0_1],
749  &t_inv_diff_n_ptr[HVEC0_2],
750 
751  &t_inv_diff_n_ptr[HVEC1_0], &t_inv_diff_n_ptr[HVEC1_1],
752  &t_inv_diff_n_ptr[HVEC1_2],
753 
754  &t_inv_diff_n_ptr[HVEC2_0], &t_inv_diff_n_ptr[HVEC2_1],
755  &t_inv_diff_n_ptr[HVEC2_2]);
756 
757  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
758  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
759  t_inv_diff_n(k, i) = t_diff_n(k, j) * tInvJac(j, i);
760  ++t_diff_n;
761  ++t_inv_diff_n;
762  }
763  }
764 
765  data.getDiffN(base).data().swap(diffHdivInvJac.data());
766  }
767 
769 }
770 
772  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
774 
775  if (type != MBTRI && type != MBTET)
777 
778  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
779 
780  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
781 
782  const unsigned int nb_base_functions = data.getN(base).size2() / 3;
783  if (!nb_base_functions)
784  continue;
785 
786  const double c = 1. / 6.;
787  const unsigned int nb_gauss_pts = data.getN(base).size1();
788 
789  double const a = c / vOlume;
790 
791  piolaN.resize(nb_gauss_pts, data.getN(base).size2(), false);
792  if (data.getN(base).size2() > 0) {
793  auto t_n = data.getFTensor1N<3>(base);
794  double *t_transformed_n_ptr = &*piolaN.data().begin();
796  t_transformed_n_ptr, // HVEC0
797  &t_transformed_n_ptr[HVEC1], &t_transformed_n_ptr[HVEC2]);
798  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
799  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
800  t_transformed_n(i) = a * tJac(i, k) * t_n(k);
801  ++t_n;
802  ++t_transformed_n;
803  }
804  }
805  data.getN(base).data().swap(piolaN.data());
806  }
807 
808  piolaDiffN.resize(nb_gauss_pts, data.getDiffN(base).size2(), false);
809  if (data.getDiffN(base).size2() > 0) {
810  auto t_diff_n = data.getFTensor2DiffN<3, 3>(base);
811  double *t_transformed_diff_n_ptr = &*piolaDiffN.data().begin();
813  t_transformed_diff_n(t_transformed_diff_n_ptr,
814  &t_transformed_diff_n_ptr[HVEC0_1],
815  &t_transformed_diff_n_ptr[HVEC0_2],
816  &t_transformed_diff_n_ptr[HVEC1_0],
817  &t_transformed_diff_n_ptr[HVEC1_1],
818  &t_transformed_diff_n_ptr[HVEC1_2],
819  &t_transformed_diff_n_ptr[HVEC2_0],
820  &t_transformed_diff_n_ptr[HVEC2_1],
821  &t_transformed_diff_n_ptr[HVEC2_2]);
822  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
823  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
824  t_transformed_diff_n(i, k) = a * tJac(i, j) * t_diff_n(j, k);
825  ++t_diff_n;
826  ++t_transformed_diff_n;
827  }
828  }
829  data.getDiffN(base).data().swap(piolaDiffN.data());
830  }
831  }
832 
834 }
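// Stand-alone sketch of the contravariant Piola map used above for H(div)
// bases: phi_x = (1/detJ) * J * phi_ref. For an affine tetrahedron
// detJ = 6 * volume, which is why the loop above scales by a = (1./6.)/vOlume.
// The helper name and all variables below are local to this illustration.
inline void examplePiolaHdiv(FTensor::Tensor2<double, 3, 3> &jac, double det_j,
                             FTensor::Tensor1<double, 3> &phi_ref,
                             FTensor::Tensor1<double, 3> &phi_glob) {
  FTensor::Index<'i', 3> i;
  FTensor::Index<'k', 3> k;
  phi_glob(i) = (1. / det_j) * jac(i, k) * phi_ref(k);
}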
835 
837 OpSetCovariantPiolaTransform::doWork(int side, EntityType type,
840 
841  if (type != MBEDGE && type != MBTRI && type != MBTET)
843 
844  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
845 
846  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
847 
848  const unsigned int nb_base_functions = data.getN(base).size2() / 3;
849  if (!nb_base_functions)
850  continue;
851 
852  const unsigned int nb_gauss_pts = data.getN(base).size1();
853  piolaN.resize(nb_gauss_pts, data.getN(base).size2(), false);
854  piolaDiffN.resize(nb_gauss_pts, data.getDiffN(base).size2(), false);
855 
856  auto t_n = data.getFTensor1N<3>(base);
857  double *t_transformed_n_ptr = &*piolaN.data().begin();
859  t_transformed_n_ptr, &t_transformed_n_ptr[HVEC1],
860  &t_transformed_n_ptr[HVEC2]);
861  auto t_diff_n = data.getFTensor2DiffN<3, 3>(base);
862  double *t_transformed_diff_n_ptr = &*piolaDiffN.data().begin();
863  FTensor::Tensor2<FTensor::PackPtr<double *, 9>, 3, 3> t_transformed_diff_n(
864  t_transformed_diff_n_ptr, &t_transformed_diff_n_ptr[HVEC0_1],
865  &t_transformed_diff_n_ptr[HVEC0_2], &t_transformed_diff_n_ptr[HVEC1_0],
866  &t_transformed_diff_n_ptr[HVEC1_1], &t_transformed_diff_n_ptr[HVEC1_2],
867  &t_transformed_diff_n_ptr[HVEC2_0], &t_transformed_diff_n_ptr[HVEC2_1],
868  &t_transformed_diff_n_ptr[HVEC2_2]);
869 
870  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
871  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
872  t_transformed_n(i) = tInvJac(k, i) * t_n(k);
873  t_transformed_diff_n(i, k) = tInvJac(j, i) * t_diff_n(j, k);
874  ++t_n;
875  ++t_transformed_n;
876  ++t_diff_n;
877  ++t_transformed_diff_n;
878  }
879  }
880  data.getN(base).data().swap(piolaN.data());
881  data.getDiffN(base).data().swap(piolaDiffN.data());
882  }
883 
884  // data.getBase() = base;
885 
887 }
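// Stand-alone sketch of the covariant map used above for H(curl) bases:
// phi_x = J^{-T} * phi_ref, written as a contraction with the transposed
// inverse Jacobian, exactly as tInvJac(k, i) * t_n(k) in the loop above.
// The helper name and all variables below are local to this illustration.
inline void examplePiolaHcurl(FTensor::Tensor2<double, 3, 3> &inv_jac,
                              FTensor::Tensor1<double, 3> &phi_ref,
                              FTensor::Tensor1<double, 3> &phi_glob) {
  FTensor::Index<'i', 3> i;
  FTensor::Index<'k', 3> k;
  phi_glob(i) = inv_jac(k, i) * phi_ref(k);
}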
888 
890 OpSetHoInvJacH1::doWork(int side, EntityType type,
893 
894  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
895 
896  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
897  if (data.getDiffN(base).size2() == 0)
898  continue;
899 
900  unsigned int nb_gauss_pts = data.getN(base).size1();
901  if (nb_gauss_pts == 0)
902  continue;
903  unsigned int nb_base_functions = data.getN(base).size2();
904  if (nb_base_functions == 0)
905  continue;
906 
907  if (invHoJac.size2() != 9) {
908  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
909  "It looks that ho inverse of Jacobian is not calculated %d != 9",
910  invHoJac.size2());
911  }
912  if (invHoJac.size1() != nb_gauss_pts) {
913  cerr << "type: " << type << " side: " << side << endl;
914  cerr << "shape fun: " << data.getN(base) << endl;
915  cerr << "diff shape fun " << data.getDiffN(base) << endl;
916  SETERRQ2(
917  PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
918  "It looks that ho inverse of Jacobian is not calculated %d != %d",
919  invHoJac.size1(), nb_gauss_pts);
920  }
921  double *t_inv_jac_ptr = &*invHoJac.data().begin();
923  t_inv_jac_ptr, &t_inv_jac_ptr[1], &t_inv_jac_ptr[2], &t_inv_jac_ptr[3],
924  &t_inv_jac_ptr[4], &t_inv_jac_ptr[5], &t_inv_jac_ptr[6],
925  &t_inv_jac_ptr[7], &t_inv_jac_ptr[8], 9);
926 
927  diffNinvJac.resize(nb_gauss_pts, 3 * nb_base_functions, false);
928  double *t_inv_n_ptr = &*diffNinvJac.data().begin();
929  FTensor::Tensor1<double *, 3> t_inv_diff_n(t_inv_n_ptr, &t_inv_n_ptr[1],
930  &t_inv_n_ptr[2], 3);
931 
932  switch (type) {
933  case MBVERTEX:
934  case MBEDGE:
935  case MBTRI:
936  case MBTET: {
937  FTensor::Tensor1<double *, 3> t_diff_n = data.getFTensor1DiffN<3>(base);
938  for (unsigned int gg = 0; gg < nb_gauss_pts; ++gg) {
939  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
940  t_inv_diff_n(i) = t_diff_n(j) * t_inv_jac(j, i);
941  ++t_diff_n;
942  ++t_inv_diff_n;
943  }
944  ++t_inv_jac;
945  }
946  } break;
947  default:
948  SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED, "not implemented");
949  }
950 
951  if (type == MBVERTEX) {
952  data.getDiffN(base).resize(diffNinvJac.size1(), diffNinvJac.size2(),
953  false);
954  }
955  data.getDiffN(base).data().swap(diffNinvJac.data());
956  }
957 
959 }
960 
962 OpSetHoInvJacHdivAndHcurl::doWork(int side, EntityType type,
965 
966  if (type != MBEDGE && type != MBTRI && type != MBTET)
968 
969  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
970 
971  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
972 
973  diffHdivInvJac.resize(data.getDiffN(base).size1(),
974  data.getDiffN(base).size2(), false);
975 
976  unsigned int nb_gauss_pts = data.getDiffN(base).size1();
977  unsigned int nb_base_functions = data.getDiffN(base).size2() / 9;
978  if (nb_base_functions == 0)
979  continue;
980 
981  auto t_diff_n = data.getFTensor2DiffN<3, 3>(base);
982  double *t_inv_diff_n_ptr = &*diffHdivInvJac.data().begin();
984  t_inv_diff_n_ptr, &t_inv_diff_n_ptr[HVEC0_1],
985  &t_inv_diff_n_ptr[HVEC0_2], &t_inv_diff_n_ptr[HVEC1_0],
986  &t_inv_diff_n_ptr[HVEC1_1], &t_inv_diff_n_ptr[HVEC1_2],
987  &t_inv_diff_n_ptr[HVEC2_0], &t_inv_diff_n_ptr[HVEC2_1],
988  &t_inv_diff_n_ptr[HVEC2_2]);
989  double *t_inv_jac_ptr = &*invHoJac.data().begin();
991  t_inv_jac_ptr, &t_inv_jac_ptr[1], &t_inv_jac_ptr[2], &t_inv_jac_ptr[3],
992  &t_inv_jac_ptr[4], &t_inv_jac_ptr[5], &t_inv_jac_ptr[6],
993  &t_inv_jac_ptr[7], &t_inv_jac_ptr[8]);
994 
995  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
996  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
997  t_inv_diff_n(i, j) = t_inv_jac(k, j) * t_diff_n(i, k);
998  ++t_diff_n;
999  ++t_inv_diff_n;
1000  }
1001  ++t_inv_jac;
1002  }
1003 
1004  data.getDiffN(base).data().swap(diffHdivInvJac.data());
1005  }
1006 
1008 }
1009 
1011  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
1013 
1014  if (type != MBTRI && type != MBTET)
1016 
1017  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
1018 
1019  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
1020 
1021  unsigned int nb_gauss_pts = data.getN(base).size1();
1022  unsigned int nb_base_functions = data.getN(base).size2() / 3;
1023  piolaN.resize(nb_gauss_pts, data.getN(base).size2(), false);
1024  piolaDiffN.resize(nb_gauss_pts, data.getDiffN(base).size2(), false);
1025 
1026  auto t_n = data.getFTensor1N<3>(base);
1027  double *t_transformed_n_ptr = &*piolaN.data().begin();
1029  t_transformed_n_ptr, // HVEC0
1030  &t_transformed_n_ptr[HVEC1], &t_transformed_n_ptr[HVEC2]);
1031  auto t_diff_n = data.getFTensor2DiffN<3, 3>(base);
1032  double *t_transformed_diff_n_ptr = &*piolaDiffN.data().begin();
1033  FTensor::Tensor2<FTensor::PackPtr<double *, 9>, 3, 3> t_transformed_diff_n(
1034  t_transformed_diff_n_ptr, &t_transformed_diff_n_ptr[HVEC0_1],
1035  &t_transformed_diff_n_ptr[HVEC0_2], &t_transformed_diff_n_ptr[HVEC1_0],
1036  &t_transformed_diff_n_ptr[HVEC1_1], &t_transformed_diff_n_ptr[HVEC1_2],
1037  &t_transformed_diff_n_ptr[HVEC2_0], &t_transformed_diff_n_ptr[HVEC2_1],
1038  &t_transformed_diff_n_ptr[HVEC2_2]);
1039 
1040  FTensor::Tensor0<double *> t_det(&*detHoJac.data().begin());
1041  double *t_jac_ptr = &*hoJac.data().begin();
1043  t_jac_ptr, &t_jac_ptr[1], &t_jac_ptr[2], &t_jac_ptr[3], &t_jac_ptr[4],
1044  &t_jac_ptr[5], &t_jac_ptr[6], &t_jac_ptr[7], &t_jac_ptr[8]);
1045 
1046  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
1047  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
1048  const double a = 1. / t_det;
1049  t_transformed_n(i) = a * t_jac(i, k) * t_n(k);
1050  t_transformed_diff_n(i, k) = a * t_jac(i, j) * t_diff_n(j, k);
1051  ++t_n;
1052  ++t_transformed_n;
1053  ++t_diff_n;
1054  ++t_transformed_diff_n;
1055  }
1056  ++t_det;
1057  ++t_jac;
1058  }
1059 
1060  data.getN(base).data().swap(piolaN.data());
1061  data.getDiffN(base).data().swap(piolaDiffN.data());
1062  }
1063 
1065 }
1066 
1068  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
1070 
1071  if (type != MBEDGE && type != MBTRI && type != MBTET)
1073 
1074  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
1075 
1076  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
1077 
1078  unsigned int nb_gauss_pts = data.getN(base).size1();
1079  unsigned int nb_base_functions = data.getN(base).size2() / 3;
1080  piolaN.resize(nb_gauss_pts, data.getN(base).size2(), false);
1081  piolaDiffN.resize(nb_gauss_pts, data.getDiffN(base).size2(), false);
1082 
1083  auto t_n = data.getFTensor1N<3>(base);
1084  double *t_transformed_n_ptr = &*piolaN.data().begin();
1086  t_transformed_n_ptr, // HVEC0
1087  &t_transformed_n_ptr[HVEC1], &t_transformed_n_ptr[HVEC2]);
1088  auto t_diff_n = data.getFTensor2DiffN<3, 3>(base);
1089  double *t_transformed_diff_n_ptr = &*piolaDiffN.data().begin();
1090  FTensor::Tensor2<FTensor::PackPtr<double *, 9>, 3, 3> t_transformed_diff_n(
1091  t_transformed_diff_n_ptr, &t_transformed_diff_n_ptr[HVEC0_1],
1092  &t_transformed_diff_n_ptr[HVEC0_2], &t_transformed_diff_n_ptr[HVEC1_0],
1093  &t_transformed_diff_n_ptr[HVEC1_1], &t_transformed_diff_n_ptr[HVEC1_2],
1094  &t_transformed_diff_n_ptr[HVEC2_0], &t_transformed_diff_n_ptr[HVEC2_1],
1095  &t_transformed_diff_n_ptr[HVEC2_2]);
1096 
1097  double *t_inv_jac_ptr = &*hoInvJac.data().begin();
1099  t_inv_jac_ptr, &t_inv_jac_ptr[1], &t_inv_jac_ptr[2], &t_inv_jac_ptr[3],
1100  &t_inv_jac_ptr[4], &t_inv_jac_ptr[5], &t_inv_jac_ptr[6],
1101  &t_inv_jac_ptr[7], &t_inv_jac_ptr[8], 9);
1102 
1103  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
1104  for (unsigned int bb = 0; bb != nb_base_functions; ++bb) {
1105  t_transformed_n(i) = t_inv_jac(k, i) * t_n(k);
1106  t_transformed_diff_n(i, k) = t_inv_jac(j, i) * t_diff_n(j, k);
1107  ++t_n;
1108  ++t_transformed_n;
1109  ++t_diff_n;
1110  ++t_transformed_diff_n;
1111  }
1112  ++t_inv_jac;
1113  }
1114 
1115  data.getN(base).data().swap(piolaN.data());
1116  data.getDiffN(base).data().swap(piolaDiffN.data());
1117  }
1118 
1120 }
1121 
1123 OpGetCoordsAndNormalsOnFace::doWork(int side, EntityType type,
1126 
1127  unsigned int nb_dofs = data.getFieldData().size();
1128  if (nb_dofs == 0)
1130 
1131  int nb_gauss_pts = data.getN().size1();
1132  cOords_at_GaussPt.resize(nb_gauss_pts, 3, false);
1133  tAngent1_at_GaussPt.resize(nb_gauss_pts, 3, false);
1134  tAngent2_at_GaussPt.resize(nb_gauss_pts, 3, false);
1135 
1136  auto get_ftensor1 = [](MatrixDouble &m) {
1138  &m(0, 0), &m(0, 1), &m(0, 2));
1139  };
1140  auto t_coords = get_ftensor1(cOords_at_GaussPt);
1141  auto t_t1 = get_ftensor1(tAngent1_at_GaussPt);
1142  auto t_t2 = get_ftensor1(tAngent2_at_GaussPt);
1144  FTensor::Number<0> N0;
1145  FTensor::Number<1> N1;
1146 
1147  switch (type) {
1148  case MBVERTEX: {
1149  cOords_at_GaussPt.clear();
1150  tAngent1_at_GaussPt.clear();
1151  tAngent2_at_GaussPt.clear();
1152  auto t_base = data.getFTensor0N();
1153  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
1154  auto t_data = data.getFTensor1FieldData<3>();
1155  auto t_diff_base = data.getFTensor1DiffN<2>();
1156  for (int nn = 0; nn != 3; nn++) {
1157  t_coords(i) += t_base * t_data(i);
1158  t_t1(i) += t_data(i) * t_diff_base(N0);
1159  t_t2(i) += t_data(i) * t_diff_base(N1);
1160  ++t_data;
1161  ++t_base;
1162  ++t_diff_base;
1163  }
1164  ++t_coords;
1165  ++t_t1;
1166  ++t_t2;
1167  }
1168  } break;
1169  case MBEDGE:
1170  case MBTRI: {
1171  if (2 * data.getN().size2() != data.getDiffN().size2()) {
1172  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "data inconsistency");
1173  }
1174  if (nb_dofs % 3 != 0) {
1175  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "data inconsistency");
1176  }
1177  if (nb_dofs > 3 * data.getN().size2()) {
1178  unsigned int nn = 0;
1179  for (; nn != nb_dofs; nn++) {
1180  if (!data.getFieldDofs()[nn]->getActive())
1181  break;
1182  }
1183  if (nn > 3 * data.getN().size2()) {
1184  SETERRQ1(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
1185  "data inconsistency for base %s",
1187  } else {
1188  nb_dofs = nn;
1189  if (!nb_dofs)
1191  }
1192  }
1193  const int nb_base_functions = data.getN().size2();
1194  auto t_base = data.getFTensor0N();
1195  auto t_diff_base = data.getFTensor1DiffN<2>();
1196  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
1197  auto t_data = data.getFTensor1FieldData<3>();
1198  int bb = 0;
1199  for (; bb != nb_dofs / 3; ++bb) {
1200  t_coords(i) += t_base * t_data(i);
1201  t_t1(i) += t_data(i) * t_diff_base(N0);
1202  t_t2(i) += t_data(i) * t_diff_base(N1);
1203  ++t_data;
1204  ++t_base;
1205  ++t_diff_base;
1206  }
1207  for (; bb != nb_base_functions; ++bb) {
1208  ++t_base;
1209  ++t_diff_base;
1210  }
1211  ++t_coords;
1212  ++t_t1;
1213  ++t_t2;
1214  }
1215  } break;
1216  default:
1217  SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED, "not implemented");
1218  }
1219 
1221 }
1222 
1225 
1226  nOrmals_at_GaussPt.resize(tAngent1_at_GaussPt.size1(), 3, false);
1227 
1228  auto get_ftensor1 = [](MatrixDouble &m) {
1230  &m(0, 0), &m(0, 1), &m(0, 2));
1231  };
1232  auto t_normal = get_ftensor1(nOrmals_at_GaussPt);
1233  auto t_t1 = get_ftensor1(tAngent1_at_GaussPt);
1234  auto t_t2 = get_ftensor1(tAngent2_at_GaussPt);
1235 
1239 
1240  for (unsigned int gg = 0; gg != tAngent1_at_GaussPt.size1(); ++gg) {
1241  t_normal(j) = FTensor::levi_civita(i, j, k) * t_t1(k) * t_t2(i);
1242  ++t_normal;
1243  ++t_t1;
1244  ++t_t2;
1245  }
1246 
1248 }
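// The Levi-Civita contraction above evaluates n = t1 x t2 at every
// integration point. An equivalent plain cross product for a single point,
// as an illustrative helper with local names only:
inline void exampleCrossProduct(const double t1[3], const double t2[3],
                                double n[3]) {
  n[0] = t1[1] * t2[2] - t1[2] * t2[1];
  n[1] = t1[2] * t2[0] - t1[0] * t2[2];
  n[2] = t1[0] * t2[1] - t1[1] * t2[0];
}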
1249 
1251 OpGetCoordsAndNormalsOnPrism::doWork(int side, EntityType type,
1254 
1255  if (data.getFieldData().size() == 0)
1257  const int valid_edges3[] = {1, 1, 1, 0, 0, 0, 0, 0, 0};
1258  const int valid_faces3[] = {0, 0, 0, 1, 0, 0, 0, 0, 0};
1259  const int valid_edges4[] = {0, 0, 0, 0, 0, 0, 1, 1, 1};
1260  const int valid_faces4[] = {0, 0, 0, 0, 1, 0, 0, 0, 0};
1261 
1262  if (type == MBEDGE) {
1263  if (!valid_edges3[side] || valid_edges4[side])
1265  } else if (type == MBTRI) {
1266  if (!valid_faces3[side] || valid_faces4[side])
1268  }
1269 
1270  switch (type) {
1271  case MBVERTEX: {
1272  for (unsigned int gg = 0; gg < data.getN().size1(); ++gg) {
1273  for (int dd = 0; dd < 3; dd++) {
 1274  cOords_at_GaussPtF3(gg, dd) =
 1275  cblas_ddot(3, &data.getN(gg)[0], 1, &data.getFieldData()[dd], 3);
 1276  tAngent1_at_GaussPtF3(gg, dd) = cblas_ddot(
 1277  3, &data.getDiffN()(gg, 0), 2, &data.getFieldData()[dd], 3);
 1278  tAngent2_at_GaussPtF3(gg, dd) = cblas_ddot(
 1279  3, &data.getDiffN()(gg, 1), 2, &data.getFieldData()[dd], 3);
 1280  cOords_at_GaussPtF4(gg, dd) = cblas_ddot(
 1281  3, &data.getN(gg)[0], 1, &data.getFieldData()[9 + dd], 3);
 1282  tAngent1_at_GaussPtF4(gg, dd) = cblas_ddot(
 1283  3, &data.getDiffN()(gg, 6 + 0), 2, &data.getFieldData()[9 + dd], 3);
 1284  tAngent2_at_GaussPtF4(gg, dd) = cblas_ddot(
 1285  3, &data.getDiffN()(gg, 6 + 1), 2, &data.getFieldData()[9 + dd], 3);
1286  }
1287  }
1288  } break;
1289  case MBEDGE:
1290  case MBTRI: {
1291  if (2 * data.getN().size2() != data.getDiffN().size2()) {
1292  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "data inconsistency");
1293  }
1294  unsigned int nb_dofs = data.getFieldData().size();
1295  if (nb_dofs % 3 != 0) {
1296  SETERRQ(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY, "data inconsistency");
1297  }
1298  if (nb_dofs > 3 * data.getN().size2()) {
1299  SETERRQ2(PETSC_COMM_SELF, MOFEM_DATA_INCONSISTENCY,
1300  "data inconsistency, side %d type %d", side, type);
1301  }
1302  for (unsigned int gg = 0; gg < data.getN().size1(); ++gg) {
1303  for (int dd = 0; dd < 3; dd++) {
1304  if ((type == MBTRI && valid_faces3[side]) ||
1305  (type == MBEDGE && valid_edges3[side])) {
 1306  cOords_at_GaussPtF3(gg, dd) += cblas_ddot(
 1307  nb_dofs / 3, &data.getN(gg)[0], 1, &data.getFieldData()[dd], 3);
1308  tAngent1_at_GaussPtF3(gg, dd) +=
1309  cblas_ddot(nb_dofs / 3, &data.getDiffN()(gg, 0), 2,
1310  &data.getFieldData()[dd], 3);
1311  tAngent2_at_GaussPtF3(gg, dd) +=
1312  cblas_ddot(nb_dofs / 3, &data.getDiffN()(gg, 1), 2,
1313  &data.getFieldData()[dd], 3);
1314  } else if ((type == MBTRI && valid_faces4[side]) ||
1315  (type == MBEDGE && valid_edges4[side])) {
 1316  cOords_at_GaussPtF4(gg, dd) += cblas_ddot(
 1317  nb_dofs / 3, &data.getN(gg)[0], 1, &data.getFieldData()[dd], 3);
1318  tAngent1_at_GaussPtF4(gg, dd) +=
1319  cblas_ddot(nb_dofs / 3, &data.getDiffN()(gg, 0), 2,
1320  &data.getFieldData()[dd], 3);
1321  tAngent2_at_GaussPtF4(gg, dd) +=
1322  cblas_ddot(nb_dofs / 3, &data.getDiffN()(gg, 1), 2,
1323  &data.getFieldData()[dd], 3);
1324  }
1325  }
1326  }
1327  } break;
1328  default:
1329  SETERRQ(PETSC_COMM_SELF, MOFEM_NOT_IMPLEMENTED, "not implemented");
1330  }
1331 
1333 }
1334 
1337 
1338  sPin.resize(3, 3);
1339  sPin.clear();
1340  nOrmals_at_GaussPtF3.resize(tAngent1_at_GaussPtF3.size1(), 3, false);
1341  for (unsigned int gg = 0; gg < tAngent1_at_GaussPtF3.size1(); ++gg) {
1342  ierr = Spin(&*sPin.data().begin(), &tAngent1_at_GaussPtF3(gg, 0));
1343  CHKERRG(ierr);
1344  cblas_dgemv(CblasRowMajor, CblasNoTrans, 3, 3, 1., &*sPin.data().begin(), 3,
1345  &tAngent2_at_GaussPtF3(gg, 0), 1, 0.,
1346  &nOrmals_at_GaussPtF3(gg, 0), 1);
1347  }
1348  sPin.clear();
1349  nOrmals_at_GaussPtF4.resize(tAngent1_at_GaussPtF4.size1(), 3, false);
1350  for (unsigned int gg = 0; gg < tAngent1_at_GaussPtF4.size1(); ++gg) {
1351  ierr = Spin(&*sPin.data().begin(), &tAngent1_at_GaussPtF4(gg, 0));
1352  CHKERRG(ierr);
1353  cblas_dgemv(CblasRowMajor, CblasNoTrans, 3, 3, 1., &*sPin.data().begin(), 3,
1354  &tAngent2_at_GaussPtF4(gg, 0), 1, 0.,
1355  &nOrmals_at_GaussPtF4(gg, 0), 1);
1356  }
1358 }
1359 
1361  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
1363 
1364  if (type != MBTRI)
1366 
1367  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
1368 
1369  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
1370 
1371  int nb_gauss_pts = data.getN(base).size1();
1372  if (nb_gauss_pts) {
1374  auto t_normal =
1376  const double l02 = t_normal(i) * t_normal(i);
1377  int nb_base_functions = data.getN(base).size2() / 3;
1378  auto t_n = data.getFTensor1N<3>(base);
1379  if (normalsAtGaussPts.size1() == (unsigned int)nb_gauss_pts) {
1380  auto t_ho_normal =
1382  &normalsAtGaussPts(0, 0), &normalsAtGaussPts(0, 1),
1383  &normalsAtGaussPts(0, 2));
1384  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
1385  for (int bb = 0; bb != nb_base_functions; ++bb) {
1386  const double v = t_n(0);
1387  const double l2 = t_ho_normal(i) * t_ho_normal(i);
1388  t_n(i) = (v / l2) * t_ho_normal(i);
1389  ++t_n;
1390  }
1391  ++t_ho_normal;
1392  }
1393  } else {
1394  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
1395  for (int bb = 0; bb != nb_base_functions; ++bb) {
1396  const double v = t_n(0);
1397  t_n(i) = (v / l02) * t_normal(i);
1398  ++t_n;
1399  }
1400  }
1401  }
1402  }
1403  }
1404 
1406 }
1407 
1409  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
1410 
1412 
1413  if (type != MBEDGE && type != MBTRI)
1415 
1419 
1420  // double zero = 0;
1422  &tAngent0[0], &tAngent1[0], &nOrmal[0], &tAngent0[1], &tAngent1[1],
1423  &nOrmal[1], &tAngent0[2], &tAngent1[2], &nOrmal[2], 3);
1424  double det;
1426  ierr = determinantTensor3by3(t_m, det);
1427  CHKERRG(ierr);
1428  ierr = invertTensor3by3(t_m, det, t_inv_m);
1429  CHKERRG(ierr);
1430 
1431  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
1432 
1433  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
1434 
1435  int nb_dofs = data.getN(base).size2() / 3;
1436  int nb_gauss_pts = data.getN(base).size1();
1437 
1438  MatrixDouble piola_n(data.getN(base).size1(), data.getN(base).size2());
1439  MatrixDouble diff_piola_n(data.getDiffN(base).size1(),
1440  data.getDiffN(base).size2());
1441 
1442  if (nb_dofs > 0 && nb_gauss_pts > 0) {
1443 
1444  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
1446  &data.getN(base)(0, HVEC0), &data.getN(base)(0, HVEC1),
1447  &data.getN(base)(0, HVEC2));
1449  &data.getDiffN(base)(0, HVEC0_0), &data.getDiffN(base)(0, HVEC0_1),
1450  &data.getDiffN(base)(0, HVEC1_0), &data.getDiffN(base)(0, HVEC1_1),
1451  &data.getDiffN(base)(0, HVEC2_0), &data.getDiffN(base)(0, HVEC2_1));
1452  FTensor::Tensor1<FTensor::PackPtr<double *, 3>, 3> t_transformed_h_curl(
1453  &piola_n(0, HVEC0), &piola_n(0, HVEC1), &piola_n(0, HVEC2));
1455  t_transformed_diff_h_curl(
1456  &diff_piola_n(0, HVEC0_0), &diff_piola_n(0, HVEC0_1),
1457  &diff_piola_n(0, HVEC1_0), &diff_piola_n(0, HVEC1_1),
1458  &diff_piola_n(0, HVEC2_0), &diff_piola_n(0, HVEC2_1));
1459 
1460  int cc = 0;
1461  if (normalsAtGaussPts.size1() == (unsigned int)nb_gauss_pts) {
1462  // HO geometry is set, so jacobian is different at each gauss point
1464  &tangent0AtGaussPt(0, 0), &tangent1AtGaussPt(0, 0),
1465  &normalsAtGaussPts(0, 0), &tangent0AtGaussPt(0, 1),
1466  &tangent1AtGaussPt(0, 1), &normalsAtGaussPts(0, 1),
1467  &tangent0AtGaussPt(0, 2), &tangent1AtGaussPt(0, 2),
1468  &normalsAtGaussPts(0, 2), 3);
1469  for (int gg = 0; gg < nb_gauss_pts; ++gg) {
1470  ierr = determinantTensor3by3(t_m_at_pts, det);
1471  CHKERRG(ierr);
1472  ierr = invertTensor3by3(t_m_at_pts, det, t_inv_m);
1473  CHKERRG(ierr);
1474  for (int ll = 0; ll != nb_dofs; ll++) {
1475  t_transformed_h_curl(i) = t_inv_m(j, i) * t_h_curl(j);
1476  t_transformed_diff_h_curl(i, k) =
1477  t_inv_m(j, i) * t_diff_h_curl(j, k);
1478  ++t_h_curl;
1479  ++t_transformed_h_curl;
1480  ++t_diff_h_curl;
1481  ++t_transformed_diff_h_curl;
1482  ++cc;
1483  }
1484  ++t_m_at_pts;
1485  }
1486  } else {
1487  for (int gg = 0; gg < nb_gauss_pts; ++gg) {
1488  for (int ll = 0; ll != nb_dofs; ll++) {
1489  t_transformed_h_curl(i) = t_inv_m(j, i) * t_h_curl(j);
1490  t_transformed_diff_h_curl(i, k) =
1491  t_inv_m(j, i) * t_diff_h_curl(j, k);
1492  ++t_h_curl;
1493  ++t_transformed_h_curl;
1494  ++t_diff_h_curl;
1495  ++t_transformed_diff_h_curl;
1496  ++cc;
1497  }
1498  }
1499  }
1500  if (cc != nb_gauss_pts * nb_dofs) {
1501  SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSIBLE_CASE, "Data inconsistency");
1502  }
1503  data.getN(base).data().swap(piola_n.data());
1504  data.getDiffN(base).data().swap(diff_piola_n.data());
1505  }
1506  }
1507 
1509 }
1510 
1512 OpGetHoTangentOnEdge::doWork(int side, EntityType type,
1515 
1516  int nb_dofs = data.getFieldData().size();
1517  if (nb_dofs == 0)
1519 
1520  int nb_gauss_pts = data.getN().size1();
1521  tAngent.resize(nb_gauss_pts, 3, false);
1522 
1523  int nb_approx_fun = data.getN().size2();
1524  double *diff = &*data.getDiffN().data().begin();
1525  double *dofs[] = {&data.getFieldData()[0], &data.getFieldData()[1],
1526  &data.getFieldData()[2]};
1527 
1528  tAngent.resize(nb_gauss_pts, 3, false);
1529 
1530  switch (type) {
1531  case MBVERTEX:
1532  for (int dd = 0; dd != 3; dd++) {
1533  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
1534  tAngent(gg, dd) = cblas_ddot(2, diff, 1, dofs[dd], 3);
1535  }
1536  }
1537  break;
1538  case MBEDGE:
1539  if (nb_dofs % 3) {
1540  SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSIBLE_CASE,
1541  "Approximated field should be rank 3, i.e. vector in 3d space");
1542  }
1543  for (int dd = 0; dd != 3; dd++) {
1544  for (int gg = 0; gg != nb_gauss_pts; ++gg) {
1545  tAngent(gg, dd) +=
1546  cblas_ddot(nb_dofs / 3, &diff[gg * nb_approx_fun], 1, dofs[dd], 3);
1547  }
1548  }
1549  break;
1550  default:
1551  SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSIBLE_CASE,
1552  "This operator can calculate tangent vector only on edge");
1553  }
1554 
1556 }
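// The field data used above is stored interleaved as (x0,y0,z0, x1,y1,z1, ...),
// which is why cblas_ddot() is called with stride 3 on the dof array. A
// hypothetical stand-alone equivalent of one such dot product:
inline double exampleTangentComponent(const int nb_shape, const double *diff_n,
                                      const double *dofs_xyz,
                                      const int component) {
  // diff_n: derivatives of the shape functions along the edge parameter;
  // dofs_xyz + component with stride 3 picks one Cartesian component.
  return cblas_ddot(nb_shape, diff_n, 1, &dofs_xyz[component], 3);
}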
1557 
1559  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
1561 
1562  if (type != MBEDGE)
1564 
1567  &tAngent[0], &tAngent[1], &tAngent[2]);
1568  const double l0 = t_m(i) * t_m(i);
1569  std::vector<double> l1;
1570  {
1571  int nb_gauss_pts = tangentAtGaussPt.size1();
1572  if (nb_gauss_pts) {
1573  l1.resize(nb_gauss_pts);
1575  &tangentAtGaussPt(0, 0), &tangentAtGaussPt(0, 1),
1576  &tangentAtGaussPt(0, 2));
1577  for (int gg = 0; gg < nb_gauss_pts; ++gg) {
1578  l1[gg] = t_m_at_pts(i) * t_m_at_pts(i);
1579  ++t_m_at_pts;
1580  }
1581  }
1582  }
1583 
1584  for (int b = AINSWORTH_LEGENDRE_BASE; b != LASTBASE; b++) {
1585 
1586  FieldApproximationBase base = static_cast<FieldApproximationBase>(b);
1587  int nb_gauss_pts = data.getN(base).size1();
1588  int nb_dofs = data.getN(base).size2() / 3;
1589  if (nb_gauss_pts > 0 && nb_dofs > 0) {
1591  &data.getN(base)(0, HVEC0), &data.getN(base)(0, HVEC1),
1592  &data.getN(base)(0, HVEC2));
1593  int cc = 0;
1594  if (tangentAtGaussPt.size1() == (unsigned int)nb_gauss_pts) {
1596  &tangentAtGaussPt(0, 0), &tangentAtGaussPt(0, 1),
1597  &tangentAtGaussPt(0, 2));
1598  for (int gg = 0; gg < nb_gauss_pts; ++gg) {
1599  const double l0 = l1[gg];
1600  for (int ll = 0; ll != nb_dofs; ll++) {
1601  const double val = t_h_curl(0);
1602  const double a = val / l0;
1603  t_h_curl(i) = t_m_at_pts(i) * a;
1604  ++t_h_curl;
1605  ++cc;
1606  }
1607  ++t_m_at_pts;
1608  }
1609  } else {
1610  for (int gg = 0; gg < nb_gauss_pts; ++gg) {
1611  for (int ll = 0; ll != nb_dofs; ll++) {
1612  const double val = t_h_curl(0);
1613  const double a = val / l0;
1614  t_h_curl(i) = t_m(i) * a;
1615  ++t_h_curl;
1616  ++cc;
1617  }
1618  }
1619  }
1620  if (cc != nb_gauss_pts * nb_dofs) {
1621  SETERRQ(PETSC_COMM_SELF, MOFEM_IMPOSIBLE_CASE, "Data inconsistency");
1622  }
1623  }
1624  }
1625 
1627 }
1628 
1629 template <>
1630 template <>
1633  double *ptr = &*data.data().begin();
1634  return FTensor::Tensor1<double *, 3>(ptr, &ptr[1], &ptr[2], 3);
1635 }
1636 
1637 template <>
1638 template <>
1641  double *ptr = &*data.data().begin();
1642  return FTensor::Tensor2<double *, 3, 3>(ptr, &ptr[1], &ptr[2], &ptr[3],
1643  &ptr[4], &ptr[5], &ptr[6], &ptr[7],
1644  &ptr[8], 9);
1645 }
1646 
1647 template <>
1649  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
1651  if (data.getBase() == NOBASE)
1653  const unsigned int nb_gauss_pts = data.getN().size1();
1654  const unsigned int nb_base_functions = data.getN().size2();
1655  const unsigned int nb_dofs = data.getFieldData().size();
1656  if (!nb_dofs)
1658  auto t_n = data.getFTensor0N();
1659  auto t_val = getValAtGaussPtsTensor<3>(dataAtGaussPts);
1660  auto t_grad = getGradAtGaussPtsTensor<3, 3>(dataGradAtGaussPts);
1663  if (type == MBVERTEX &&
1664  data.getDiffN().data().size() == 3 * nb_base_functions) {
1665  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
1666  auto t_data = data.getFTensor1FieldData<3>();
1667  auto t_diff_n = data.getFTensor1DiffN<3>();
1668  unsigned int bb = 0;
1669  for (; bb != nb_dofs / 3; ++bb) {
1670  t_val(i) += t_data(i) * t_n;
1671  t_grad(i, j) += t_data(i) * t_diff_n(j);
1672  ++t_n;
1673  ++t_diff_n;
1674  ++t_data;
1675  }
1676  ++t_val;
1677  ++t_grad;
1678  for (; bb != nb_base_functions; ++bb) {
1679  ++t_n;
1680  }
1681  }
1682  } else {
1683  auto t_diff_n = data.getFTensor1DiffN<3>();
1684  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
1685  auto t_data = data.getFTensor1FieldData<3>();
1686  unsigned int bb = 0;
1687  for (; bb != nb_dofs / 3; ++bb) {
1688  t_val(i) += t_data(i) * t_n;
1689  t_grad(i, j) += t_data(i) * t_diff_n(j);
1690  ++t_n;
1691  ++t_diff_n;
1692  ++t_data;
1693  }
1694  ++t_val;
1695  ++t_grad;
1696  for (; bb != nb_base_functions; ++bb) {
1697  ++t_n;
1698  ++t_diff_n;
1699  }
1700  }
1701  }
1703 }
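// The specialisation above accumulates u(x_g) = sum_b N_b(x_g) u_b and
// grad u(x_g) = sum_b u_b (x) dN_b/dx at each integration point. The same
// per-base-function update for a single point, as an illustrative helper
// with local names only:
inline void exampleAccumulatePoint(const double n_b,
                                   FTensor::Tensor1<double, 3> &diff_n_b,
                                   FTensor::Tensor1<double, 3> &dof,
                                   FTensor::Tensor1<double, 3> &val,
                                   FTensor::Tensor2<double, 3, 3> &grad) {
  FTensor::Index<'i', 3> i;
  FTensor::Index<'j', 3> j;
  val(i) += dof(i) * n_b;             // value contribution of one base function
  grad(i, j) += dof(i) * diff_n_b(j); // gradient contribution (outer product)
}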
1704 
1705 template <>
1707  int side, EntityType type, DataForcesAndSourcesCore::EntData &data) {
1709  const unsigned int nb_gauss_pts = data.getN().size1();
1710  const unsigned int nb_base_functions = data.getN().size2();
1711  // bool constant_diff = false;
1712  const unsigned int nb_dofs = data.getFieldData().size();
1713  auto t_n = data.getFTensor0N();
1715  FTensor::Tensor0<double *>(&*dataAtGaussPts.data().begin(), 1);
1716  double *ptr = &*dataGradAtGaussPts.data().begin();
1717  FTensor::Tensor1<FTensor::PackPtr<double *, 3>, 3> t_grad(ptr, &ptr[1],
1718  &ptr[2]);
1720  if (type == MBVERTEX &&
1721  data.getDiffN().data().size() == 3 * nb_base_functions) {
1722  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
1723  auto t_data = data.getFTensor0FieldData();
1724  auto t_diff_n = data.getFTensor1DiffN<3>();
1725  unsigned int bb = 0;
 1726  for (; bb != nb_dofs; ++bb) {
1727  t_val += t_data * t_n;
1728  t_grad(i) += t_data * t_diff_n(i);
1729  ++t_n;
1730  ++t_diff_n;
1731  ++t_data;
1732  }
1733  ++t_val;
1734  ++t_grad;
1735  for (; bb != nb_base_functions; ++bb) {
1736  ++t_n;
1737  }
1738  }
1739  } else {
1740  auto t_diff_n = data.getFTensor1DiffN<3>();
1741  for (unsigned int gg = 0; gg != nb_gauss_pts; ++gg) {
1742  auto t_data = data.getFTensor0FieldData();
1743  unsigned int bb = 0;
1744  for (; bb != nb_dofs / 3; ++bb) {
1745  t_val += t_data * t_n;
1746  t_grad(i) += t_data * t_diff_n(i);
1747  ++t_n;
1748  ++t_diff_n;
1749  ++t_data;
1750  }
1751  ++t_val;
1752  ++t_grad;
1753  for (; bb != nb_base_functions; ++bb) {
1754  ++t_n;
1755  ++t_diff_n;
1756  }
1757  }
1758  }
1759  MoFEMFunctionReturn(0);
1760 }
1761 } // namespace MoFEM
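For reference, a stand-alone sketch of scalar field evaluation at Gauss points in the spirit of the <1, 3> specialization above, written with plain loops instead of FTensor iterators (array names are illustrative, not the MoFEM members):

    #include <cstddef>
    #include <vector>

    int main() {
      const std::size_t nb_gauss_pts = 3, nb_dofs = 4;
      // Row-major base functions N(gg, bb) and derivatives dN(gg, bb, 0..2).
      std::vector<double> N(nb_gauss_pts * nb_dofs, 0.25);
      std::vector<double> diffN(nb_gauss_pts * nb_dofs * 3, 0.1);
      std::vector<double> dof(nb_dofs, 1.0);           // scalar field coefficients
      std::vector<double> val(nb_gauss_pts, 0.0);      // dataAtGaussPts analogue
      std::vector<double> grad(nb_gauss_pts * 3, 0.0); // dataGradAtGaussPts analogue

      for (std::size_t gg = 0; gg != nb_gauss_pts; ++gg)
        for (std::size_t bb = 0; bb != nb_dofs; ++bb) {
          val[gg] += dof[bb] * N[gg * nb_dofs + bb];
          for (std::size_t j = 0; j != 3; ++j)
            grad[gg * 3 + j] += dof[bb] * diffN[(gg * nb_dofs + bb) * 3 + j];
        }
      return 0;
    }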