New upstream version 2019.2.0~git20200919.42ceef3
Drew Parsons
3 years ago
29 | 29 | from FIAT.crouzeix_raviart import CrouzeixRaviart |
30 | 30 | from FIAT.regge import Regge |
31 | 31 | from FIAT.hellan_herrmann_johnson import HellanHerrmannJohnson |
32 | from FIAT.arnold_winther import ArnoldWinther | |
33 | from FIAT.arnold_winther import ArnoldWintherNC | |
34 | from FIAT.mardal_tai_winther import MardalTaiWinther | |
32 | 35 | from FIAT.bubble import Bubble, FacetBubble |
33 | 36 | from FIAT.tensor_product import TensorProductElement |
34 | 37 | from FIAT.enriched import EnrichedElement |
38 | 41 | from FIAT.mixed import MixedElement # noqa: F401 |
39 | 42 | from FIAT.restricted import RestrictedElement # noqa: F401 |
40 | 43 | from FIAT.quadrature_element import QuadratureElement # noqa: F401 |
44 | from FIAT.kong_mulder_veldhuizen import KongMulderVeldhuizen # noqa: F401 | |
41 | 45 | |
42 | 46 | # Important functionality |
43 | 47 | from FIAT.quadrature import make_quadrature # noqa: F401 |
63 | 67 | "Discontinuous Raviart-Thomas": DiscontinuousRaviartThomas, |
64 | 68 | "Hermite": CubicHermite, |
65 | 69 | "Lagrange": Lagrange, |
70 | "Kong-Mulder-Veldhuizen": KongMulderVeldhuizen, | |
66 | 71 | "Gauss-Lobatto-Legendre": GaussLobattoLegendre, |
67 | 72 | "Gauss-Legendre": GaussLegendre, |
68 | 73 | "Gauss-Radau": GaussRadau, |
76 | 81 | "TensorProductElement": TensorProductElement, |
77 | 82 | "BrokenElement": DiscontinuousElement, |
78 | 83 | "HDiv Trace": HDivTrace, |
79 | "Hellan-Herrmann-Johnson": HellanHerrmannJohnson} | |
84 | "Hellan-Herrmann-Johnson": HellanHerrmannJohnson, | |
85 | "Conforming Arnold-Winther": ArnoldWinther, | |
86 | "Nonconforming Arnold-Winther": ArnoldWintherNC, | |
87 | "Mardal-Tai-Winther": MardalTaiWinther} | |
80 | 88 | |
81 | 89 | # List of extra elements |
82 | 90 | extra_elements = {"P0": P0, |
141 | 141 | def __init__(self, ref_el): |
142 | 142 | poly_set = polynomial_set.ONPolynomialSet(ref_el, 5) |
143 | 143 | dual = QuinticArgyrisDualSet(ref_el) |
144 | super(QuinticArgyris, self).__init__(poly_set, dual, 5) | |
144 | super().__init__(poly_set, dual, 5) |
0 | # -*- coding: utf-8 -*- | |
1 | """Implementation of the Arnold-Winther finite elements.""" | |
2 | ||
3 | # Copyright (C) 2020 by Robert C. Kirby (Baylor University) | |
4 | # Modified by Francis Aznaran (Oxford University) | |
5 | # | |
6 | # This file is part of FIAT (https://www.fenicsproject.org) | |
7 | # | |
8 | # SPDX-License-Identifier: LGPL-3.0-or-later | |
9 | ||
10 | ||
11 | from FIAT.finite_element import CiarletElement | |
12 | from FIAT.dual_set import DualSet | |
13 | from FIAT.polynomial_set import ONSymTensorPolynomialSet, ONPolynomialSet | |
14 | from FIAT.functional import ( | |
15 | PointwiseInnerProductEvaluation as InnerProduct, | |
16 | FrobeniusIntegralMoment as FIM, | |
17 | IntegralMomentOfTensorDivergence, | |
18 | IntegralLegendreNormalNormalMoment, | |
19 | IntegralLegendreNormalTangentialMoment) | |
20 | ||
21 | from FIAT.quadrature import make_quadrature | |
22 | ||
23 | import numpy | |
24 | ||
25 | ||
class ArnoldWintherNCDual(DualSet):
    """Degrees of freedom for the degree-2 nonconforming Arnold-Winther element.

    Edge dofs are moments of sigma.n against constant and linear Legendre
    polynomials; internal dofs are constant moments of the three unique
    components of the symmetric tensor; the quadratic normal-normal edge
    moments are appended last as constraint dofs.
    """

    def __init__(self, cell, degree):
        if degree != 2:
            # Fix: the original implicit string concatenation produced
            # "...are""only..." with no separating space.
            raise ValueError("Nonconforming Arnold-Winther elements are "
                             "only defined for degree 2.")
        dofs = []
        dof_ids = {}
        dof_ids[0] = {0: [], 1: [], 2: []}
        dof_ids[1] = {0: [], 1: [], 2: []}
        dof_ids[2] = {0: []}

        dof_cur = 0
        # no vertex dofs
        # proper edge dofs now (not the constraints):
        # moments of normal . sigma against constants and linears.
        for entity_id in range(3):  # a triangle has 3 edges
            for order in (0, 1):
                dofs += [IntegralLegendreNormalNormalMoment(cell, entity_id, order, 6),
                         IntegralLegendreNormalTangentialMoment(cell, entity_id, order, 6)]
            dof_ids[1][entity_id] = list(range(dof_cur, dof_cur + 4))
            dof_cur += 4

        # internal dofs: constant moments of three unique components
        Q = make_quadrature(cell, 2)

        e1 = numpy.array([1.0, 0.0])  # euclidean basis 1
        e2 = numpy.array([0.0, 1.0])  # euclidean basis 2
        basis = [(e1, e1), (e1, e2), (e2, e2)]  # basis for symmetric matrices
        for (v1, v2) in basis:
            v1v2t = numpy.outer(v1, v2)
            # constant tensor value, replicated at every quadrature point
            fatqp = numpy.zeros((2, 2, len(Q.pts)))
            for k in range(len(Q.pts)):
                fatqp[:, :, k] = v1v2t
            dofs.append(FIM(cell, Q, fatqp))
        dof_ids[2][0] = list(range(dof_cur, dof_cur + 3))
        dof_cur += 3

        # put the constraint dofs last.
        for entity_id in range(3):
            dof = IntegralLegendreNormalNormalMoment(cell, entity_id, 2, 6)
            dofs.append(dof)
            dof_ids[1][entity_id].append(dof_cur)
            dof_cur += 1

        super(ArnoldWintherNCDual, self).__init__(dofs, cell, dof_ids)
74 | ||
class ArnoldWintherNC(CiarletElement):
    """The nonconforming Arnold-Winther element (degree 2 only)."""

    def __init__(self, cell, degree):
        assert degree == 2, "Only defined for degree 2"
        poly_space = ONSymTensorPolynomialSet(cell, degree)
        dual_basis = ArnoldWintherNCDual(cell, degree)
        super(ArnoldWintherNC, self).__init__(
            poly_space, dual_basis, degree,
            mapping="double contravariant piola")
87 | ||
class ArnoldWintherDual(DualSet):
    """Degrees of freedom for the degree-3 conforming Arnold-Winther element.

    Vertex dofs evaluate the three unique tensor components at each vertex;
    edge dofs are moments of sigma.n against constant and linear Legendre
    polynomials; internal dofs are constant component moments plus six
    constraint moments of the tensor divergence.
    """

    def __init__(self, cell, degree):
        if degree != 3:
            # Fix: the original implicit string concatenation produced
            # "...are""only..." with no separating space.
            raise ValueError("Arnold-Winther elements are "
                             "only defined for degree 3.")
        dofs = []
        dof_ids = {}
        dof_ids[0] = {0: [], 1: [], 2: []}
        dof_ids[1] = {0: [], 1: [], 2: []}
        dof_ids[2] = {0: []}

        # vertex dofs: pointwise evaluation of the three unique components
        vs = cell.get_vertices()
        e1 = numpy.array([1.0, 0.0])  # euclidean basis 1
        e2 = numpy.array([0.0, 1.0])  # euclidean basis 2
        basis = [(e1, e1), (e1, e2), (e2, e2)]  # basis for symmetric matrices

        dof_cur = 0
        for entity_id in range(3):
            node = tuple(vs[entity_id])
            for (v1, v2) in basis:
                dofs.append(InnerProduct(cell, v1, v2, node))
            dof_ids[0][entity_id] = list(range(dof_cur, dof_cur + 3))
            dof_cur += 3

        # edge dofs:
        # moments of normal . sigma against constants and linears.
        for entity_id in range(3):
            for order in (0, 1):
                dofs += [IntegralLegendreNormalNormalMoment(cell, entity_id, order, 6),
                         IntegralLegendreNormalTangentialMoment(cell, entity_id, order, 6)]
            dof_ids[1][entity_id] = list(range(dof_cur, dof_cur + 4))
            dof_cur += 4

        # internal dofs: constant moments of three unique components
        # (reuses the symmetric-matrix basis built for the vertex dofs;
        # the original redefined e1/e2/basis identically here)
        Q = make_quadrature(cell, 3)
        for (v1, v2) in basis:
            v1v2t = numpy.outer(v1, v2)
            fatqp = numpy.zeros((2, 2, len(Q.pts)))
            for k in range(len(Q.pts)):
                fatqp[:, :, k] = v1v2t
            dofs.append(FIM(cell, Q, fatqp))
        dof_ids[2][0] = list(range(dof_cur, dof_cur + 3))
        dof_cur += 3

        # Constraint dofs: moments of the tensor divergence against six
        # vector polynomial modes. The index ranges appear to select the
        # degree-2 modes of each vector component -- confirm against
        # ONPolynomialSet's mode ordering.
        Q = make_quadrature(cell, 5)

        onp = ONPolynomialSet(cell, 2, (2,))
        pts = Q.get_points()
        onpvals = onp.tabulate(pts)[0, 0]

        for i in list(range(3, 6)) + list(range(9, 12)):
            dofs.append(IntegralMomentOfTensorDivergence(cell, Q,
                                                         onpvals[i, :, :]))

        dof_ids[2][0] += list(range(dof_cur, dof_cur + 6))
        dof_cur += 6

        super(ArnoldWintherDual, self).__init__(dofs, cell, dof_ids)
155 | ||
156 | ||
class ArnoldWinther(CiarletElement):
    """The conforming Arnold-Winther element (degree 3 only)."""

    def __init__(self, cell, degree):
        assert degree == 3, "Only defined for degree 3"
        poly_space = ONSymTensorPolynomialSet(cell, degree)
        dual_basis = ArnoldWintherDual(cell, degree)
        super(ArnoldWinther, self).__init__(
            poly_space, dual_basis, degree,
            mapping="double contravariant piola")
17 | 17 | import sympy |
18 | 18 | |
19 | 19 | from FIAT import polynomial_set |
20 | from FIAT.quadrature import GaussLegendreQuadratureLineRule, QuadratureRule | |
21 | from FIAT.reference_element import UFCInterval as interval | |
20 | 22 | |
21 | 23 | |
22 | 24 | def index_iterator(shp): |
68 | 70 | self.functional_type = functional_type |
69 | 71 | if len(deriv_dict) > 0: |
70 | 72 | per_point = list(chain(*deriv_dict.values())) |
71 | alphas = [foo[1] for foo in per_point] | |
73 | alphas = [tuple(foo[1]) for foo in per_point] | |
72 | 74 | self.max_deriv_order = max([sum(foo) for foo in alphas]) |
73 | 75 | else: |
74 | 76 | self.max_deriv_order = 0 |
288 | 290 | """ |
289 | 291 | |
290 | 292 | def __init__(self, ref_el, Q, f_at_qpts, comp=tuple(), shp=tuple()): |
293 | self.Q = Q | |
291 | 294 | qpts, qwts = Q.get_points(), Q.get_weights() |
292 | 295 | pt_dict = OrderedDict() |
293 | 296 | self.comp = comp |
335 | 338 | {}, dpt_dict, "IntegralMomentOfNormalDerivative") |
336 | 339 | |
337 | 340 | |
class IntegralLegendreDirectionalMoment(Functional):
    """Moment of v.s against a Legendre polynomial over an edge"""

    def __init__(self, cell, s, entity, mom_deg, comp_deg, nm=""):
        sd = cell.get_spatial_dimension()
        assert sd == 2
        shp = (sd,)

        # Line quadrature on the reference interval.
        n_qp = comp_deg + 1
        Q = GaussLegendreQuadratureLineRule(interval(), n_qp)
        # Legendre polynomial of degree mom_deg on [0, 1] via x -> 2x - 1.
        legendre = numpy.polynomial.legendre.legval(2*Q.get_points()-1, [0]*mom_deg + [1])
        f_at_qpts = numpy.array([s*legendre[i] for i in range(n_qp)])

        # Push the reference-interval points onto the requested edge.
        fmap = cell.get_entity_transform(sd-1, entity)
        mapped_pts = [fmap(pt) for pt in Q.get_points()]
        mappedQ = QuadratureRule(cell, mapped_pts, Q.get_weights())

        pt_dict = OrderedDict()
        for wt, f_cur, pt in zip(mappedQ.wts, f_at_qpts, mappedQ.pts):
            pt_dict[tuple(pt)] = [(wt * f_cur[i], (i,)) for i in range(2)]

        super().__init__(cell, shp, pt_dict, {}, nm)
365 | ||
366 | ||
class IntegralLegendreNormalMoment(IntegralLegendreDirectionalMoment):
    """Moment of v.n against a Legendre polynomial over an edge"""

    def __init__(self, cell, entity, mom_deg, comp_deg):
        normal = cell.compute_scaled_normal(entity)
        super().__init__(cell, normal, entity, mom_deg, comp_deg,
                         "IntegralLegendreNormalMoment")
373 | ||
374 | ||
class IntegralLegendreTangentialMoment(IntegralLegendreDirectionalMoment):
    """Moment of v.t against a Legendre polynomial over an edge"""

    def __init__(self, cell, entity, mom_deg, comp_deg):
        tangent = cell.compute_edge_tangent(entity)
        super().__init__(cell, tangent, entity, mom_deg, comp_deg,
                         "IntegralLegendreTangentialMoment")
381 | ||
382 | ||
class IntegralLegendreBidirectionalMoment(Functional):
    """Moment of dot(s1, dot(tau, s2)) against Legendre on entity, multiplied by the size of the reference facet"""
    def __init__(self, cell, s1, s2, entity, mom_deg, comp_deg, nm=""):
        # mom_deg is degree of moment, comp_deg is the total degree of
        # polynomial you might need to integrate (or something like that)
        sd = cell.get_spatial_dimension()
        shp = (sd, sd)

        # Rank-one tensor s1 s2^T: Frobenius product with tau gives s1.tau.s2.
        s1s2T = numpy.outer(s1, s2)
        quadpoints = comp_deg + 1
        Q = GaussLegendreQuadratureLineRule(interval(), quadpoints)

        # The volume squared gets the Jacobian mapping from line interval
        # and the edge length into the functional.
        legendre = numpy.polynomial.legendre.legval(2*Q.get_points()-1, [0]*mom_deg + [1]) * numpy.abs(cell.volume_of_subcomplex(1, entity))**2

        # One (sd, sd) tensor weight per quadrature point.
        f_at_qpts = numpy.array([s1s2T*legendre[i] for i in range(quadpoints)])

        # Map the quadrature points
        fmap = cell.get_entity_transform(sd-1, entity)
        mappedqpts = [fmap(pt) for pt in Q.get_points()]
        mappedQ = QuadratureRule(cell, mappedqpts, Q.get_weights())

        pt_dict = OrderedDict()

        qpts = mappedQ.pts
        qwts = mappedQ.wts

        for k in range(len(qpts)):
            pt_cur = tuple(qpts[k])
            # Quadrature weight times tensor entry, tagged with component (i, j).
            pt_dict[pt_cur] = [(qwts[k] * f_at_qpts[k, i, j], (i, j))
                               for (i, j) in index_iterator(shp)]

        super().__init__(cell, shp, pt_dict, {}, nm)
417 | ||
418 | ||
class IntegralLegendreNormalNormalMoment(IntegralLegendreBidirectionalMoment):
    """Moment of dot(n, dot(tau, n)) against Legendre on entity."""

    def __init__(self, cell, entity, mom_deg, comp_deg):
        normal = cell.compute_normal(entity)
        super().__init__(cell, normal, normal, entity, mom_deg, comp_deg,
                         "IntegralNormalNormalLegendreMoment")
425 | ||
426 | ||
class IntegralLegendreNormalTangentialMoment(IntegralLegendreBidirectionalMoment):
    """Moment of dot(n, dot(tau, t)) against Legendre on entity."""

    def __init__(self, cell, entity, mom_deg, comp_deg):
        normal = cell.compute_normal(entity)
        tangent = cell.compute_normalized_edge_tangent(entity)
        super().__init__(cell, normal, tangent, entity, mom_deg, comp_deg,
                         "IntegralNormalTangentialLegendreMoment")
434 | ||
435 | ||
class IntegralMomentOfDivergence(Functional):
    """Functional representing integral of the divergence of the input
    against some tabulated function f."""

    def __init__(self, ref_el, Q, f_at_qpts):
        self.f_at_qpts = f_at_qpts
        self.Q = Q

        sd = ref_el.get_spatial_dimension()
        qpts, qwts = Q.get_points(), Q.get_weights()
        self.dpts = qpts

        # First-derivative multi-indices, one per spatial direction.
        alphas = [tuple(1 if j == i else 0 for j in range(sd))
                  for i in range(sd)]

        dpt_dict = OrderedDict()
        for j, pt in enumerate(qpts):
            wf = qwts[j] * f_at_qpts[j]
            dpt_dict[tuple(pt)] = [(wf, alphas[i], (i,)) for i in range(sd)]

        super().__init__(ref_el, tuple(), {}, dpt_dict,
                         "IntegralMomentOfDivergence")
457 | ||
458 | ||
class IntegralMomentOfTensorDivergence(Functional):
    """Like IntegralMomentOfDivergence, but on symmetric tensors."""

    def __init__(self, ref_el, Q, f_at_qpts):
        self.f_at_qpts = f_at_qpts
        self.Q = Q
        qpts, qwts = Q.get_points(), Q.get_weights()
        nqp = len(qpts)
        dpts = qpts
        self.dpts = dpts

        # f is a tabulated 2-vector field: (component, quadrature point).
        assert len(f_at_qpts.shape) == 2
        assert f_at_qpts.shape[0] == 2
        assert f_at_qpts.shape[1] == nqp

        sd = ref_el.get_spatial_dimension()

        dpt_dict = OrderedDict()

        # First-derivative multi-indices, one per spatial direction.
        alphas = [tuple([1 if j == i else 0 for j in range(sd)]) for i in range(sd)]
        for q, pt in enumerate(dpts):
            dpt_dict[tuple(pt)] = [(qwts[q]*f_at_qpts[i, q], alphas[j], (i, j)) for i in range(2) for j in range(2)]

        # Fix: label this functional with its own name; the rewrite had
        # regressed the type string to "IntegralMomentOfDivergence".
        super().__init__(ref_el, tuple(), {}, dpt_dict,
                         "IntegralMomentOfTensorDivergence")
484 | ||
485 | ||
class FrobeniusIntegralMoment(Functional):
    """Functional integrating the Frobenius inner product of the argument
    with a tabulated (possibly tensor-valued) function against rule Q."""

    def __init__(self, ref_el, Q, f_at_qpts):
        # f_at_qpts is (some shape) x num_qpts
        shp = tuple(f_at_qpts.shape[:-1])
        if len(Q.get_points()) != f_at_qpts.shape[-1]:
            raise Exception("Mismatch in number of quadrature points and values")

        qpts, qwts = Q.get_points(), Q.get_weights()
        pt_dict = {}

        for i, (pt_cur, wt_cur) in enumerate(zip(map(tuple, qpts), qwts)):
            pt_dict[pt_cur] = []
            # One weighted entry per component multi-index of f.
            for alfa in index_iterator(shp):
                # index_iterator yields index lists; append the point index
                # to select this component's value at quadrature point i.
                qpidx = tuple(alfa + [i])
                pt_dict[pt_cur].append((wt_cur * f_at_qpts[qpidx], tuple(alfa)))

        super().__init__(ref_el, shp, pt_dict, {}, "FrobeniusIntegralMoment")
356 | 504 | |
357 | 505 | |
358 | 506 | class PointNormalEvaluation(Functional): |
367 | 515 | pt_dict = {pt: [(n[i], (i,)) for i in range(sd)]} |
368 | 516 | |
369 | 517 | shp = (sd,) |
370 | Functional.__init__(self, ref_el, shp, pt_dict, {}, "PointNormalEval") | |
518 | super().__init__(ref_el, shp, pt_dict, {}, "PointNormalEval") | |
371 | 519 | |
372 | 520 | |
373 | 521 | class PointEdgeTangentEvaluation(Functional): |
380 | 528 | sd = ref_el.get_spatial_dimension() |
381 | 529 | pt_dict = {pt: [(t[i], (i,)) for i in range(sd)]} |
382 | 530 | shp = (sd,) |
383 | Functional.__init__(self, ref_el, shp, pt_dict, {}, "PointEdgeTangent") | |
531 | super().__init__(ref_el, shp, pt_dict, {}, "PointEdgeTangent") | |
384 | 532 | |
385 | 533 | def tostr(self): |
386 | 534 | x = list(map(str, list(self.pt_dict.keys())[0])) |
407 | 555 | pt_dict = OrderedDict() |
408 | 556 | for pt, wgt, phi in zip(pts, weights, P_at_qpts): |
409 | 557 | pt_dict[pt] = [(wgt*phi*t[i], (i, )) for i in range(sd)] |
410 | super().__init__(ref_el, (sd, ), pt_dict, {}, "IntegralMomentOfEdgeTangentEvaluation") | |
558 | super().__init__(ref_el, (sd, ), pt_dict, {}, | |
559 | "IntegralMomentOfEdgeTangentEvaluation") | |
411 | 560 | |
412 | 561 | |
413 | 562 | class PointFaceTangentEvaluation(Functional): |
455 | 604 | pt_dict[pt] = [(wgt*(-n[2]*phixn[1]+n[1]*phixn[2]), (0, )), |
456 | 605 | (wgt*(n[2]*phixn[0]-n[0]*phixn[2]), (1, )), |
457 | 606 | (wgt*(-n[1]*phixn[0]+n[0]*phixn[1]), (2, ))] |
458 | super().__init__(ref_el, (sd, ), pt_dict, {}, "IntegralMomentOfFaceTangentEvaluation") | |
607 | super().__init__(ref_el, (sd, ), pt_dict, {}, | |
608 | "IntegralMomentOfFaceTangentEvaluation") | |
459 | 609 | |
460 | 610 | |
461 | 611 | class MonkIntegralMoment(Functional): |
492 | 642 | shp = (sd,) |
493 | 643 | |
494 | 644 | pt_dict = {pt: [(self.n[i], (i,)) for i in range(sd)]} |
495 | Functional.__init__(self, ref_el, shp, pt_dict, {}, "PointScaledNormalEval") | |
645 | super().__init__(ref_el, shp, pt_dict, {}, "PointScaledNormalEval") | |
496 | 646 | |
497 | 647 | def tostr(self): |
498 | 648 | x = list(map(str, list(self.pt_dict.keys())[0])) |
542 | 692 | for i, j in index_iterator((sd, sd))]} |
543 | 693 | |
544 | 694 | shp = (sd, sd) |
545 | Functional.__init__(self, ref_el, shp, pt_dict, {}, "PointwiseInnerProductEval") | |
546 | ||
547 | ||
548 | class IntegralMomentOfDivergence(Functional): | |
549 | def __init__(self, ref_el, Q, f_at_qpts): | |
550 | self.f_at_qpts = f_at_qpts | |
551 | self.Q = Q | |
552 | ||
553 | sd = ref_el.get_spatial_dimension() | |
695 | super().__init__(ref_el, shp, pt_dict, {}, "PointwiseInnerProductEval") | |
696 | ||
697 | ||
class TensorBidirectionalMomentInnerProductEvaluation(Functional):
    r"""
    This is a functional on symmetric 2-tensor fields. Let u be such a
    field, f a function tabulated at points, and v, w be vectors. This
    implements the evaluation
    \int v^T u(x) w f(x).

    Clearly v^i u_{ij} w^j = u_{ij} v^i w^j.  Thus the value can be computed
    from the Frobenius inner product of u with w v^T.  This gives the
    correct weights.
    """

    def __init__(self, ref_el, v, w, Q, f_at_qpts, comp_deg):
        sd = ref_el.get_spatial_dimension()

        wvT = numpy.outer(w, v)

        qpts, qwts = Q.get_points(), Q.get_weights()

        pt_dict = {}
        # Fix: original called map(tuple(qpts)) -- map with a single
        # argument -- which raises TypeError; the callable and iterable
        # must be separate arguments.
        for k, pt in enumerate(map(tuple, qpts)):
            pt_dict[pt] = []
            for i, j in index_iterator((sd, sd)):
                # Fix: original passed two positional arguments to
                # list.append; the (weight, component) pair must be a
                # single tuple.
                pt_dict[pt].append((qwts[k] * wvT[i][j] * f_at_qpts[i, j, k],
                                    (i, j)))

        shp = (sd, sd)
        super().__init__(ref_el, shp, pt_dict, {}, "TensorBidirectionalMomentInnerProductEvaluation")
725 | ||
726 | ||
class IntegralMomentOfNormalEvaluation(Functional):
    r"""
    \int_F v\cdot n p ds
    p \in Polynomials
    :arg ref_el: reference element for which F is a codim-1 entity
    :arg Q: quadrature rule on the face
    :arg P_at_qpts: polynomials evaluated at quad points
    :arg facet: which facet.
    """

    def __init__(self, ref_el, Q, P_at_qpts, facet):
        # scaling on the normal is ok because edge length then weights
        # the reference element quadrature appropriately
        n = ref_el.compute_scaled_normal(facet)
        sd = ref_el.get_spatial_dimension()
        transform = ref_el.get_entity_transform(sd - 1, facet)
        mapped_pts = tuple(tuple(transform(p)) for p in Q.get_points())
        pt_dict = OrderedDict()
        for pt, wgt, phi in zip(mapped_pts, Q.get_weights(), P_at_qpts):
            pt_dict[pt] = [(wgt*phi*n[i], (i, )) for i in range(sd)]
        super().__init__(ref_el, (sd, ), pt_dict, {}, "IntegralMomentOfScaledNormalEvaluation")
748 | ||
749 | ||
class IntegralMomentOfTangentialEvaluation(Functional):
    r"""
    \int_F v\cdot t p ds
    p \in Polynomials
    :arg ref_el: reference element for which F is a codim-1 entity
    :arg Q: quadrature rule on the face
    :arg P_at_qpts: polynomials evaluated at quad points
    :arg facet: which facet.
    """
    def __init__(self, ref_el, Q, P_at_qpts, facet):
        # scaling on the tangent is ok because edge length then weights
        # the reference element quadrature appropriately
        sd = ref_el.get_spatial_dimension()
        assert sd == 2
        t = ref_el.compute_edge_tangent(facet)
        transform = ref_el.get_entity_transform(sd - 1, facet)
        pts = tuple(map(lambda p: tuple(transform(p)), Q.get_points()))
        weights = Q.get_weights()
        pt_dict = OrderedDict()
        for pt, wgt, phi in zip(pts, weights, P_at_qpts):
            pt_dict[pt] = [(wgt*phi*t[i], (i, )) for i in range(sd)]
        super().__init__(ref_el, (sd, ), pt_dict, {}, "IntegralMomentOfScaledTangentialEvaluation")
772 | ||
773 | ||
class IntegralMomentOfNormalNormalEvaluation(Functional):
    r"""
    \int_F (n^T tau n) p ds
    p \in Polynomials
    :arg ref_el: reference element for which F is a codim-1 entity
    :arg Q: quadrature rule on the face
    :arg P_at_qpts: polynomials evaluated at quad points
    :arg facet: which facet.

    NOTE(review): the body below is identical to
    IntegralMomentOfNormalEvaluation (vector shape ``(sd,)``, weights
    ``n[i]``, type string "IntegralMomentOfScaledNormalEvaluation"),
    not a rank-2 n^T tau n moment as the docstring states -- confirm
    intended behavior before relying on this class.
    """
    def __init__(self, ref_el, Q, P_at_qpts, facet):
        # scaling on the normal is ok because edge length then weights
        # the reference element quadrature appropriately
        n = ref_el.compute_scaled_normal(facet)
        sd = ref_el.get_spatial_dimension()
        transform = ref_el.get_entity_transform(sd - 1, facet)
        pts = tuple(map(lambda p: tuple(transform(p)), Q.get_points()))
        weights = Q.get_weights()
        pt_dict = OrderedDict()
        for pt, wgt, phi in zip(pts, weights, P_at_qpts):
            pt_dict[pt] = [(wgt*phi*n[i], (i, )) for i in range(sd)]
        super().__init__(ref_el, (sd, ), pt_dict, {}, "IntegralMomentOfScaledNormalEvaluation")
class CubicHermite(finite_element.CiarletElement):
    """The cubic Hermite finite element.  It is what it is."""

    def __init__(self, ref_el, deg=3):
        # `deg` exists only for call-signature compatibility with other
        # elements; cubic Hermite is fixed at degree 3.  Use a real
        # exception rather than `assert`, which is stripped under -O and
        # matches the ValueError convention used by sibling elements.
        if deg != 3:
            raise ValueError("Cubic Hermite is only defined for degree 3.")
        poly_set = polynomial_set.ONPolynomialSet(ref_el, 3)
        dual = CubicHermiteDualSet(ref_el)

        super(CubicHermite, self).__init__(poly_set, dual, 3)
0 | # Copyright (C) 2020 Robert C. Kirby (Baylor University) | |
1 | # | |
2 | # contributions by Keith Roberts (University of São Paulo) | |
3 | # | |
4 | # This file is part of FIAT (https://www.fenicsproject.org) | |
5 | # | |
6 | # SPDX-License-Identifier: LGPL-3.0-or-later | |
7 | from FIAT import ( | |
8 | finite_element, | |
9 | dual_set, | |
10 | functional, | |
11 | Bubble, | |
12 | FacetBubble, | |
13 | Lagrange, | |
14 | NodalEnrichedElement, | |
15 | RestrictedElement, | |
16 | reference_element, | |
17 | ) | |
18 | from FIAT.quadrature_schemes import create_quadrature | |
19 | ||
20 | TRIANGLE = reference_element.UFCTriangle() | |
21 | TETRAHEDRON = reference_element.UFCTetrahedron() | |
22 | ||
23 | ||
24 | def _get_entity_ids(ref_el, degree): | |
25 | """The topological association in a dictionary""" | |
26 | T = ref_el.topology | |
27 | sd = ref_el.get_spatial_dimension() | |
28 | if degree == 1: # works for any spatial dimension. | |
29 | entity_ids = {0: dict((i, [i]) for i in range(len(T[0])))} | |
30 | for d in range(1, sd + 1): | |
31 | entity_ids[d] = dict((i, []) for i in range(len(T[d]))) | |
32 | elif degree == 2: | |
33 | if sd == 2: | |
34 | entity_ids = { | |
35 | 0: dict((i, [i]) for i in range(3)), | |
36 | 1: dict((i, [i + 3]) for i in range(3)), | |
37 | 2: {0: [6]}, | |
38 | } | |
39 | elif sd == 3: | |
40 | entity_ids = { | |
41 | 0: dict((i, [i]) for i in range(4)), | |
42 | 1: dict((i, [i + 4]) for i in range(6)), | |
43 | 2: dict((i, [i + 10]) for i in range(4)), | |
44 | 3: {0: [14]}, | |
45 | } | |
46 | elif degree == 3: | |
47 | if sd == 2: | |
48 | etop = [[3, 4], [6, 5], [7, 8]] | |
49 | entity_ids = { | |
50 | 0: dict((i, [i]) for i in range(3)), | |
51 | 1: dict((i, etop[i]) for i in range(3)), | |
52 | 2: {0: [9, 10, 11]}, | |
53 | } | |
54 | elif sd == 3: | |
55 | etop = [[4, 5], [7, 6], [8, 9], [11, 10], [12, 13], [14, 15]] | |
56 | ftop = [[16, 17, 18], [19, 20, 21], [22, 23, 24], [25, 26, 27]] | |
57 | entity_ids = { | |
58 | 0: dict((i, [i]) for i in range(4)), | |
59 | 1: dict((i, etop[i]) for i in range(6)), | |
60 | 2: dict((i, ftop[i]) for i in range(4)), | |
61 | 3: {0: [28, 29, 30, 31]}, | |
62 | } | |
63 | elif degree == 4: | |
64 | if sd == 2: | |
65 | etop = [[6, 3, 7], [9, 4, 8], [10, 5, 11]] | |
66 | entity_ids = { | |
67 | 0: dict((i, [i]) for i in range(3)), | |
68 | 1: dict((i, etop[i]) for i in range(3)), | |
69 | 2: {0: [i for i in range(12, 18)]}, | |
70 | } | |
71 | elif degree == 5: | |
72 | if sd == 2: | |
73 | etop = [[9, 3, 4, 10], [12, 6, 5, 11], [13, 7, 8, 14]] | |
74 | entity_ids = { | |
75 | 0: dict((i, [i]) for i in range(3)), | |
76 | 1: dict((i, etop[i]) for i in range(3)), | |
77 | 2: {0: [i for i in range(15, 30)]}, | |
78 | } | |
79 | return entity_ids | |
80 | ||
81 | ||
def bump(T, deg):
    """Increase degree of polynomial along face/edges"""
    sd = T.get_spatial_dimension()
    # Degree 1 never needs enrichment, in any dimension.
    if deg == 1:
        return (0, 0)
    if sd == 2:
        if deg < 5:
            return (1, 1)
        if deg == 5:
            return (2, 2)
        raise ValueError("Degree not supported")
    if sd == 3:
        if deg < 4:
            return (1, 2)
        raise ValueError("Degree not supported")
    raise ValueError("Dimension of element is not supported")
103 | ||
def KongMulderVeldhuizenSpace(T, deg):
    """Build the KMV polynomial space: degree-``deg`` Lagrange with its
    interior (and, in 3D, facet) dofs removed, enriched with
    higher-degree bubbles sized by ``bump``.

    NOTE(review): for spatial dimensions other than 2 or 3 with deg > 1
    this falls through and returns None -- presumably callers guard the
    dimension; confirm.
    """
    sd = T.get_spatial_dimension()
    if deg == 1:
        return Lagrange(T, 1).poly_set
    else:
        L = Lagrange(T, deg)
        # Toss the bubble from Lagrange since it's dependent
        # on the higher-dimensional bubbles
        if sd == 2:
            # keep everything except the interior (cell) dofs
            inds = [
                i
                for i in range(L.space_dimension())
                if i not in L.dual.entity_ids[sd][0]
            ]
        elif sd == 3:
            # drop the cell dofs and every facet's dofs
            not_inds = [L.dual.entity_ids[sd][0]] + [
                L.dual.entity_ids[sd - 1][f] for f in L.dual.entity_ids[sd - 1]
            ]
            not_inds = [item for sublist in not_inds for item in sublist]
            inds = [i for i in range(L.space_dimension()) if i not in not_inds]
        RL = RestrictedElement(L, inds)
        # interior cell bubble
        bubs = Bubble(T, deg + bump(T, deg)[1])
        if sd == 2:
            return NodalEnrichedElement(RL, bubs).poly_set
        elif sd == 3:
            # bubble on the facet
            fbubs = FacetBubble(T, deg + bump(T, deg)[0])
            return NodalEnrichedElement(RL, bubs, fbubs).poly_set
133 | ||
134 | ||
class KongMulderVeldhuizenDualSet(dual_set.DualSet):
    """The dual basis for KMV simplicial elements."""

    def __init__(self, ref_el, degree):
        # Point evaluations at the special KMV quadrature points; the
        # topological layout comes from the precomputed tables.
        rule = create_quadrature(ref_el, degree, scheme="KMV")
        nodes = [functional.PointEvaluation(ref_el, pt) for pt in rule.pts]
        entity_ids = _get_entity_ids(ref_el, degree)
        super().__init__(nodes, ref_el, entity_ids)
144 | ||
145 | ||
class KongMulderVeldhuizen(finite_element.CiarletElement):
    """The "lumped" simplical finite element (NB: requires custom quad. "KMV" points to achieve a diagonal mass matrix).

    References
    ----------

    Higher-order triangular and tetrahedral finite elements with mass
    lumping for solving the wave equation
    M. J. S. CHIN-JOE-KONG, W. A. MULDER and M. VAN VELDHUIZEN

    HIGHER-ORDER MASS-LUMPED FINITE ELEMENTS FOR THE WAVE EQUATION
    W.A. MULDER

    NEW HIGHER-ORDER MASS-LUMPED TETRAHEDRAL ELEMENTS
    S. GEEVERS, W.A. MULDER, AND J.J.W. VAN DER VEGT

    """

    def __init__(self, ref_el, degree):
        # Only the tabulated cell/degree combinations are available.
        if ref_el != TRIANGLE and ref_el != TETRAHEDRON:
            raise ValueError("KMV is only valid for triangles and tetrahedrals")
        if ref_el == TRIANGLE and degree > 5:
            raise NotImplementedError("Only P < 6 for triangles are implemented.")
        if ref_el == TETRAHEDRON and degree > 3:
            raise NotImplementedError("Only P < 4 for tetrahedrals are implemented.")

        space = KongMulderVeldhuizenSpace(ref_el, degree)
        dual = KongMulderVeldhuizenDualSet(ref_el, degree)
        formdegree = 0  # 0-form
        # The enriched space carries a higher embedded degree.
        super(KongMulderVeldhuizen, self).__init__(
            space, dual, degree + max(bump(ref_el, degree)), formdegree
        )
0 | # -*- coding: utf-8 -*- | |
1 | """Implementation of the Mardal-Tai-Winther finite elements.""" | |
2 | ||
3 | # Copyright (C) 2020 by Robert C. Kirby (Baylor University) | |
4 | # | |
5 | # This file is part of FIAT (https://www.fenicsproject.org) | |
6 | # | |
7 | # SPDX-License-Identifier: LGPL-3.0-or-later | |
8 | ||
9 | ||
10 | from FIAT.finite_element import CiarletElement | |
11 | from FIAT.dual_set import DualSet | |
12 | from FIAT.polynomial_set import ONPolynomialSet | |
13 | from FIAT.functional import (IntegralMomentOfNormalEvaluation, | |
14 | IntegralMomentOfTangentialEvaluation, | |
15 | IntegralLegendreNormalMoment, | |
16 | IntegralMomentOfDivergence) | |
17 | ||
18 | from FIAT.quadrature import make_quadrature | |
19 | ||
20 | ||
def DivergenceDubinerMoments(cell, start_deg, stop_deg, comp_deg):
    """Build functionals integrating the divergence against Dubiner
    polynomials of total degree ``start_deg`` through ``stop_deg``
    (inclusive), using a quadrature rule exact for degree ``comp_deg``."""
    dubiner = ONPolynomialSet(cell, stop_deg)
    Q = make_quadrature(cell, comp_deg)

    # Tabulated values (no derivatives) of the Dubiner basis at the
    # quadrature points.
    tab = dubiner.tabulate(Q.get_points(), 0)[0, 0]

    # Index range of the 2D Dubiner basis functions with total degree in
    # [start_deg, stop_deg]: there are (d+1)(d+2)/2 functions up to degree d.
    lo = (start_deg) * (start_deg + 1) // 2
    hi = (stop_deg + 1) * (stop_deg + 2) // 2

    return [IntegralMomentOfDivergence(cell, Q, tab[ii, :])
            for ii in range(lo, hi)]
35 | ||
36 | ||
class MardalTaiWintherDual(DualSet):
    """Degrees of freedom for Mardal-Tai-Winther elements.

    Nine "primary" dofs live on the edges (u.n and u.t against the
    constant, u.n against the linear Legendre polynomial); the remaining
    dofs are constraints forcing div(v) constant on the cell and v.n
    linear on each edge.
    """
    def __init__(self, cell, degree):
        dim = cell.get_spatial_dimension()
        # Fixed error messages: implicit string concatenation previously
        # ran the words together ("onlydefined", "definedfor").
        if not dim == 2:
            raise ValueError("Mardal-Tai-Winther elements are only "
                             "defined in dimension 2.")

        if not degree == 3:
            raise ValueError("Mardal-Tai-Winther elements are only defined "
                             "for degree 3.")

        # construct the degrees of freedoms
        dofs = []               # list of functionals

        # dof_ids[i][j] contains the indices of dofs that are associated with
        # entity j in dim i
        dof_ids = {}

        # no vertex dof
        dof_ids[0] = {i: [] for i in range(dim + 1)}

        # edge dofs
        (_dofs, _dof_ids) = self._generate_edge_dofs(cell, degree)
        dofs.extend(_dofs)
        dof_ids[1] = _dof_ids

        # no cell dofs
        dof_ids[2] = {}
        dof_ids[2][0] = []

        # extra dofs for enforcing div(v) constant over the cell and
        # v.n linear on edges
        (_dofs, _edge_dof_ids, _cell_dof_ids) = self._generate_constraint_dofs(cell, degree, len(dofs))
        dofs.extend(_dofs)

        for entity_id in range(3):
            dof_ids[1][entity_id] = dof_ids[1][entity_id] + _edge_dof_ids[entity_id]

        dof_ids[2][0] = dof_ids[2][0] + _cell_dof_ids

        super(MardalTaiWintherDual, self).__init__(dofs, cell, dof_ids)

    @staticmethod
    def _generate_edge_dofs(cell, degree):
        """Generate dofs on edges.
        On each edge, let n be its normal. We need to integrate
        u.n and u.t against the first Legendre polynomial (constant)
        and u.n against the second (linear).
        """
        dofs = []
        dof_ids = {}
        offset = 0
        sd = 2

        facet = cell.get_facet_element()
        # Facet nodes are \int_F v\cdot n p ds where p \in P_{q-1}
        # degree is q - 1
        Q = make_quadrature(facet, 6)
        Pq = ONPolynomialSet(facet, 1)
        Pq_at_qpts = Pq.tabulate(Q.get_points())[tuple([0]*(sd - 1))]
        for f in range(3):
            phi0 = Pq_at_qpts[0, :]  # constant Legendre polynomial
            dofs.append(IntegralMomentOfNormalEvaluation(cell, Q, phi0, f))
            dofs.append(IntegralMomentOfTangentialEvaluation(cell, Q, phi0, f))
            phi1 = Pq_at_qpts[1, :]  # linear Legendre polynomial
            dofs.append(IntegralMomentOfNormalEvaluation(cell, Q, phi1, f))

            num_new_dofs = 3
            dof_ids[f] = list(range(offset, offset + num_new_dofs))
            offset += num_new_dofs

        return (dofs, dof_ids)

    @staticmethod
    def _generate_constraint_dofs(cell, degree, offset):
        """
        Generate constraint dofs on the cell and edges
        * div(v) must be constant on the cell. Since v is a cubic and
          div(v) is quadratic, we need the integral of div(v) against the
          linear and quadratic Dubiner polynomials to vanish.
          There are two linear and three quadratics, so these are five
          constraints
        * v.n must be linear on each edge. Since v.n is cubic, we need
          the integral of v.n against the cubic and quadratic Legendre
          polynomial to vanish on each edge.

        So we introduce functionals whose kernel describes this property,
        as described in the FIAT paper.
        """
        dofs = []

        edge_dof_ids = {}
        for entity_id in range(3):
            # Moments of v.n against the 2nd- and 3rd-order Legendre
            # polynomials on each edge.
            dofs += [IntegralLegendreNormalMoment(cell, entity_id, 2, 6),
                     IntegralLegendreNormalMoment(cell, entity_id, 3, 6)]

            edge_dof_ids[entity_id] = [offset, offset+1]
            offset += 2

        # Moments of div(v) against the linear and quadratic Dubiner basis.
        cell_dofs = DivergenceDubinerMoments(cell, 1, 2, 6)
        dofs.extend(cell_dofs)
        cell_dof_ids = list(range(offset, offset+len(cell_dofs)))

        return (dofs, edge_dof_ids, cell_dof_ids)
142 | ||
143 | ||
class MardalTaiWinther(CiarletElement):
    """The definition of the Mardal-Tai-Winther element.

    A degree-3 vector-valued H(div) element on triangles whose dual
    basis is given by :class:`MardalTaiWintherDual`.
    """
    def __init__(self, cell, degree=3):
        assert degree == 3, "Only defined for degree 3"
        assert cell.get_spatial_dimension() == 2, "Only defined for dimension 2"

        # Vector-valued cubic polynomial space.
        poly_space = ONPolynomialSet(cell, degree, (2,))

        # Degrees of freedom.
        dual_basis = MardalTaiWintherDual(cell, degree)

        # H(div) elements transform via the contravariant Piola map.
        super(MardalTaiWinther, self).__init__(poly_space, dual_basis, degree,
                                               mapping="contravariant piola")
44 | 44 | |
45 | 45 | entity_ids[2] = {0: []} |
46 | 46 | |
47 | super(MorleyDualSet, self).__init__(nodes, ref_el, entity_ids) | |
47 | super().__init__(nodes, ref_el, entity_ids) | |
48 | 48 | |
49 | 49 | |
50 | 50 | class Morley(finite_element.CiarletElement): |
53 | 53 | def __init__(self, ref_el): |
54 | 54 | poly_set = polynomial_set.ONPolynomialSet(ref_el, 2) |
55 | 55 | dual = MorleyDualSet(ref_el) |
56 | super(Morley, self).__init__(poly_set, dual, 2) | |
56 | super().__init__(poly_set, dual, 2) |
200 | 200 | for d in range(sd): |
201 | 201 | for i in range(Pkm1_at_qpts.shape[0]): |
202 | 202 | phi_cur = Pkm1_at_qpts[i, :] |
203 | l_cur = functional.IntegralMoment(ref_el, Q, phi_cur, (d,)) | |
203 | l_cur = functional.IntegralMoment(ref_el, Q, phi_cur, (d,), (sd,)) | |
204 | 204 | nodes.append(l_cur) |
205 | 205 | |
206 | 206 | entity_ids = {} |
316 | 316 | for d in range(sd): |
317 | 317 | for i in range(Pkm2_at_qpts.shape[0]): |
318 | 318 | phi_cur = Pkm2_at_qpts[i, :] |
319 | f = functional.IntegralMoment(ref_el, Q, phi_cur, (d,)) | |
319 | f = functional.IntegralMoment(ref_el, Q, phi_cur, (d,), (sd,)) | |
320 | 320 | nodes.append(f) |
321 | 321 | |
322 | 322 | entity_ids = {} |
0 | # Copyright (C) 2020 Robert C. Kirby (Baylor University) | |
1 | # | |
2 | # This file is part of FIAT (https://www.fenicsproject.org) | |
3 | # | |
4 | # SPDX-License-Identifier: LGPL-3.0-or-later | |
5 | # | |
6 | import numpy as np | |
7 | from FIAT.functional import Functional | |
8 | from FIAT.dual_set import DualSet | |
9 | from collections import defaultdict | |
10 | from itertools import zip_longest | |
11 | ||
12 | ||
def compute_pointwise_dual(el, pts):
    """Constructs a dual basis to the basis for el as a linear combination
    of a set of pointwise evaluations.  This is useful when the
    prescribed finite element isn't Ciarlet (e.g. the basis functions
    are provided explicitly as formulae).  Alternately, the element's
    given dual basis may involve differentiation, making run-time
    interpolation difficult in FIAT clients.  The pointwise dual,
    consisting only of pointwise evaluations, will effectively replace
    these derivatives with (automatically determined) finite
    differences.  This is exact on the polynomial space, but is an
    approximation if applied to functions outside the space.

    :param el: a :class:`FiniteElement`.
    :param pts: an iterable of points with the same length as el's
                dimension.  These points must be unisolvent for the
                polynomial space
    :returns: a :class `DualSet`
    """
    nbf = el.space_dimension()

    T = el.ref_el
    sd = T.get_spatial_dimension()

    # For vector/tensor elements, each point accounts for
    # prod(value_shape) basis functions, so we need nbf / prod points.
    assert np.asarray(pts).shape == (int(nbf / np.prod(el.value_shape())), sd)

    # Key for the zeroth derivative in the tabulation dict.
    z = tuple([0] * sd)

    nds = []

    V = el.tabulate(0, pts)[z]

    # Make a square system, invert, and then put it back in the right
    # shape so we have (nbf, ..., npts) with more dimensions
    # for vector or tensor-valued elements.
    alphas = np.linalg.inv(V.reshape((nbf, -1)).T).reshape(V.shape)

    # Each row of alphas gives the coefficients of a functional,
    # represented, as elsewhere in FIAT, as a summation of
    # components of the input at particular points.

    # This logic picks out the points and components for which the
    # weights are actually nonzero to construct the functional.

    pts = np.asarray(pts)
    for coeffs in alphas:
        pt_dict = defaultdict(list)
        # Drop numerically-zero weights (tolerance 1e-12).
        nonzero = np.where(np.abs(coeffs) > 1.e-12)
        # Leading axes index the value component; the last axis indexes
        # the evaluation point.
        *comp, pt_index = nonzero

        # Pair each surviving weight with its value component; for
        # scalar elements comp is empty and zip_longest fills in ().
        for pt, coeff_comp in zip(pts[pt_index],
                                  zip_longest(coeffs[nonzero],
                                              zip(*comp), fillvalue=())):
            pt_dict[tuple(pt)].append(coeff_comp)

        nds.append(Functional(T, el.value_shape(), dict(pt_dict), {}, "node"))

    return DualSet(nds, T, el.entity_dofs())
76 | 76 | return _fiat_scheme(ref_el, degree) |
77 | 77 | elif scheme == "canonical": |
78 | 78 | return _fiat_scheme(ref_el, degree) |
79 | elif scheme == "KMV": # Kong-Mulder-Veldhuizen scheme | |
80 | return _kmv_lump_scheme(ref_el, degree) | |
79 | 81 | else: |
80 | 82 | raise ValueError("Unknown quadrature scheme: %s." % scheme) |
81 | 83 | |
88 | 90 | |
89 | 91 | # Create and return FIAT quadrature rule |
90 | 92 | return make_quadrature(ref_el, num_points_per_axis) |
93 | ||
94 | ||
def _kmv_lump_scheme(ref_el, degree):
    """Specialized quadrature schemes for P < 6 for KMV simplicial elements.

    The quadrature points coincide with the nodes of the
    Kong-Mulder-Veldhuizen elements so that the element mass matrix is
    diagonal ("mass lumping").  Point coordinates and weights are taken
    from the KMV literature (Chin-Joe-Kong/Mulder/Van Veldhuizen and
    Geevers/Mulder/Van der Vegt) — do not alter the constants.
    """

    sd = ref_el.get_spatial_dimension()
    # set the unit element
    if sd == 2:
        T = UFCTriangle()
    elif sd == 3:
        T = UFCTetrahedron()
    else:
        raise ValueError("Dimension not supported")

    if degree == 1:
        # Vertex rule: equal weights summing to the reference cell volume
        # (1/2 for the UFC triangle, 1/6 for the UFC tetrahedron).
        x = ref_el.vertices
        # NOTE: arange is used only to allocate; every entry is overwritten.
        w = arange(sd + 1, dtype=float64)
        if sd == 2:
            w[:] = 1.0 / 6.0
        elif sd == 3:
            w[:] = 1.0 / 24.0
        else:
            raise ValueError("Dimension not supported")
    elif degree == 2:
        if sd == 2:
            # 3 vertices + 3 edge midpoints + barycenter = 7 points.
            x = list(ref_el.vertices)
            for e in range(3):
                x.extend(ref_el.make_points(1, e, 2))  # edge midpoints
            x.extend(ref_el.make_points(2, 0, 3))  # barycenter
            w = arange(7, dtype=float64)
            w[0:3] = 1.0 / 40.0
            w[3:6] = 1.0 / 15.0
            w[6] = 9.0 / 40.0
        elif sd == 3:
            # 4 vertices + 6 edge midpoints + 4 facet barycenters
            # + cell barycenter = 15 points.
            x = list(ref_el.vertices)
            x.extend(
                [
                    (0.0, 0.50, 0.50),
                    (0.50, 0.0, 0.50),
                    (0.50, 0.50, 0.0),
                    (0.0, 0.0, 0.50),
                    (0.0, 0.50, 0.0),
                    (0.50, 0.0, 0.0),
                ]
            )
            # in facets
            x.extend(
                [
                    (0.33333333333333337, 0.3333333333333333, 0.3333333333333333),
                    (0.0, 0.3333333333333333, 0.3333333333333333),
                    (0.3333333333333333, 0.0, 0.3333333333333333),
                    (0.3333333333333333, 0.3333333333333333, 0.0),
                ]
            )
            # in the cell
            x.extend([(1 / 4, 1 / 4, 1 / 4)])
            w = arange(15, dtype=float64)
            w[0:4] = 17.0 / 5040.0    # vertices
            w[4:10] = 2.0 / 315.0     # edge midpoints
            w[10:14] = 9.0 / 560.0    # facet barycenters
            w[14] = 16.0 / 315.0      # cell barycenter
        else:
            raise ValueError("Dimension not supported")

    elif degree == 3:
        if sd == 2:
            # alpha parametrizes the two interior points per edge,
            # beta the three interior cell points.
            alpha = 0.2934695559090401
            beta = 0.2073451756635909
            x = list(ref_el.vertices)
            x.extend(
                [
                    (1 - alpha, alpha),
                    (alpha, 1 - alpha),
                    (0.0, 1 - alpha),
                    (0.0, alpha),
                    (alpha, 0.0),
                    (1 - alpha, 0.0),
                ]  # edge points
            )
            x.extend(
                [(beta, beta), (1 - 2 * beta, beta), (beta, 1 - 2 * beta)]
            )  # points in center of cell
            w = arange(12, dtype=float64)
            w[0:3] = 0.007436456512410291   # vertices
            w[3:9] = 0.02442084061702551    # edge points
            w[9:12] = 0.1103885289202054    # interior points
        elif sd == 3:
            x = list(ref_el.vertices)
            x.extend(
                [
                    (0, 0.685789657581967, 0.314210342418033),
                    (0, 0.314210342418033, 0.685789657581967),
                    (0.314210342418033, 0, 0.685789657581967),
                    (0.685789657581967, 0, 0.314210342418033),
                    (0.685789657581967, 0.314210342418033, 0.0),
                    (0.314210342418033, 0.685789657581967, 0.0),
                    (0, 0, 0.685789657581967),
                    (0, 0, 0.314210342418033),
                    (0, 0.314210342418033, 0.0),
                    (0, 0.685789657581967, 0.0),
                    (0.314210342418033, 0, 0.0),
                    (0.685789657581967, 0, 0.0),
                ]
            )  # 12 points on edges of facets (0-->1-->2)

            x.extend(
                [
                    (0.21548220313557542, 0.5690355937288492, 0.21548220313557542),
                    (0.21548220313557542, 0.21548220313557542, 0.5690355937288492),
                    (0.5690355937288492, 0.21548220313557542, 0.21548220313557542),
                    (0.0, 0.5690355937288492, 0.21548220313557542),
                    (0.0, 0.21548220313557542, 0.5690355937288492),
                    (0.0, 0.21548220313557542, 0.21548220313557542),
                    (0.5690355937288492, 0.0, 0.21548220313557542),
                    (0.21548220313557542, 0.0, 0.5690355937288492),
                    (0.21548220313557542, 0.0, 0.21548220313557542),
                    (0.5690355937288492, 0.21548220313557542, 0.0),
                    (0.21548220313557542, 0.5690355937288492, 0.0),
                    (0.21548220313557542, 0.21548220313557542, 0.0),
                ]
            )  # 12 points (3 points on each facet, 1st two parallel to edge 0)
            alpha = 1 / 6
            x.extend(
                [
                    (alpha, alpha, 0.5),
                    (0.5, alpha, alpha),
                    (alpha, 0.5, alpha),
                    (alpha, alpha, alpha),
                ]
            )  # 4 points inside the cell
            w = arange(32, dtype=float64)
            w[0:4] = 0.00068688236002531922325120561367839   # vertices
            w[4:16] = 0.0015107814913526136472998739890272   # facet edge points
            w[16:28] = 0.0050062894680040258624242888174649  # facet interior points
            w[28:32] = 0.021428571428571428571428571428571   # cell interior points
        else:
            raise ValueError("Dimension not supported")
    elif degree == 4:
        if sd == 2:
            # alpha: interior edge points; beta1/beta2: two rings of
            # interior cell points.
            alpha = 0.2113248654051871  # 0.2113248654051871
            beta1 = 0.4247639617258106  # 0.4247639617258106
            beta2 = 0.130791593829745  # 0.130791593829745
            x = list(ref_el.vertices)
            for e in range(3):
                x.extend(ref_el.make_points(1, e, 2))  # edge midpoints
            x.extend(
                [
                    (1 - alpha, alpha),
                    (alpha, 1 - alpha),
                    (0.0, 1 - alpha),
                    (0.0, alpha),
                    (alpha, 0.0),
                    (1 - alpha, 0.0),
                ]  # edge points
            )
            x.extend(
                [(beta1, beta1), (1 - 2 * beta1, beta1), (beta1, 1 - 2 * beta1)]
            )  # points in center of cell
            x.extend(
                [(beta2, beta2), (1 - 2 * beta2, beta2), (beta2, 1 - 2 * beta2)]
            )  # points in center of cell
            w = arange(18, dtype=float64)
            w[0:3] = 0.003174603174603175   # vertices
            w[3:6] = 0.0126984126984127     # edge midpoints
            w[6:12] = 0.01071428571428571   # other edge points
            w[12:15] = 0.07878121446939182  # first ring of interior points
            w[15:18] = 0.05058386489568756  # second ring of interior points
        else:
            raise ValueError("Dimension not supported")

    elif degree == 5:
        if sd == 2:
            # alpha1/alpha2: two families of interior edge points;
            # beta1..beta3: symmetric triples in the cell interior;
            # (gamma1, delta1): an asymmetric orbit of 6 interior points.
            alpha1 = 0.3632980741536860e-00
            alpha2 = 0.1322645816327140e-00
            beta1 = 0.4578368380791611e-00
            beta2 = 0.2568591072619591e-00
            beta3 = 0.5752768441141011e-01
            gamma1 = 0.7819258362551702e-01
            delta1 = 0.2210012187598900e-00
            x = list(ref_el.vertices)
            x.extend(
                [
                    (1 - alpha1, alpha1),
                    (alpha1, 1 - alpha1),
                    (0.0, 1 - alpha1),
                    (0.0, alpha1),
                    (alpha1, 0.0),
                    (1 - alpha1, 0.0),
                ]  # edge points
            )
            x.extend(
                [
                    (1 - alpha2, alpha2),
                    (alpha2, 1 - alpha2),
                    (0.0, 1 - alpha2),
                    (0.0, alpha2),
                    (alpha2, 0.0),
                    (1 - alpha2, 0.0),
                ]  # edge points
            )
            x.extend(
                [(beta1, beta1), (1 - 2 * beta1, beta1), (beta1, 1 - 2 * beta1)]
            )  # points in center of cell
            x.extend(
                [(beta2, beta2), (1 - 2 * beta2, beta2), (beta2, 1 - 2 * beta2)]
            )  # points in center of cell
            x.extend(
                [(beta3, beta3), (1 - 2 * beta3, beta3), (beta3, 1 - 2 * beta3)]
            )  # points in center of cell
            x.extend(
                [
                    (gamma1, delta1),
                    (1 - gamma1 - delta1, delta1),
                    (gamma1, 1 - gamma1 - delta1),
                    (delta1, gamma1),
                    (1 - gamma1 - delta1, gamma1),
                    (delta1, 1 - gamma1 - delta1),
                ]  # edge points
            )
            w = arange(30, dtype=float64)
            w[0:3] = 0.7094239706792450e-03
            w[3:9] = 0.6190565003676629e-02
            w[9:15] = 0.3480578640489211e-02
            w[15:18] = 0.3453043037728279e-01
            w[18:21] = 0.4590123763076286e-01
            w[21:24] = 0.1162613545961757e-01
            w[24:30] = 0.2727857596999626e-01
        else:
            raise ValueError("Dimension not supported")

    # Return scheme
    return QuadratureRule(T, x, w)
91 | 325 | |
92 | 326 | |
93 | 327 | def _triangle_scheme(degree): |
13 | 13 | from FIAT.polynomial_set import mis |
14 | 14 | from FIAT.reference_element import (compute_unflattening_map, |
15 | 15 | flatten_reference_cube) |
16 | from FIAT.reference_element import make_lattice | |
17 | ||
18 | from FIAT.pointwise_dual import compute_pointwise_dual | |
16 | 19 | |
17 | 20 | x, y, z = symbols('x y z') |
18 | 21 | variables = (x, y, z) |
122 | 125 | self._degree = degree |
123 | 126 | self.flat_el = flat_el |
124 | 127 | |
128 | self.dual = compute_pointwise_dual(self, unisolvent_pts(ref_el, degree)) | |
129 | ||
125 | 130 | def degree(self): |
126 | 131 | return self._degree + 1 |
127 | 132 | |
137 | 142 | def tabulate(self, order, points, entity=None): |
138 | 143 | |
139 | 144 | if entity is None: |
140 | entity = (self.ref_el.get_spatial_dimension(), 0) | |
145 | entity = (self.ref_el.get_dimension(), 0) | |
141 | 146 | |
142 | 147 | entity_dim, entity_id = entity |
143 | 148 | transform = self.ref_el.get_entity_transform(entity_dim, entity_id) |
236 | 241 | for l in range(6, i + 1) for j in range(l-5) for k in range(j+1)]) |
237 | 242 | |
238 | 243 | return IL |
244 | ||
245 | ||
def unisolvent_pts(K, deg):
    """Dispatch to the quad or hex unisolvent-point generator for the
    serendipity space of order ``deg`` on cell ``K``."""
    flat = flatten_reference_cube(K)
    generators = {2: unisolvent_pts_quad, 3: unisolvent_pts_hex}
    sd = flat.get_spatial_dimension()
    if sd not in generators:
        raise ValueError("Serendipity only defined for quads and hexes")
    return generators[sd](flat, deg)
255 | ||
256 | ||
def unisolvent_pts_quad(K, deg):
    """Gives a set of unisolvent points for the quad serendipity space of order deg.
    The S element is not dual to these nodes, but a dual basis can be constructed from them."""
    edge = K.construct_subelement(1)
    verts = np.asarray(K.vertices)

    # Vertices first, then the interior lattice of each edge mapped onto
    # that edge.
    pts = list(K.vertices)
    edge_pts = make_lattice(edge.vertices, deg, 1)
    for e in K.topology[1]:
        to_cell = K.get_entity_transform(1, e)
        pts.extend(tuple(to_cell(p)) for p in edge_pts)

    # Interior points only appear for deg > 3: a triangular lattice
    # spanned by the first two edge directions.
    if deg > 3:
        h0 = (verts[1, :] - verts[0, :]) / (deg-2)
        h1 = (verts[2, :] - verts[0, :]) / (deg-2)
        pts.extend(tuple(verts[0, :] + h0 * i + h1 * j)
                   for i in range(1, deg-2)
                   for j in range(1, deg-1-i))

    return pts
278 | ||
279 | ||
def unisolvent_pts_hex(K, deg):
    """Gives a set of unisolvent points for the hex serendipity space of order deg.
    The S element is not dual to these nodes, but a dual basis can be constructed from them."""
    # 1d and 2d subcells used to generate edge and face points.
    L = K.construct_subelement(1)
    F = K.construct_subelement(2)
    vs = np.asarray(K.vertices)
    pts = [pt for pt in K.vertices]
    # Interior lattice of the reference edge, mapped onto every edge.
    Lpts = make_lattice(L.vertices, deg, 1)
    for e in K.topology[1]:
        Fmap = K.get_entity_transform(1, e)
        epts = [tuple(Fmap(pt)) for pt in Lpts]
        pts.extend(epts)
    if deg > 3:
        fvs = np.asarray(F.vertices)
        # Planar points to map to each face
        dx0 = (fvs[1, :] - fvs[0, :]) / (deg-2)
        dx1 = (fvs[2, :] - fvs[0, :]) / (deg-2)

        # Triangular lattice of interior face points, mapped onto each face.
        Fpts = [tuple(fvs[0, :] + dx0 * i + dx1 * j)
                for i in range(1, deg-2)
                for j in range(1, deg-1-i)]
        for f in K.topology[2]:
            Fmap = K.get_entity_transform(2, f)
            pts.extend([tuple(Fmap(pt)) for pt in Fpts])
    if deg > 5:
        # Interior cell points on a tetrahedral-style lattice; only
        # present for deg > 5.
        dx0 = np.asarray([1., 0, 0]) / (deg-4)
        dx1 = np.asarray([0, 1., 0]) / (deg-4)
        dx2 = np.asarray([0, 0, 1.]) / (deg-4)

        Ipts = [tuple(vs[0, :] + dx0 * i + dx1 * j + dx2 * k)
                for i in range(1, deg-4)
                for j in range(1, deg-3-i)
                for k in range(1, deg-2-i-j)]
        pts.extend(Ipts)

    return pts
0 | import numpy as np | |
1 | from FIAT import ufc_simplex, ArnoldWinther, make_quadrature, expansions | |
2 | ||
3 | ||
def test_dofs():
    """Nodality checks for the conforming Arnold-Winther element on a
    random triangle: vertex dofs are Kronecker, edge moments vanish for
    all but the matching edge dofs, and the interior moment dofs pick
    out the three constant symmetric tensors."""
    line = ufc_simplex(1)
    T = ufc_simplex(2)
    T.vertices = np.random.rand(3, 2)
    AW = ArnoldWinther(T, 3)

    # check Kronecker property at vertices
    bases = [[[1, 0], [0, 0]], [[0, 1], [1, 0]], [[0, 0], [0, 1]]]

    vert_vals = AW.tabulate(0, T.vertices)[(0, 0)]
    for i in range(3):
        for j in range(3):
            assert np.allclose(vert_vals[3*i+j, :, :, i], bases[j])
            for k in (1, 2):
                assert np.allclose(vert_vals[3*i+j, :, :, (i+k) % 3], np.zeros((2, 2)))

    # check edge moments
    Qline = make_quadrature(line, 6)

    linebfs = expansions.LineExpansionSet(line)
    linevals = linebfs.tabulate(1, Qline.pts)
    wts = np.asarray(Qline.wts)  # invariant across edges; hoisted

    def edge_moments(ed, left, right):
        # Moments of left^T.sigma.right against the first two Legendre
        # polynomials on edge ed (deduplicates the n,n / n,t loops).
        vals = AW.tabulate(0, Qline.pts, (1, ed))[(0, 0)]
        moments = np.zeros((30, 2))
        for bf in range(30):
            for k in range(len(wts)):
                component = left @ vals[bf, :, :, k] @ right
                for m in (0, 1):
                    moments[bf, m] += wts[k] * component * linevals[m, k]
        return moments

    # n, n moments vanish for every basis function except the two
    # n.n dofs on that edge
    for ed in range(3):
        n = T.compute_scaled_normal(ed)
        nnmoments = edge_moments(ed, n, n)
        keep = (AW.dual.entity_ids[1][ed][0], AW.dual.entity_ids[1][ed][2])
        for bf in range(30):
            if bf not in keep:
                assert np.allclose(nnmoments[bf, :], np.zeros(2))

    # n, t moments vanish for every basis function except the two
    # n.t dofs on that edge
    for ed in range(3):
        n = T.compute_scaled_normal(ed)
        t = T.compute_edge_tangent(ed)
        ntmoments = edge_moments(ed, n, t)
        keep = (AW.dual.entity_ids[1][ed][1], AW.dual.entity_ids[1][ed][3])
        for bf in range(30):
            if bf not in keep:
                assert np.allclose(ntmoments[bf, :], np.zeros(2))

    # check internal dofs: constant moments of the basis single out the
    # three interior dofs (dofs 21..23) as the constant symmetric tensors
    Q = make_quadrature(T, 6)
    qpvals = AW.tabulate(0, Q.pts)[(0, 0)]
    const_moms = qpvals @ Q.wts
    assert np.allclose(const_moms[:21], np.zeros((21, 2, 2)))
    assert np.allclose(const_moms[24:], np.zeros((6, 2, 2)))
    assert np.allclose(const_moms[21:24, 0, 0], np.asarray([1, 0, 0]))
    assert np.allclose(const_moms[21:24, 0, 1], np.asarray([0, 1, 0]))
    assert np.allclose(const_moms[21:24, 1, 0], np.asarray([0, 1, 0]))
    assert np.allclose(const_moms[21:24, 1, 1], np.asarray([0, 0, 1]))
84 | ||
85 | ||
def frob(a, b):
    """Frobenius inner product: sum of elementwise products of a and b."""
    return np.dot(a.ravel(), b.ravel())
88 | ||
89 | ||
def test_projection():
    """L2-project each quadratic monomial, placed in each independent
    component of a symmetric 2x2 tensor, onto the first 24 AW basis
    functions; the projection must reproduce the monomial exactly."""
    T = ufc_simplex(2)
    T.vertices = np.asarray([(0.0, 0.0), (1.0, 0.0), (0.5, 2.1)])

    AW = ArnoldWinther(T, 3)

    Q = make_quadrature(T, 4)
    qpts = np.asarray(Q.pts)
    qwts = np.asarray(Q.wts)
    nqp = len(Q.wts)

    nbf = 24
    m = np.zeros((nbf, nbf))
    b = np.zeros((nbf,))
    rhs_vals = np.zeros((2, 2, nqp))

    bfvals = AW.tabulate(0, qpts)[(0, 0)][:nbf, :, :, :]

    # Gram (mass) matrix of the basis in the Frobenius inner product.
    for i in range(nbf):
        for j in range(nbf):
            for k in range(nqp):
                m[i, j] += qwts[k] * frob(bfvals[i, :, :, k],
                                          bfvals[j, :, :, k])

    assert np.linalg.cond(m) < 1.e12

    # The three independent components of a symmetric 2x2 tensor.
    # BUG FIX: the original listed (0, 0) twice and never tested (1, 1).
    comps = [(0, 0), (0, 1), (1, 1)]

    # loop over monomials up to degree 2
    for deg in range(3):
        for jj in range(deg+1):
            ii = deg-jj
            for comp in comps:
                b[:] = 0.0
                # Clear stale data from the previous component so each
                # monomial/component pair is tested in isolation
                # (previously rhs_vals accumulated across iterations).
                rhs_vals[:] = 0.0
                # set RHS (symmetrically) to be the monomial in
                # the proper component.
                rhs_vals[comp] = qpts[:, 0]**ii * qpts[:, 1]**jj
                rhs_vals[tuple(reversed(comp))] = rhs_vals[comp]
                for i in range(nbf):
                    for k in range(nqp):
                        b[i] += qwts[k] * frob(bfvals[i, :, :, k],
                                               rhs_vals[:, :, k])
                x = np.linalg.solve(m, b)

                sol_at_qpts = np.zeros(rhs_vals.shape)
                for i in range(nbf):
                    for k in range(nqp):
                        sol_at_qpts[:, :, k] += x[i] * bfvals[i, :, :, k]

                diff = sol_at_qpts - rhs_vals
                err = 0.0
                for k in range(nqp):
                    err += qwts[k] * frob(diff[:, :, k], diff[:, :, k])

                assert np.sqrt(err) < 1.e-12
0 | import numpy as np | |
1 | from FIAT import ufc_simplex, ArnoldWintherNC, make_quadrature, expansions | |
2 | ||
3 | ||
def test_dofs():
    """Nodality checks for the nonconforming Arnold-Winther element on a
    random triangle: edge moments vanish for all but the matching edge
    dofs, and interior moment dofs pick out the constant symmetric
    tensors."""
    line = ufc_simplex(1)
    T = ufc_simplex(2)
    T.vertices = np.random.rand(3, 2)
    AW = ArnoldWintherNC(T, 2)

    Qline = make_quadrature(line, 6)

    linebfs = expansions.LineExpansionSet(line)
    linevals = linebfs.tabulate(1, Qline.pts)
    wts = np.asarray(Qline.wts)  # invariant across edges; hoisted

    def edge_moments(ed, left, right):
        # Moments of left^T.sigma.right against the first two Legendre
        # polynomials on edge ed (deduplicates the n,n / n,t loops).
        vals = AW.tabulate(0, Qline.pts, (1, ed))[(0, 0)]
        moments = np.zeros((18, 2))
        for bf in range(18):
            for k in range(len(wts)):
                component = left @ vals[bf, :, :, k] @ right
                for m in (0, 1):
                    moments[bf, m] += wts[k] * component * linevals[m, k]
        return moments

    # n, n moments vanish for every basis function except the two
    # n.n dofs on that edge
    for ed in range(3):
        n = T.compute_scaled_normal(ed)
        nnmoments = edge_moments(ed, n, n)
        keep = (AW.dual.entity_ids[1][ed][0], AW.dual.entity_ids[1][ed][2])
        for bf in range(18):
            if bf not in keep:
                assert np.allclose(nnmoments[bf, :], np.zeros(2))

    # n, t moments vanish for every basis function except the two
    # n.t dofs on that edge (looser tolerance, as in the original)
    for ed in range(3):
        n = T.compute_scaled_normal(ed)
        t = T.compute_edge_tangent(ed)
        ntmoments = edge_moments(ed, n, t)
        keep = (AW.dual.entity_ids[1][ed][1], AW.dual.entity_ids[1][ed][3])
        for bf in range(18):
            if bf not in keep:
                assert np.allclose(ntmoments[bf, :], np.zeros(2), atol=1.e-7)

    # check internal dofs: constant moments single out dofs 12..14 as
    # the constant symmetric tensors
    Q = make_quadrature(T, 6)
    qpvals = AW.tabulate(0, Q.pts)[(0, 0)]
    const_moms = qpvals @ Q.wts
    assert np.allclose(const_moms[:12], np.zeros((12, 2, 2)))
    assert np.allclose(const_moms[15:], np.zeros((3, 2, 2)))
    assert np.allclose(const_moms[12:15, 0, 0], np.asarray([1, 0, 0]))
    assert np.allclose(const_moms[12:15, 0, 1], np.asarray([0, 1, 0]))
    assert np.allclose(const_moms[12:15, 1, 0], np.asarray([0, 1, 0]))
    assert np.allclose(const_moms[12:15, 1, 1], np.asarray([0, 0, 1]))
import math

import numpy as np
import pytest
2 | ||
3 | from FIAT.reference_element import UFCInterval, UFCTriangle, UFCTetrahedron | |
4 | from FIAT import create_quadrature, make_quadrature, polynomial_set | |
5 | from FIAT.kong_mulder_veldhuizen import KongMulderVeldhuizen as KMV | |
6 | ||
7 | I = UFCInterval() | |
8 | T = UFCTriangle() | |
9 | Te = UFCTetrahedron() | |
10 | ||
11 | ||
@pytest.mark.parametrize("p_d", [(1, 1), (2, 3), (3, 4)])
def test_kmv_quad_tet_schemes(p_d):  # noqa: W503
    """KMV tet rules for degree-p elements integrate all monomials of
    total degree <= d exactly (checked against the closed-form integral
    of x^i y^j z^k over the unit tetrahedron)."""
    # Use math.factorial: np.math was an accidental, non-public alias of
    # the math module and has been removed from modern NumPy.
    fct = math.factorial
    p, d = p_d
    q = create_quadrature(Te, p, "KMV")
    for i in range(d + 1):
        for j in range(d + 1 - i):
            for k in range(d + 1 - i - j):
                # Exact value: i! j! k! / (i + j + k + 3)!
                trueval = fct(i) * fct(j) * fct(k) / fct(i + j + k + 3)
                assert (
                    np.abs(
                        trueval -
                        q.integrate(lambda x: x[0] ** i * x[1] ** j * x[2] ** k)
                    ) <
                    1.0e-10
                )
28 | ||
29 | ||
@pytest.mark.parametrize("p_d", [(1, 1), (2, 3), (3, 5), (4, 7), (5, 9)])
def test_kmv_quad_tri_schemes(p_d):
    """KMV triangle rules for degree-p elements integrate all monomials
    of total degree <= d exactly (checked against the closed-form
    integral of x^i y^j over the unit triangle)."""
    # Use math.factorial: np.math was an accidental, non-public alias of
    # the math module and has been removed from modern NumPy.
    fct = math.factorial
    p, d = p_d
    q = create_quadrature(T, p, "KMV")
    for i in range(d + 1):
        for j in range(d + 1 - i):
            # Exact value: i! j! / (i + j + 2)!
            trueval = fct(i) * fct(j) / fct(i + j + 2)
            assert (
                np.abs(trueval - q.integrate(lambda x: x[0] ** i * x[1] ** j)) < 1.0e-10
            )
41 | ||
42 | ||
@pytest.mark.parametrize(
    "element_degree",
    [(KMV(T, 1), 1), (KMV(T, 2), 2), (KMV(T, 3), 3), (KMV(T, 4), 4), (KMV(T, 5), 5)],
)
def test_Kronecker_property_tris(element_degree):
    """
    Evaluating the nodal basis at the special quadrature points should
    have a Kronecker property. Also checks that the basis functions
    and quadrature points are given the same ordering.
    """
    elem, deg = element_degree
    rule = create_quadrature(T, deg, scheme="KMV")
    # tabulate(0, ...) yields a dict with the single key (0, 0): the
    # undifferentiated basis values at the quadrature points
    tabulation = elem.tabulate(0, rule.get_points())
    vals = tabulation[(0, 0)]
    # basis i must be 1 at point i and 0 at every other point
    assert np.allclose(vals, np.eye(*vals.shape))
57 | ||
58 | ||
@pytest.mark.parametrize(
    "element_degree", [(KMV(Te, 1), 1), (KMV(Te, 2), 2), (KMV(Te, 3), 3)]
)
def test_Kronecker_property_tets(element_degree):
    """
    Evaluating the nodal basis at the special quadrature points should
    have a Kronecker property. Also checks that the basis functions
    and quadrature points are given the same ordering.
    """
    elem, deg = element_degree
    rule = create_quadrature(Te, deg, scheme="KMV")
    # in 3D the sole key of tabulate(0, ...) is (0, 0, 0)
    tabulation = elem.tabulate(0, rule.get_points())
    vals = tabulation[(0, 0, 0)]
    # basis i must be 1 at point i and 0 at every other point
    assert np.allclose(vals, np.eye(*vals.shape))
72 | ||
73 | ||
@pytest.mark.parametrize("degree", [2, 3, 4])
def test_edge_degree(degree):
    """Verify that the outer edges of a degree KMV element
    are indeed of degree and the interior is of degree+1"""
    interval = UFCInterval()
    # quadrature rule exact for a degree+1 polynomial on the interval
    qr = make_quadrature(interval, degree + 1)
    weight_mat = np.diag(qr.wts)
    dim = interval.get_spatial_dimension()
    # extract the single ON basis function of degree exactly degree+1
    on_set = polynomial_set.ONPolynomialSet(interval, degree + 1, (dim,))
    on_set = on_set.take([degree + 1])
    # tabulate it at the quadrature points
    interval_vals = np.squeeze(on_set.tabulate(qr.get_points())[(0,)])
    # degree-`degree` KMV element: outer edges of degree `degree`,
    # interior of degree+1
    tri = UFCTriangle()
    element = KMV(tri, degree)
    for e in range(3):
        # restrict the element's basis to edge e
        edge_values = element.tabulate(0, qr.get_points(), (1, e))[(0, 0)]
        # an edge restriction of degree `degree` must be L2-orthogonal
        # to the degree+1 ON basis function
        ortho = edge_values @ weight_mat @ interval_vals.T
        assert np.allclose(np.sum(ortho), 0.0)
98 | ||
99 | ||
@pytest.mark.parametrize(
    "element_degree",
    [(KMV(T, 1), 1), (KMV(T, 2), 2), (KMV(T, 3), 3), (KMV(T, 4), 4), (KMV(T, 5), 5)],
)
def test_interpolate_monomials_tris(element_degree):
    """Nodal interpolation with a degree-``degree`` KMV triangle element
    must reproduce every monomial x^i y^j of total degree <= degree
    (checked in a discrete weighted L2 norm at quadrature points)."""
    element, degree = element_degree

    # interpolation points, ordered the same way as KMV nodes
    pts = create_quadrature(T, degree, "KMV").pts

    # quadrature exact for products of two degree-`degree` polynomials
    Q = make_quadrature(T, 2 * degree)
    phis = element.tabulate(0, Q.pts)[(0, 0)]
    for i in range(degree + 1):
        for j in range(degree + 1 - i):
            # bind i, j as defaults so the closure is loop-safe
            def m(x, i=i, j=j):
                return x[0] ** i * x[1] ** j
            dofs = np.array([m(pt) for pt in pts])
            interp = phis.T @ dofs
            matqp = np.array([m(pt) for pt in Q.pts])
            # weighted squared L2 error at the quadrature points
            err = np.dot(Q.wts, (interp - matqp) ** 2)
            assert np.sqrt(err) <= 1.0e-12
123 | ||
124 | ||
@pytest.mark.parametrize(
    "element_degree", [(KMV(Te, 1), 1), (KMV(Te, 2), 2), (KMV(Te, 3), 3)]
)
def test_interpolate_monomials_tets(element_degree):
    """Nodal interpolation with a degree-``degree`` KMV tetrahedral
    element must reproduce every monomial x^i y^j z^k of total degree
    <= degree (checked in a discrete weighted L2 norm)."""
    element, degree = element_degree

    # interpolation points, ordered the same way as KMV nodes
    pts = create_quadrature(Te, degree, "KMV").pts

    # quadrature exact for products of two degree-`degree` polynomials
    Q = make_quadrature(Te, 2 * degree)
    phis = element.tabulate(0, Q.pts)[(0, 0, 0)]
    for i in range(degree + 1):
        for j in range(degree + 1 - i):
            for k in range(degree + 1 - i - j):
                # bind i, j, k as defaults so the closure is loop-safe
                def m(x, i=i, j=j, k=k):
                    return x[0] ** i * x[1] ** j * x[2] ** k
                dofs = np.array([m(pt) for pt in pts])
                interp = phis.T @ dofs
                matqp = np.array([m(pt) for pt in Q.pts])
                # weighted squared L2 error at the quadrature points
                err = np.dot(Q.wts, (interp - matqp) ** 2)
                assert np.sqrt(err) <= 1.0e-12
0 | import numpy as np | |
1 | from FIAT import ufc_simplex, MardalTaiWinther, make_quadrature, expansions | |
2 | ||
3 | ||
def test_dofs():
    """Check the edge degrees of freedom of the Mardal-Tai-Winther
    element on a randomized triangle: normal moments against the first
    two Legendre modes and tangential moments against the constant mode
    must each hit exactly one basis function per edge."""
    line = ufc_simplex(1)
    T = ufc_simplex(2)
    # randomize the physical triangle so the check is not specific to
    # the reference cell; seed the generator so the test is reproducible
    T.vertices = np.random.RandomState(42).rand(3, 2)
    MTW = MardalTaiWinther(T, 3)

    Qline = make_quadrature(line, 6)
    # loop-invariant: quadrature weights as an array
    wts = np.asarray(Qline.wts)

    linebfs = expansions.LineExpansionSet(line)
    linevals = linebfs.tabulate(1, Qline.pts)

    # normal moments: on edge ed, dofs 3*ed (constant mode) and
    # 3*ed + 2 (linear mode) are the unit normal moments
    for ed in range(3):
        n = T.compute_scaled_normal(ed)

        vals = MTW.tabulate(0, Qline.pts, (1, ed))[(0, 0)]
        nvals = np.dot(np.transpose(vals, (0, 2, 1)), n)
        normal_moments = np.zeros((9, 2))
        for bf in range(9):
            for k in range(len(Qline.wts)):
                for m in (0, 1):
                    normal_moments[bf, m] += wts[k] * nvals[bf, k] * linevals[m, k]
        expected = np.zeros((9, 2))
        expected[3*ed, 0] = 1.0
        expected[3*ed+2, 1] = 1.0
        assert np.allclose(normal_moments, expected)

    # tangential moments: on edge ed, dof 3*ed + 1 is the unit constant
    # tangential moment
    for ed in range(3):
        t = T.compute_edge_tangent(ed)

        vals = MTW.tabulate(0, Qline.pts, (1, ed))[(0, 0)]
        tvals = np.dot(np.transpose(vals, (0, 2, 1)), t)
        tangent_moments = np.zeros(9)
        for bf in range(9):
            for k in range(len(Qline.wts)):
                tangent_moments[bf] += wts[k] * tvals[bf, k] * linevals[0, k]
        expected = np.zeros(9)
        expected[3*ed + 1] = 1.0
        assert np.allclose(tangent_moments, expected)
0 | # Copyright (C) 2020 Robert C Kirby (Baylor University) | |
1 | # | |
2 | # This file is part of FIAT (https://www.fenicsproject.org) | |
3 | # | |
4 | # SPDX-License-Identifier: LGPL-3.0-or-later | |
5 | ||
6 | import pytest | |
7 | import numpy | |
8 | ||
9 | from FIAT import ( | |
10 | BrezziDouglasMarini, Morley, QuinticArgyris, CubicHermite) | |
11 | ||
12 | from FIAT.reference_element import ( | |
13 | UFCTriangle, | |
14 | make_lattice) | |
15 | ||
16 | from FIAT.pointwise_dual import compute_pointwise_dual as cpd | |
17 | ||
T = UFCTriangle()  # reference triangle shared by all parametrized elements below
19 | ||
20 | ||
@pytest.mark.parametrize("element",
                         [CubicHermite(T),
                          Morley(T),
                          QuinticArgyris(T),
                          BrezziDouglasMarini(T, 1, variant="integral")])
def test_pw_dual(element):
    """The pointwise dual reconstructed from a degree-matched lattice of
    points must agree with the element's own dual basis in the Riesz
    representation on the element's polynomial set."""
    lattice_pts = make_lattice(element.ref_el.vertices, element.degree())
    rebuilt_dual = cpd(element, lattice_pts)

    expected = element.dual.to_riesz(element.poly_set)
    actual = rebuilt_dual.to_riesz(element.poly_set)
    assert numpy.allclose(expected, actual)
0 | from FIAT.reference_element import UFCQuadrilateral | |
0 | from FIAT.reference_element import ( | |
1 | UFCQuadrilateral, UFCInterval, TensorProductCell) | |
1 | 2 | from FIAT import Serendipity |
2 | 3 | import numpy as np |
3 | 4 | import sympy |
29 | 30 | assert actual.shape == (8, 2) |
30 | 31 | assert np.allclose(np.asarray(expect, dtype=float), |
31 | 32 | actual.reshape(8, 2)) |
33 | ||
34 | ||
def test_dual_tensor_versus_ufc():
    """Serendipity duals built on the UFC quadrilateral and on the
    equivalent interval x interval tensor-product cell must coincide
    node for node."""
    interval = UFCInterval()
    ufc_quad = UFCQuadrilateral()
    tensor_quad = TensorProductCell(interval, interval)
    s_ufc = Serendipity(ufc_quad, 2)
    s_tensor = Serendipity(tensor_quad, 2)
    # since both elements go through the flattened cell to produce the
    # dual basis, they ought to do *exactly* the same calculations and
    # hence form exactly the same nodes
    for idx in range(s_ufc.space_dimension()):
        assert s_ufc.dual.nodes[idx].pt_dict == s_tensor.dual.nodes[idx].pt_dict