diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index dc0c89018..000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,26 +0,0 @@ -version: 2 -jobs: - build: - docker: - - image: circleci/python:3.6 - working_directory: ~/fiat-test - steps: - - checkout - - run: - name: Install dependencies # Install with sudo as tests not run as superuser in circleci/python - command: sudo pip install flake8 pytest pytest-cases pydocstyle numpy sympy coverage coveralls --upgrade - - run: - name: Install FIAT - command: pip install . --user - - run: - name: Run flake8 and pydocstyle tests - command: | - python -m flake8 . - python -m pydocstyle . - - run: - name: Run unit tests - command: | - DATA_REPO_GIT="" coverage run --source FIAT -m pytest -v test/ - if [ -v COVERALLS_REPO_TOKEN ]; then - coveralls - fi diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml new file mode 100644 index 000000000..53c7aac61 --- /dev/null +++ b/.github/workflows/pythonapp.yml @@ -0,0 +1,41 @@ +# This workflow will install Python dependencies, run tests and lint +# with a single version of Python For more information see: +# https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: FIAT CI + +on: [push, pull_request] + +jobs: + build: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, macos-latest] + python-version: [3.7, 3.8] + + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Lint with flake8 + run: | + pip install flake8 + flake8 --statistics . + - name: Check documentation style + run: | + pip install pydocstyle + python -m pydocstyle . + - name: Install FIAT + run: pip install . + - name: Test with pytest + run: | + pip install coveralls pytest pytest-cov pytest-xdist + DATA_REPO_GIT="" pytest --cov=FIAT/ test/ + - name: Coveralls + if: ${{ github.repository == 'FEniCS/fiat' && github.head_ref == '' && matrix.os == 'ubuntu-latest' && matrix.python-version == '3.8' }} + env: + COVERALLS_REPO_TOKEN: ${{ secrets.COVERALLS_REPO_TOKEN }} + run: coveralls diff --git a/.gitignore b/.gitignore index d70bb6226..26346c53d 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,5 @@ /.cache/ /doc/sphinx/source/api-doc release/ + +/doc/sphinx/source/_build/ \ No newline at end of file diff --git a/.travis.yml b/.travis.yml index 5dba4cb3d..9d326afe7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,7 +5,7 @@ python: before_install: - pip install flake8 - - pip install pytest pytest-cases + - pip install pytest install: - pip install . diff --git a/AUTHORS b/AUTHORS index 81d16671b..02ff6db21 100644 --- a/AUTHORS +++ b/AUTHORS @@ -73,3 +73,6 @@ Contributors: Cyrus Cheng email: cyruscycheng21@gmail.com + + Reuben W. 
Hill + email: reuben.hill10@imperial.ac.uk diff --git a/FIAT/P0.py b/FIAT/P0.py index 6d62e1d8c..5c5522cf8 100644 --- a/FIAT/P0.py +++ b/FIAT/P0.py @@ -19,7 +19,10 @@ def __init__(self, ref_el): entity_ids = {} nodes = [] vs = numpy.array(ref_el.get_vertices()) - bary = tuple(numpy.average(vs, 0)) + if ref_el.get_dimension() == 0: + bary = () + else: + bary = tuple(numpy.average(vs, 0)) nodes = [functional.PointEvaluation(ref_el, bary)] entity_ids = {} diff --git a/FIAT/Sminus.py b/FIAT/Sminus.py index 728886914..e7d1f2b56 100644 --- a/FIAT/Sminus.py +++ b/FIAT/Sminus.py @@ -32,6 +32,7 @@ def triangular_number(n): return int((n+1)*n/2) + def choose_ijk_total(degree): top = 1 for i in range(1, 2 + degree + 1): @@ -39,7 +40,7 @@ def choose_ijk_total(degree): bottom = 1 for i in range(1, degree + 1): bottom = i * bottom - return int(top /(2 * bottom)) + return int(top / (2 * bottom)) class TrimmedSerendipity(FiniteElement): @@ -69,17 +70,17 @@ def __init__(self, ref_el, degree, mapping): if(degree >= 2): entity_ids[2][0] = list(range(cur, cur + 2*triangular_number(degree - 2) + degree)) - + cur += 2*triangular_number(degree - 2) + degree else: - #3-d case. + # 3-d case. entity_ids[3] = {} for j in sorted(flat_topology[1]): entity_ids[1][j] = list(range(cur, cur + degree)) cur = cur + degree - - if (degree >= 2 ): + + if (degree >= 2): if (degree < 4): for j in sorted(flat_topology[2]): entity_ids[2][j] = list(range(cur, cur + 2*triangular_number(degree - 2) + degree)) @@ -88,8 +89,7 @@ def __init__(self, ref_el, degree, mapping): for j in sorted(flat_topology[2]): entity_ids[2][j] = list(range(cur, cur + 3 * degree - 4)) cur = cur + 3*degree - 4 - - + if (degree >= 4): if (degree == 4): entity_ids[3][0] = list(range(cur, cur + 6)) @@ -172,7 +172,6 @@ def tabulate(self, order, points, entity=None): phivals[alpha] = T return phivals - def entity_dofs(self): """Return the map of topological entities to degrees of freedom for the finite element.""" @@ -284,40 +283,40 @@ def trimmed_f_lambda_2d(deg, dx, dy, x_mid, y_mid): def e_lambda_1_3d_trimmed(max_deg, dx, dy, dz, x_mid, y_mid, z_mid): EL = tuple([]) - ##assignment to edge x=y=0 - for i in range (0, max_deg): + # #assignment to edge x=y=0 + for i in range(0, max_deg): EL += tuple([(0, 0, leg(i, z_mid) * dx[0] * dy[0])]) - ##assignment to edge x=0, y=1 + # #assignment to edge x=0, y=1 for i in range(0, max_deg): EL += tuple([(0, 0, leg(i, z_mid) * dy[1] * dx[0])]) - ##assignment to edge x = 1, y = 0 + # #assignment to edge x = 1, y = 0 for i in range(0, max_deg): EL += tuple([(0, 0, leg(i, z_mid) * dx[1] * dy[0])]) - ##assignment to edge x = 1, y = 1 + # #assignment to edge x = 1, y = 1 for i in range(0, max_deg): EL += tuple([(0, 0, leg(i, z_mid) * dx[1] * dy[1])]) - ##assignment to edge x = 0, z = 0 + # #assignment to edge x = 0, z = 0 for i in range(0, max_deg): EL += tuple([(0, leg(i, y_mid) * dx[0] * dz[0], 0)]) - ##assignment to edge x = 0, z = 1 + # #assignment to edge x = 0, z = 1 for i in range(0, max_deg): EL += tuple([(0, leg(i, y_mid) * dx[0] * dz[1], 0)]) - ##assignment to edge x = 1, z = 0 + # #assignment to edge x = 1, z = 0 for i in range(0, max_deg): EL += tuple([(0, leg(i, y_mid) * dx[1] * dz[0], 0)]) - ##assignment to edge x = 1, z = 1 + # #assignment to edge x = 1, z = 1 for i in range(0, max_deg): EL += tuple([(0, leg(i, y_mid) * dx[1] * dz[1], 0)]) - ##assignment to edge y = 0, z = 0 + # #assignment to edge y = 0, z = 0 for i in range(0, max_deg): EL += tuple([(leg(i, x_mid) * dy[0] * dz[0], 0, 0)]) - ##assignment to edge 
y = 0, z = 1 + # #assignment to edge y = 0, z = 1 for i in range(0, max_deg): EL += tuple([(leg(i, x_mid) * dy[0] * dz[1], 0, 0)]) - ##assignment to edge y = 1, z = 0 + # #assignment to edge y = 1, z = 0 for i in range(0, max_deg): EL += tuple([(leg(i, x_mid) * dy[1] * dz[0], 0, 0)]) - ##assignment to edge y = 1, z = 1 + # #assignment to edge y = 1, z = 1 for i in range(0, max_deg): EL += tuple([(leg(i, x_mid) * dy[1] * dz[1], 0, 0)]) return EL @@ -325,72 +324,72 @@ def e_lambda_1_3d_trimmed(max_deg, dx, dy, dz, x_mid, y_mid, z_mid): def f_lambda_1_3d_trimmed(max_deg, dx, dy, dz, x_mid, y_mid, z_mid): FL = tuple([]) - ##Assignment to face x = 0, Ftilde + # #Assignment to face x = 0, Ftilde FL += tuple([(0, leg(max_deg - 2, z_mid) * dx[0] * dz[0] * dz[1], 0)]) FL += tuple([(0, 0, leg(max_deg - 2, y_mid) * dx[0] * dy[0] * dy[1])]) for j in range(1, max_deg - 1): - FL += tuple([(0, leg(j, y_mid) * leg(max_deg - j - 2, z_mid) * dx[0] * dz[0] * dz[1], - -leg(j - 1, y_mid) * leg(max_deg - j - 1, z_mid) * dx[0] * dy[0] * dy[1])]) - ##Assignment to face x = 0, F + FL += tuple([(0, leg(j, y_mid) * leg(max_deg - j - 2, z_mid) * dx[0] * dz[0] * dz[1], + -leg(j - 1, y_mid) * leg(max_deg - j - 1, z_mid) * dx[0] * dy[0] * dy[1])]) + # #Assignment to face x = 0, F for j in range(1, max_deg - 1): k = max_deg - j - 2 FL += tuple([(0, leg(j, y_mid) * leg(k, z_mid) * dx[0] * dz[0] * dz[1], 0)]) FL += tuple([(0, 0, leg(j, z_mid) * leg(k, x_mid) * dx[0] * dy[0] * dy[1])]) - ##Assignment to face x = 1, Ftilde + # #Assignment to face x = 1, Ftilde FL += tuple([(0, leg(max_deg - 2, z_mid) * dx[1] * dz[0] * dz[1], 0)]) FL += tuple([(0, 0, leg(max_deg - 2, y_mid) * dx[1] * dy[0] * dy[1])]) for j in range(1, max_deg - 1): - FL += tuple([(0, leg(j, y_mid) * leg(max_deg - j - 2, z_mid) * dx[1] * dz[0] * dz[1], - -leg(j - 1, y_mid) * leg(max_deg - j - 1, z_mid) * dx[1] * dy[0] * dy[1])]) - ##Assignment to face x = 1, F + FL += tuple([(0, leg(j, y_mid) * leg(max_deg - j - 2, z_mid) * dx[1] * dz[0] * dz[1], + -leg(j - 1, y_mid) * leg(max_deg - j - 1, z_mid) * dx[1] * dy[0] * dy[1])]) + # #Assignment to face x = 1, F for j in range(1, max_deg - 1): k = max_deg - j - 2 FL += tuple([(0, leg(j, y_mid) * leg(k, z_mid) * dx[1] * dz[0] * dz[1], 0)]) FL += tuple([(0, 0, leg(j, z_mid) * leg(k, x_mid) * dx[1] * dy[0] * dy[1])]) - ##Assignment to face y = 0, Ftilde + # #Assignment to face y = 0, Ftilde FL += tuple([(leg(max_deg - 2, z_mid) * dy[0] * dz[0] * dz[1], 0, 0)]) FL += tuple([(0, 0, leg(max_deg - 2, x_mid) * dy[0] * dx[0] * dx[1])]) for j in range(1, max_deg - 1): - FL += tuple([(leg(j, x_mid) * leg(max_deg - j - 2, z_mid) * dy[0] * dz[0] * dz[1], 0, - -leg(j - 1, x_mid) * leg(max_deg - j - 1, z_mid) * dy[0] * dx[0] * dx[1])]) - ##Assignment to face y = 0, F + FL += tuple([(leg(j, x_mid) * leg(max_deg - j - 2, z_mid) * dy[0] * dz[0] * dz[1], 0, + -leg(j - 1, x_mid) * leg(max_deg - j - 1, z_mid) * dy[0] * dx[0] * dx[1])]) + # #Assignment to face y = 0, F for j in range(1, max_deg - 1): k = max_deg - j - 2 FL += tuple([(leg(j, x_mid) * leg(k, z_mid) * dy[0] * dz[0] * dz[1], 0, 0)]) FL += tuple([(0, 0, leg(j, z_mid) * leg(k, y_mid) * dy[0] * dx[0] * dx[1])]) - ##Assignment to face y = 1, Ftilde + # #Assignment to face y = 1, Ftilde FL += tuple([(leg(max_deg - 2, z_mid) * dy[1] * dz[0] * dz[1], 0, 0)]) FL += tuple([(0, 0, leg(max_deg - 2, x_mid) * dy[1] * dx[0] * dx[1])]) for j in range(1, max_deg - 1): - FL += tuple([(leg(j, x_mid) * leg(max_deg - j - 2, z_mid) * dy[1] * dz[0] * dz[1], 0, - -leg(j - 1, x_mid) * leg(max_deg - 
j - 1, z_mid) * dy[1] * dx[0] * dx[1])]) - ##Assignment to face y = 1, F + FL += tuple([(leg(j, x_mid) * leg(max_deg - j - 2, z_mid) * dy[1] * dz[0] * dz[1], 0, + -leg(j - 1, x_mid) * leg(max_deg - j - 1, z_mid) * dy[1] * dx[0] * dx[1])]) + # #Assignment to face y = 1, F for j in range(1, max_deg - 1): k = max_deg - j - 2 FL += tuple([(leg(j, x_mid) * leg(k, z_mid) * dy[1] * dz[0] * dz[1], 0, 0)]) FL += tuple([(0, 0, leg(j, z_mid) * leg(k, y_mid) * dy[1] * dx[0] * dx[1])]) - ##Assignment to face z = 0, Ftilde + # #Assignment to face z = 0, Ftilde FL += tuple([(leg(max_deg - 2, y_mid) * dz[0] * dy[0] * dy[1], 0, 0)]) FL += tuple([(0, leg(max_deg - 2, x_mid) * dz[0] * dx[0] * dx[1], 0)]) for j in range(1, max_deg - 1): FL += tuple([(leg(j, x_mid) * leg(max_deg - j - 2, y_mid) * dz[0] * dy[0] * dy[1], - -leg(j - 1, x_mid) * leg(max_deg - j - 1, y_mid) * dz[0] * dx[0] * dx[1], 0)]) - ##Assignment to face z = 0, F + -leg(j - 1, x_mid) * leg(max_deg - j - 1, y_mid) * dz[0] * dx[0] * dx[1], 0)]) + # #Assignment to face z = 0, F for j in range(1, max_deg - 1): k = max_deg - j - 2 FL += tuple([(leg(j, x_mid) * leg(k, y_mid) * dz[0] * dy[0] * dy[1], 0, 0)]) FL += tuple([(0, leg(j, y_mid) * leg(k, x_mid) * dz[0] * dx[0] * dx[1], 0)]) - ##Assignment to face z = 1, Ftilde + # #Assignment to face z = 1, Ftilde FL += tuple([(leg(max_deg - 2, y_mid) * dz[1] * dy[0] * dy[1], 0, 0)]) FL += tuple([(0, leg(max_deg - 2, x_mid) * dz[1] * dx[0] * dx[1], 0)]) for j in range(1, max_deg - 1): FL += tuple([(leg(j, x_mid) * leg(max_deg - j - 2, y_mid) * dz[1] * dy[0] * dy[1], - -leg(j - 1, x_mid) * leg(max_deg - j - 1, y_mid) * dz[1] * dx[0] * dx[1], 0)]) - ##Assignment to face z = 1, F + -leg(j - 1, x_mid) * leg(max_deg - j - 1, y_mid) * dz[1] * dx[0] * dx[1], 0)]) + # #Assignment to face z = 1, F for j in range(1, max_deg - 1): k = max_deg - j - 2 FL += tuple([(leg(j, x_mid) * leg(k, y_mid) * dz[1] * dy[0] * dy[1], 0, 0)]) @@ -418,11 +417,11 @@ def I_lambda_1_3d(deg, dx, dy, dz, x_mid, y_mid, z_mid): DegsOfIteration = determine_I_lambda_1_portions_3d(deg) IL = tuple([]) for Degs in DegsOfIteration: - IL += tuple([(leg(Degs[0], x_mid) * leg(Degs[1], y_mid) * leg(Degs[2], z_mid) * + IL += tuple([(leg(Degs[0], x_mid) * leg(Degs[1], y_mid) * leg(Degs[2], z_mid) * dy[0] * dy[1] * dz[0] * dz[1], 0, 0)]) - IL += tuple([(0, leg(Degs[0], x_mid) * leg(Degs[1], y_mid) * leg(Degs[2], z_mid) * + IL += tuple([(0, leg(Degs[0], x_mid) * leg(Degs[1], y_mid) * leg(Degs[2], z_mid) * dx[0] * dx[1] * dz[0] * dz[1], 0)]) - IL += tuple([(0, 0, leg(Degs[0], x_mid) * leg(Degs[1], y_mid) * leg(Degs[2], z_mid) * + IL += tuple([(0, 0, leg(Degs[0], x_mid) * leg(Degs[1], y_mid) * leg(Degs[2], z_mid) * dy[0] * dy[1] * dy[0] * dy[1])]) return IL @@ -443,10 +442,10 @@ def I_lambda_1_tilde_3d(deg, dx, dy, dz, x_mid, y_mid, z_mid): if(deg > 5): ILtilde += tuple([(0, leg(j, y_mid) * leg(deg - j - 4, z_mid) * dx[0] * dx[1] * dz[0] * dz[1], -leg(j - 1, y_mid) * leg(deg - j - 3, z_mid) * dx[0] * dx[1] * dy[0] * dy[1])]) - return ILtilde + return ILtilde -#This is always 1-forms regardless of 2 or 3 dimensions. +# This is always 1-forms regardless of 2 or 3 dimensions. 
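
The entity-dof counts in Sminus.py (and in SminusCurl.py and SminusDiv.py below) are driven by the two small helpers triangular_number and choose_ijk_total defined near the top of each module. A minimal standalone sketch, assuming nothing beyond the Python standard library, of what they compute (choose_ijk_total(d) is the binomial coefficient C(d+2, 2), i.e. the number of exponent triples (i, j, k) with i + j + k = d):

from itertools import product

def triangular_number(n):
    # as in the patch: the n-th triangular number n*(n+1)/2
    return int((n + 1) * n / 2)

def choose_ijk_total(degree):
    # as in the patch: (degree+2)! / (2 * degree!) == C(degree+2, 2)
    top = 1
    for i in range(1, 2 + degree + 1):
        top = i * top
    bottom = 1
    for i in range(1, degree + 1):
        bottom = i * bottom
    return int(top / (2 * bottom))

for d in range(1, 10):
    # C(d+2, 2) counts the exponent triples (i, j, k) with i + j + k == d
    triples = sum(1 for t in product(range(d + 1), repeat=3) if sum(t) == d)
    assert choose_ijk_total(d) == (d + 2) * (d + 1) // 2 == triples
    assert triangular_number(d) == d * (d + 1) // 2
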
class TrimmedSerendipityEdge(TrimmedSerendipity): def __init__(self, ref_el, degree): if degree < 1: @@ -489,11 +488,11 @@ def __init__(self, ref_el, degree): y_mid, z_mid) else: IL = () - + Sminus_list = EL + FL if dim == 3: Sminus_list = Sminus_list + IL - + if dim == 2: self.basis = {(0, 0): Array(Sminus_list)} else: @@ -517,7 +516,7 @@ def __init__(self, ref_el, degree): dy = ((verts[-1][1] - y)/(verts[-1][1] - verts[0][1]), (y - verts[0][1])/(verts[-1][1] - verts[0][1])) x_mid = 2*x-(verts[-1][0] + verts[0][0]) y_mid = 2*y-(verts[-1][1] + verts[0][1]) - + EL = e_lambda_1_2d_part_one(degree, dx, dy, x_mid, y_mid) if degree >= 2: FL = trimmed_f_lambda_2d(degree, dx, dy, x_mid, y_mid) @@ -527,4 +526,4 @@ def __init__(self, ref_el, degree): Sminus_list = EL + FL Sminus_list = [[-a[1], a[0]] for a in Sminus_list] self.basis = {(0, 0): Array(Sminus_list)} - super(TrimmedSerendipityFace, self).__init__(ref_el=ref_el, degree=degree, mapping="contravariant piola") \ No newline at end of file + super(TrimmedSerendipityFace, self).__init__(ref_el=ref_el, degree=degree, mapping="contravariant piola") diff --git a/FIAT/SminusCurl.py b/FIAT/SminusCurl.py index cbf46dc3b..fda34ecfa 100644 --- a/FIAT/SminusCurl.py +++ b/FIAT/SminusCurl.py @@ -1,4 +1,4 @@ -#Working on 3d trimmed serendipity 2 forms now. +# Working on 3d trimmed serendipity 2 forms now. from sympy import symbols, legendre, Array, diff import numpy as np from FIAT.finite_element import FiniteElement @@ -10,9 +10,11 @@ variables = (x, y, z) leg = legendre + def triangular_number(n): return int((n+1)*n/2) + def choose_ijk_total(degree): top = 1 for i in range(1, 2 + degree + 1): @@ -20,7 +22,7 @@ def choose_ijk_total(degree): bottom = 1 for i in range(1, degree + 1): bottom = i * bottom - return int(top /(2 * bottom)) + return int(top / (2 * bottom)) class TrimmedSerendipity(FiniteElement): @@ -32,7 +34,7 @@ def __init__(self, ref_el, degree, mapping): dim = flat_el.get_spatial_dimension() self.fdim = dim if dim != 3: - if dim !=2: + if dim != 2: raise Exception("Trimmed serendipity elements only valid for dimensions 2 and 3") flat_topology = flat_el.get_topology() @@ -44,11 +46,11 @@ def __init__(self, ref_el, degree, mapping): for entity in entities: entity_ids[top_dim][entity] = [] - #3-d case. + # 3-d case. if dim == 3: entity_ids[3] = {} - for l in sorted(flat_topology[1]): - entity_ids[1][l] = list(range(cur, cur + degree)) + for p in sorted(flat_topology[1]): + entity_ids[1][p] = list(range(cur, cur + degree)) cur = cur + degree if (degree > 1): face_ids = degree @@ -68,7 +70,7 @@ def __init__(self, ref_el, degree, mapping): elif (degree == 6): interior_tilde_ids = 6 + 3 * (degree - 3) else: - interior_tilde_ids = 6 + 3 * (degree - 4) + interior_tilde_ids = 6 + 3 * (degree - 4) else: interior_tilde_ids = 0 @@ -81,7 +83,7 @@ def __init__(self, ref_el, degree, mapping): if(degree >= 2): entity_ids[2][0] = list(range(cur, cur + 2*triangular_number(degree - 2) + degree)) - + cur += 2*triangular_number(degree - 2) + degree formdegree = 1 @@ -177,7 +179,7 @@ def space_dimension(self): class TrimmedSerendipityCurl(TrimmedSerendipity): - #3d Curl element needs to be fixed entirely. This should correspond to 1-forms in 3d. + # 3d Curl element needs to be fixed entirely. This should correspond to 1-forms in 3d. 
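
The constructors below build their symbolic bases from per-direction affine coordinates dx, dy (and dz) and the shifted coordinates x_mid, y_mid, z_mid. A minimal sketch of that setup, assuming the flattened reference square has vertices (0,0)-(1,1) (in FIAT these come from flatten_reference_cube): dx is the pair of linear "hat" coordinates in x, and x_mid rescales [0, 1] to [-1, 1], the interval on which sympy's legendre polynomials are orthogonal.

from sympy import symbols, legendre, simplify

x, y = symbols("x y")
verts = ((0, 0), (1, 0), (0, 1), (1, 1))  # assumed unit-square vertices

dx = ((verts[-1][0] - x) / (verts[-1][0] - verts[0][0]),
      (x - verts[0][0]) / (verts[-1][0] - verts[0][0]))   # (1 - x, x)
x_mid = 2 * x - (verts[-1][0] + verts[0][0])               # 2*x - 1

assert simplify(dx[0] - (1 - x)) == 0 and simplify(dx[1] - x) == 0
assert legendre(1, x_mid) == 2 * x - 1
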
def __init__(self, ref_el, degree): if degree < 1: raise Exception("Trimmed serendipity face elements only valid for k >= 1") @@ -185,11 +187,10 @@ def __init__(self, ref_el, degree): flat_el = flatten_reference_cube(ref_el) dim = flat_el.get_spatial_dimension() if dim != 2: - if dim !=3: + if dim != 3: raise Exception("Trimmed serendipity face elements only valid for dimensions 2 and 3") verts = flat_el.get_vertices() - dx = ((verts[-1][0] - x)/(verts[-1][0] - verts[0][0]), (x - verts[0][0])/(verts[-1][0] - verts[0][0])) dy = ((verts[-1][1] - y)/(verts[-1][1] - verts[0][1]), (y - verts[0][1])/(verts[-1][1] - verts[0][1])) @@ -204,7 +205,7 @@ def __init__(self, ref_el, degree): if dim == 3: if degree < 1: - raise Exception ("Trimmed Serendipity fce elements only valid for k >= 1") + raise Exception("Trimmed Serendipity fce elements only valid for k >= 1") EL = e_lambda_1_3d(degree, dx, dy, dz, x_mid, y_mid, z_mid) if (degree > 1): FL = f_lambda_1_3d(degree, dx, dy, dz, x_mid, y_mid, z_mid) @@ -218,16 +219,16 @@ def __init__(self, ref_el, degree): Sminus_list = EL + FL + IL self.basis = {(0, 0, 0): Array(Sminus_list)} super(TrimmedSerendipityCurl, self).__init__(ref_el=ref_el, degree=degree, mapping="contravariant piola") - + else: - ##Put all 2 dimensional stuff here. - ##This part might be good, need to test it. + # #Put all 2 dimensional stuff here. + # #This part might be good, need to test it. if degree < 1: raise Exception("Trimmed serendipity face elements only valid for k >= 1") - #flat_el = flatten_reference_cube(ref_el) - #verts = flat_el.get_vertices() - + # flat_el = flatten_reference_cube(ref_el) + # verts = flat_el.get_vertices() + EL = e_lambda_1_2d_part_one(degree, dx, dy, x_mid, y_mid) if degree >= 2: FL = trimmed_f_lambda_2d(degree, dx, dy, x_mid, y_mid) @@ -241,46 +242,46 @@ def __init__(self, ref_el, degree): def e_lambda_1_3d(deg, dx, dy, dz, x_mid, y_mid, z_mid): EL = () - #IDs associated to edge 0. + # IDs associated to edge 0. for j in range(0, deg): EL += tuple([(0, 0, leg(j, z_mid) * dx[0] * dy[0])]) - #IDs associated to edge 1. + # IDs associated to edge 1. for j in range(0, deg): EL += tuple([(0, 0, leg(j, z_mid) * dx[0] * dy[1])]) - #IDs associated to edge 2. + # IDs associated to edge 2. for j in range(0, deg): EL += tuple([(0, 0, leg(j, z_mid) * dx[1] * dy[0])]) - #IDs associated to edge 3. + # IDs associated to edge 3. for j in range(0, deg): EL += tuple([(0, 0, leg(j, z_mid) * dx[1] * dy[1])]) - #IDs associated edge 4. + # IDs associated edge 4. for j in range(0, deg): EL += tuple([(0, leg(j, y_mid) * dx[0] * dz[0], 0)]) - #IDs associated to edge 5. + # IDs associated to edge 5. for j in range(0, deg): EL += tuple([(0, leg(j, y_mid) * dx[0] * dz[1], 0)]) - #IDs associated to edge 6. + # IDs associated to edge 6. for j in range(0, deg): EL += tuple([(0, leg(j, y_mid) * dx[1] * dz[0], 0)]) - #IDs associated to edge 7. + # IDs associated to edge 7. for j in range(0, deg): EL += tuple([(0, leg(j, y_mid) * dx[1] * dz[1], 0)]) - #IDs associated to edge 8. + # IDs associated to edge 8. for j in range(0, deg): EL += tuple([(leg(j, x_mid) * dz[0] * dy[0], 0, 0)]) - #IDs associated to edge 9. + # IDs associated to edge 9. for j in range(0, deg): EL += tuple([(leg(j, x_mid) * dz[1] * dy[0], 0, 0)]) - #IDs associated to edge 10. + # IDs associated to edge 10. for j in range(0, deg): EL += tuple([(leg(j, x_mid) * dz[0] * dy[1], 0, 0)]) - #IDs associated to edge 11. + # IDs associated to edge 11. 
for j in range(0, deg): EL += tuple([(leg(j, x_mid) * dz[1] * dy[1], 0, 0)]) return EL -#def f_lambda_1_3d_pieces(deg, dx, dy, dz, x_mid, y_mid, z_mid): +# def f_lambda_1_3d_pieces(deg, dx, dy, dz, x_mid, y_mid, z_mid): # FLpiece = () # for l in range(0, 2): # for j in range(0, deg - 2): @@ -294,7 +295,7 @@ def e_lambda_1_3d(deg, dx, dy, dz, x_mid, y_mid, z_mid): # return FLpiece -#def f_lambda_tilde_1_3d(deg, dx, dy, dz, x_mid, y_mid, z_mid): +# def f_lambda_tilde_1_3d(deg, dx, dy, dz, x_mid, y_mid, z_mid): # FTilde = () # for l in range(0, 2): # FTilde += tuple([(leg(deg - 2, y_mid) * dz[l] * dy[0] * dy[1], 0, 0)] + @@ -312,53 +313,53 @@ def e_lambda_1_3d(deg, dx, dy, dz, x_mid, y_mid, z_mid): def f_lambda_1_3d(deg, dx, dy, dz, x_mid, y_mid, z_mid): FL = () - #IDs associated to face 0. - #FTilde first. + # IDs associated to face 0. + # FTilde first. FL += tuple([(0, leg(deg - 2, z_mid) * dx[0] * dz[0] * dz[1], 0)]) FL += tuple([(0, 0, leg(deg - 2, y_mid) * dx[0] * dy[0] * dy[1])]) for j in range(1, deg - 1): FL += tuple([(0, leg(j, y_mid) * leg(deg - j - 2, z_mid) * dx[0] * dz[0] * dz[1], -leg(j - 1, y_mid) * leg(deg - j - 1, z_mid) * dx[0] * dy[0] * dy[1])]) - #F next + # F next for i in range(2, deg): for j in range(0, i - 1): k = i - 2 - j FL += tuple([(0, leg(j, y_mid) * leg(k, z_mid) * dx[0] * dz[0] * dz[1], 0)]) FL += tuple([(0, 0, leg(j, z_mid) * leg(k, y_mid) * dx[0] * dy[0] * dy[1])]) - #IDs associated to face 1. + # IDs associated to face 1. FL += tuple([(0, leg(deg - 2, z_mid) * dx[1] * dz[0] * dz[1], 0)]) FL += tuple([(0, 0, leg(deg - 2, y_mid) * dx[1] * dy[0] * dy[1])]) for j in range(1, deg - 1): FL += tuple([(0, leg(j, y_mid) * leg(deg - j - 2, z_mid) * dx[1] * dz[0] * dz[1], -leg(j - 1, y_mid) * leg(deg - j - 1, z_mid) * dx[1] * dy[0] * dy[1])]) - #F next. + # F next. for i in range(2, deg): for j in range(0, i - 1): k = i - 2 - j FL += tuple([(0, leg(j, y_mid) * leg(k, z_mid) * dx[1] * dz[0] * dz[1], 0)]) FL += tuple([(0, 0, leg(j, z_mid) * leg(k, y_mid) * dx[1] * dy[0] * dy[1])]) - #IDs associated to face 2. - #FTilde first. + # IDs associated to face 2. + # FTilde first. FL += tuple([(leg(deg - 2, z_mid) * dy[0] * dz[0] * dz[1], 0, 0)]) FL += tuple([(0, 0, leg(deg - 2, x_mid) * dy[0] * dx[0] * dx[1])]) for j in range(1, deg - 1): FL += tuple([(leg(j, x_mid) * leg(deg - j - 2, z_mid) * dy[0] * dz[0] * dz[1], 0, -leg(j - 1, x_mid) * leg(deg - j - 1, z_mid) * dy[0] * dx[0] * dx[1])]) - #F next. + # F next. for i in range(2, deg): for j in range(0, i - 1): k = i - 2 - j FL += tuple([(leg(j, x_mid) * leg(k, z_mid) * dy[0] * dz[0] * dz[1], 0, 0)]) FL += tuple([(0, 0, leg(j, z_mid) * leg(k, x_mid) * dy[0] * dx[0] * dx[1])]) - #IDs associated to face 3. + # IDs associated to face 3. FL += tuple([(leg(deg - 2, z_mid) * dy[1] * dz[0] * dz[1], 0, 0)]) FL += tuple([(0, 0, leg(deg - 2, x_mid) * dy[1] * dx[0] * dx[1])]) for j in range(1, deg - 1): FL += tuple([(leg(j, x_mid) * leg(deg - j - 2, z_mid) * dy[1] * dz[0] * dz[1], 0, -leg(j - 1, x_mid) * leg(deg - j - 1, z_mid) * dy[1] * dx[0] * dx[1])]) - #F next. + # F next. for i in range(2, deg): for j in range(0, i - 1): k = i - 2 - j FL += tuple([(leg(j, x_mid) * leg(k, z_mid) * dy[1] * dz[0] * dz[1], 0, 0)]) FL += tuple([(0, 0, leg(j, z_mid) * leg(k, x_mid) * dy[1] * dx[0] * dx[1])]) - #IDs associated to face 4. + # IDs associated to face 4. 
FL += tuple([(leg(deg - 2, y_mid) * dz[0] * dy[0] * dy[1], 0, 0)]) FL += tuple([(0, leg(deg - 2, x_mid) * dz[0] * dx[0] * dx[1], 0)]) for j in range(1, deg - 1): @@ -368,11 +369,11 @@ def f_lambda_1_3d(deg, dx, dy, dz, x_mid, y_mid, z_mid): k = i - 2 - j FL += tuple([(leg(j, x_mid) * leg(k, y_mid) * dz[0] * dy[0] * dy[1], 0, 0)]) FL += tuple([(0, leg(j, y_mid) * leg(k, x_mid) * dz[0] * dx[0] * dx[1], 0)]) - #IDs associated to face 5. + # IDs associated to face 5. FL += tuple([(leg(deg - 2, y_mid) * dz[1] * dy[0] * dy[1], 0, 0)]) FL += tuple([(0, leg(deg - 2, x_mid) * dz[1] * dx[0] * dx[1], 0)]) for j in range(1, deg - 1): - FL += tuple([(leg(j, x_mid) * leg(deg - j - 2, y_mid) * dz[1] * dy[0] * dy[1], -leg(j - 1, x_mid) * leg(deg -j - 1, y_mid) * dz[1] * dx[0] * dx[1], 0)]) + FL += tuple([(leg(j, x_mid) * leg(deg - j - 2, y_mid) * dz[1] * dy[0] * dy[1], -leg(j - 1, x_mid) * leg(deg - j - 1, y_mid) * dz[1] * dx[0] * dx[1], 0)]) for i in range(2, deg): for j in range(0, i - 1): k = i - 2 - j @@ -381,35 +382,33 @@ def f_lambda_1_3d(deg, dx, dy, dz, x_mid, y_mid, z_mid): return FL - -#def f_lambda_1_3d(deg, dx, dy, dz, x_mid, y_mid, z_mid): +# def f_lambda_1_3d(deg, dx, dy, dz, x_mid, y_mid, z_mid): # F = () # for i in range(2, deg): # F += f_lambda_1_3d_pieces(deg, dx, dy, dz, x_mid, y_mid, z_mid) # F += f_lambda_tilde_1_3d(deg, dx, dy, dz, x_mid, y_mid, z_mid) # return F - def I_lambda_1_3d_pieces(deg, dx, dy, dz, x_mid, y_mid, z_mid): - I = () + R = () for j in range(0, deg - 3): for k in range(0, deg - 3 - j): - l = deg - 4 - j - k - I += tuple([(leg(j, x_mid) * leg(k, y_mid) * leg(l, z_mid) * dy[0] * dy[1] * dz[0] * dz[1], 0, 0)] + - [(0, leg(j, x_mid) * leg(k, y_mid) * leg(l, z_mid) * dx[0] * dx[1] * dz[0] * dz[1], 0)] + - [(0, 0, leg(j, x_mid) * leg(k, y_mid) * leg(l, z_mid) * dx[0] * dx[1] * dy[0] * dy[1])]) - return I + m = deg - 4 - j - k + R += tuple([(leg(j, x_mid) * leg(k, y_mid) * leg(m, z_mid) * dy[0] * dy[1] * dz[0] * dz[1], 0, 0)] + + [(0, leg(j, x_mid) * leg(k, y_mid) * leg(m, z_mid) * dx[0] * dx[1] * dz[0] * dz[1], 0)] + + [(0, 0, leg(j, x_mid) * leg(k, y_mid) * leg(m, z_mid) * dx[0] * dx[1] * dy[0] * dy[1])]) + return R def I_lambda_tilde_1_3d(deg, dx, dy, dz, x_mid, y_mid, z_mid): ITilde = () - if(deg==4): + if(deg == 4): ITilde += tuple([(dy[0] * dy[1] * dz[0] * dz[1], 0, 0)] + [(0, dx[0] * dx[1] * dz[0] * dz[1], 0)] + - [(0, 0, dx[0] * dx[1] * dy[0] * dy[1])]) + [(0, 0, dx[0] * dx[1] * dy[0] * dy[1])]) if(deg > 4): ITilde += tuple([(leg(deg - 4, y_mid) * dy[0] * dy[1] * dz[0] * dz[1], 0, 0)] + - [(leg(deg - 4, z_mid) * dy[0] * dy[1] * dz[0] * dz[1], 0, 0)] + + [(leg(deg - 4, z_mid) * dy[0] * dy[1] * dz[0] * dz[1], 0, 0)] + [(0, leg(deg - 4, x_mid) * dx[0] * dx[1] * dz[0] * dz[1], 0)] + [(0, leg(deg - 4, z_mid) * dx[0] * dx[1] * dz[0] * dz[1], 0)] + [(0, 0, leg(deg - 4, x_mid) * dx[0] * dx[1] * dy[0] * dy[1])] + @@ -427,14 +426,14 @@ def I_lambda_tilde_1_3d(deg, dx, dy, dz, x_mid, y_mid, z_mid): def I_lambda_1_3d(deg, dx, dy, dz, x_mid, y_mid, z_mid): - I = () + R = () for i in range(4, deg): - I += I_lambda_1_3d_pieces(i, dx, dy, dz, x_mid, y_mid, z_mid) - I += I_lambda_tilde_1_3d(deg, dx, dy, dz, x_mid, y_mid, z_mid) - return I + R += I_lambda_1_3d_pieces(i, dx, dy, dz, x_mid, y_mid, z_mid) + R += I_lambda_tilde_1_3d(deg, dx, dy, dz, x_mid, y_mid, z_mid) + return R -#Everything for 2-d should work already. +# Everything for 2-d should work already. 
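
A count check, not part of the patch: per face, f_lambda_1_3d above contributes the two fixed "F tilde" members plus deg - 2 mixed ones, and two "F" members for every pair (i, j) with 2 <= i < deg and 0 <= j <= i - 2. This reproduces the quantity 2*triangular_number(degree - 2) + degree that recurs in the entity_ids bookkeeping of these modules:

def triangular_number(n):
    return (n + 1) * n // 2

def per_face_count(deg):
    f_tilde = 2 + max(deg - 2, 0)                  # two fixed members + the range(1, deg - 1) loop
    f = sum(2 * (i - 1) for i in range(2, deg))    # two members per inner (i, j) pair
    return f_tilde + f

for deg in range(2, 9):
    assert per_face_count(deg) == 2 * triangular_number(deg - 2) + deg
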
def e_lambda_1_2d_part_one(deg, dx, dy, x_mid, y_mid): EL = tuple( [(0, -leg(j, y_mid) * dx[0]) for j in range(deg)] + diff --git a/FIAT/SminusDiv.py b/FIAT/SminusDiv.py index dcfd8a722..e4d698b84 100644 --- a/FIAT/SminusDiv.py +++ b/FIAT/SminusDiv.py @@ -1,4 +1,4 @@ -#Working on 3d trimmed serendipity 2 forms now. +# Working on 3d trimmed serendipity 2 forms now. from sympy import symbols, legendre, Array, diff import numpy as np from FIAT.finite_element import FiniteElement @@ -14,6 +14,7 @@ def triangular_number(n): return int((n+1)*n/2) + def choose_ijk_total(degree): top = 1 for i in range(1, 2 + degree + 1): @@ -21,7 +22,7 @@ def choose_ijk_total(degree): bottom = 1 for i in range(1, degree + 1): bottom = i * bottom - return int(top /(2 * bottom)) + return int(top / (2 * bottom)) class TrimmedSerendipity(FiniteElement): @@ -33,7 +34,7 @@ def __init__(self, ref_el, degree, mapping): dim = flat_el.get_spatial_dimension() self.fdim = dim if dim != 3: - if dim !=2: + if dim != 2: raise Exception("Trimmed serendipity elements only valid for dimensions 2 and 3") flat_topology = flat_el.get_topology() @@ -44,7 +45,7 @@ def __init__(self, ref_el, degree, mapping): for entity in entities: entity_ids[top_dim][entity] = [] - #3-d case. + # 3-d case. if dim == 3: entity_ids[3] = {} for j in sorted(flat_topology[2]): @@ -60,7 +61,7 @@ def __init__(self, ref_el, degree, mapping): if (degree == 4): interior_tilde_ids += choose_ijk_total(degree - 2) - (degree - 1) - (degree - 1) + 1 if (degree > 4): - #interior_tilde_ids += choose_ijk_total(degree - 2) - (2 * degree - 1) + # interior_tilde_ids += choose_ijk_total(degree - 2) - (2 * degree - 1) interior_tilde_ids += choose_ijk_total(degree - 2) - (degree - 1) - (degree - 1) + 1 if degree == 1: interior_tilde_ids = 0 @@ -73,7 +74,7 @@ def __init__(self, ref_el, degree, mapping): if(degree >= 2): entity_ids[2][0] = list(range(cur, cur + 2*triangular_number(degree - 2) + degree)) - #entity_ids[2][0] = list(range(cur, cur + 2*triangular_number(degree - 2))) + # entity_ids[2][0] = list(range(cur, cur + 2*triangular_number(degree - 2))) cur += 2*triangular_number(degree - 2) + degree formdegree = dim - 1 @@ -177,7 +178,7 @@ def __init__(self, ref_el, degree): flat_el = flatten_reference_cube(ref_el) dim = flat_el.get_spatial_dimension() if dim != 2: - if dim !=3: + if dim != 3: raise Exception("Trimmed serendipity face elements only valid for dimensions 2 and 3") verts = flat_el.get_vertices() @@ -201,19 +202,19 @@ def __init__(self, ref_el, degree): Sminus_list = FL + IL self.basis = {(0, 0, 0): Array(Sminus_list)} super(TrimmedSerendipityDiv, self).__init__(ref_el=ref_el, degree=degree, mapping="contravariant piola") - + else: - ##Put all 2 dimensional stuff here. + # #Put all 2 dimensional stuff here. 
if degree < 1: raise Exception("Trimmed serendipity face elements only valid for k >= 1") - #flat_el = flatten_reference_cube(ref_el) - #verts = flat_el.get_vertices() - + # flat_el = flatten_reference_cube(ref_el) + # verts = flat_el.get_vertices() + EL = e_lambda_1_2d_part_one(degree, dx, dy, x_mid, y_mid) if degree >= 2: FL = trimmed_f_lambda_2d(degree, dx, dy, x_mid, y_mid) - #FL = F_lambda_1_2d(degree, dx, dy, x_mid, y_mid) + # FL = F_lambda_1_2d(degree, dx, dy, x_mid, y_mid) else: FL = () Sminus_list = EL + FL @@ -265,11 +266,12 @@ def f_lambda_2_3d(degree, dx, dy, dz, x_mid, y_mid, z_mid): FL += f_lambda_2_3d_pieces(degree, dx, dy, dz, x_mid, y_mid, z_mid) return FL """ + def f_lambda_2_3d(degree, dx, dy, dz, x_mid, y_mid, z_mid): FL = tuple([(-leg(j, y_mid) * leg(k, z_mid) * a, 0, 0) for a in dx for k in range(0, degree) for j in range(0, degree - k)] + - [(0, leg(j, x_mid) * leg(k, z_mid) * b, 0) + [(0, leg(j, x_mid) * leg(k, z_mid) * b, 0) for b in dy for k in range(0, degree) for j in range(0, degree - k)] + [(0, 0, -leg(j, x_mid) * leg(k, y_mid) * c) for c in dz for k in range(0, degree) for j in range(0, degree - k)]) @@ -279,16 +281,17 @@ def f_lambda_2_3d(degree, dx, dy, dz, x_mid, y_mid, z_mid): def I_lambda_2_3d_pieces(current_deg, dx, dy, dz, x_mid, y_mid, z_mid): assert current_deg > 1, 'invalid for i = 1' ILpiece = tuple([]) - for j in range(0, current_deg -1): + for j in range(0, current_deg - 1): for k in range(0, current_deg - 1 - j): ILpiece += tuple([(0, 0, -leg(j, x_mid) * leg(k, y_mid) * leg(current_deg - 2 - j - k, z_mid) * - dz[0] * dz[1])]+ - [(0, -leg(j, x_mid) * leg(k, y_mid) * leg(current_deg - 2 - j - k, z_mid) * - dy[0] * dy[1] ,0)] + - [(-leg(j, x_mid) * leg(k, y_mid) * leg(current_deg - 2 - j - k, z_mid) * dx[0] * - dx[1], 0, 0)]) + dz[0] * dz[1])] + + [(0, -leg(j, x_mid) * leg(k, y_mid) * leg(current_deg - 2 - j - k, z_mid) * + dy[0] * dy[1], 0)] + + [(-leg(j, x_mid) * leg(k, y_mid) * leg(current_deg - 2 - j - k, z_mid) * dx[0] * + dx[1], 0, 0)]) return ILpiece + """ def i_lambda_2_3d_normal(degree, dx, dy, dz, x_mid, y_mid, z_mid): IL = tuple([(-leg(l-2-j, x_mid) * leg(j-k, y_mid) * leg(k, z_mid) * @@ -301,7 +304,7 @@ def I_lambda_2_3d_pieces(current_deg, dx, dy, dz, x_mid, y_mid, z_mid): dz[0] * dz[1]) for l in range(2, degree) for j in range(l-1) for k in range(j+1)]) - return IL """ + return IL """ def I_lambda_2_3d_tilde(degree, dx, dy, dz, x_mid, y_mid, z_mid): @@ -310,17 +313,17 @@ def I_lambda_2_3d_tilde(degree, dx, dy, dz, x_mid, y_mid, z_mid): [(0, leg(degree - 2, y_mid) * dy[0] * dy[1], 0)] + [(leg(degree - 2, x_mid) * dx[0] * dx[1], 0, 0)]) IL_tilde += tuple([(leg(degree - j - 2, x_mid) * leg(j, y_mid) * dx[0] * dx[1], leg(degree - j - 1, x_mid) * - leg(j - 1, y_mid) * dy[0] * dy[1], 0) for j in range(1, degree - 1)] + + leg(j - 1, y_mid) * dy[0] * dy[1], 0) for j in range(1, degree - 1)] + [(leg(degree - j - 2, x_mid) * leg(j, z_mid) * dx[0] * dx[1], 0, leg(degree - j - 1, x_mid) * - leg(j - 1, z_mid) * dz[0] * dz[1]) for j in range(1, degree - 1)] + + leg(j - 1, z_mid) * dz[0] * dz[1]) for j in range(1, degree - 1)] + [(0, leg(degree - j - 2, y_mid) * leg(j, z_mid) * dy[0] * dy[1], leg(degree - j - 1, y_mid) * - leg(j - 1, z_mid) * dz[0] * dz[1]) for j in range(1, degree - 1)]) + leg(j - 1, z_mid) * dz[0] * dz[1]) for j in range(1, degree - 1)]) for k in range(1, degree - 2): - for l in range(1, degree - 1 - k): - j = degree - 2 - k - l - IL_tilde += tuple([(-leg(j, x_mid) * leg(k, y_mid) * leg(l, z_mid) * dx[0] * dx[1], - leg(j + 1, 
x_mid) * leg(k - 1, y_mid) * leg(l, z_mid) * dy[0] * dy[1], - -leg(j + 1, x_mid) * leg(k, y_mid) * leg(l - 1, z_mid) * dz[0] * dz[1])]) + for R in range(1, degree - 1 - k): + m = degree - 2 - k - R + IL_tilde += tuple([(-leg(m, x_mid) * leg(k, y_mid) * leg(R, z_mid) * dx[0] * dx[1], + leg(m + 1, x_mid) * leg(k - 1, y_mid) * leg(R, z_mid) * dy[0] * dy[1], + -leg(m + 1, x_mid) * leg(k, y_mid) * leg(R - 1, z_mid) * dz[0] * dz[1])]) return IL_tilde @@ -328,11 +331,12 @@ def I_lambda_2_3d(degree, dx, dy, dz, x_mid, y_mid, z_mid): IL = tuple([]) for j in range(2, degree): IL += I_lambda_2_3d_pieces(j, dx, dy, dz, x_mid, y_mid, z_mid) - #IL += i_lambda_2_3d_normal(degree, dx, dy, dz, x_mid, y_mid, z_mid) + # IL += i_lambda_2_3d_normal(degree, dx, dy, dz, x_mid, y_mid, z_mid) IL += I_lambda_2_3d_tilde(degree, dx, dy, dz, x_mid, y_mid, z_mid) return IL -#Everything for 2-d should work already. + +# Everything for 2-d should work already. def e_lambda_1_2d_part_one(deg, dx, dy, x_mid, y_mid): EL = tuple( [(0, -leg(j, y_mid) * dx[0]) for j in range(deg)] + @@ -417,9 +421,9 @@ def F_lambda_1_2d(deg, dx, dy, x_mid, y_mid): def trimmed_f_lambda_2d(deg, dx, dy, x_mid, y_mid): - #FL = f_lambda_1_2d_trim(deg, dx, dy, x_mid, y_mid) + # FL = f_lambda_1_2d_trim(deg, dx, dy, x_mid, y_mid) FL = F_lambda_1_2d(deg, dx, dy, x_mid, y_mid) FLT = f_lambda_1_2d_tilde(deg, dx, dy, x_mid, y_mid) result = FL + FLT - # return FL + # return FL return result diff --git a/FIAT/__init__.py b/FIAT/__init__.py index 74c439f1f..12d851215 100644 --- a/FIAT/__init__.py +++ b/FIAT/__init__.py @@ -13,8 +13,8 @@ from FIAT.brezzi_douglas_marini import BrezziDouglasMarini from FIAT.Sminus import TrimmedSerendipityEdge # noqa: F401 from FIAT.Sminus import TrimmedSerendipityFace # noqa: F401 -from FIAT.SminusDiv import TrimmedSerendipityDiv #noqa: F401 -from FIAT.SminusCurl import TrimmedSerendipityCurl #noqa: F401 +from FIAT.SminusDiv import TrimmedSerendipityDiv # noqa: F401 +from FIAT.SminusCurl import TrimmedSerendipityCurl # noqa: F401 from FIAT.brezzi_douglas_fortin_marini import BrezziDouglasFortinMarini from FIAT.discontinuous_lagrange import DiscontinuousLagrange from FIAT.discontinuous_taylor import DiscontinuousTaylor @@ -25,6 +25,7 @@ from FIAT.lagrange import Lagrange from FIAT.gauss_lobatto_legendre import GaussLobattoLegendre from FIAT.gauss_legendre import GaussLegendre +from FIAT.gauss_radau import GaussRadau from FIAT.morley import Morley from FIAT.nedelec import Nedelec from FIAT.nedelec_second_kind import NedelecSecondKind @@ -33,6 +34,9 @@ from FIAT.crouzeix_raviart import CrouzeixRaviart from FIAT.regge import Regge from FIAT.hellan_herrmann_johnson import HellanHerrmannJohnson +from FIAT.arnold_winther import ArnoldWinther +from FIAT.arnold_winther import ArnoldWintherNC +from FIAT.mardal_tai_winther import MardalTaiWinther from FIAT.bubble import Bubble, FacetBubble from FIAT.tensor_product import TensorProductElement from FIAT.enriched import EnrichedElement @@ -42,6 +46,7 @@ from FIAT.mixed import MixedElement # noqa: F401 from FIAT.restricted import RestrictedElement # noqa: F401 from FIAT.quadrature_element import QuadratureElement # noqa: F401 +from FIAT.kong_mulder_veldhuizen import KongMulderVeldhuizen # noqa: F401 # Important functionality from FIAT.quadrature import make_quadrature # noqa: F401 @@ -67,8 +72,10 @@ "Discontinuous Raviart-Thomas": DiscontinuousRaviartThomas, "Hermite": CubicHermite, "Lagrange": Lagrange, + "Kong-Mulder-Veldhuizen": KongMulderVeldhuizen, "Gauss-Lobatto-Legendre": 
GaussLobattoLegendre, "Gauss-Legendre": GaussLegendre, + "Gauss-Radau": GaussRadau, "Morley": Morley, "Nedelec 1st kind H(curl)": Nedelec, "Nedelec 2nd kind H(curl)": NedelecSecondKind, @@ -79,7 +86,10 @@ "TensorProductElement": TensorProductElement, "BrokenElement": DiscontinuousElement, "HDiv Trace": HDivTrace, - "Hellan-Herrmann-Johnson": HellanHerrmannJohnson} + "Hellan-Herrmann-Johnson": HellanHerrmannJohnson, + "Conforming Arnold-Winther": ArnoldWinther, + "Nonconforming Arnold-Winther": ArnoldWintherNC, + "Mardal-Tai-Winther": MardalTaiWinther} # List of extra elements extra_elements = {"P0": P0, diff --git a/FIAT/argyris.py b/FIAT/argyris.py index 805cd1f5e..ce2d0ec36 100644 --- a/FIAT/argyris.py +++ b/FIAT/argyris.py @@ -142,4 +142,4 @@ class QuinticArgyris(finite_element.CiarletElement): def __init__(self, ref_el): poly_set = polynomial_set.ONPolynomialSet(ref_el, 5) dual = QuinticArgyrisDualSet(ref_el) - super(QuinticArgyris, self).__init__(poly_set, dual, 5) + super().__init__(poly_set, dual, 5) diff --git a/FIAT/arnold_winther.py b/FIAT/arnold_winther.py new file mode 100644 index 000000000..5e8bdbbd1 --- /dev/null +++ b/FIAT/arnold_winther.py @@ -0,0 +1,166 @@ +# -*- coding: utf-8 -*- +"""Implementation of the Arnold-Winther finite elements.""" + +# Copyright (C) 2020 by Robert C. Kirby (Baylor University) +# Modified by Francis Aznaran (Oxford University) +# +# This file is part of FIAT (https://www.fenicsproject.org) +# +# SPDX-License-Identifier: LGPL-3.0-or-later + + +from FIAT.finite_element import CiarletElement +from FIAT.dual_set import DualSet +from FIAT.polynomial_set import ONSymTensorPolynomialSet, ONPolynomialSet +from FIAT.functional import ( + PointwiseInnerProductEvaluation as InnerProduct, + FrobeniusIntegralMoment as FIM, + IntegralMomentOfTensorDivergence, + IntegralLegendreNormalNormalMoment, + IntegralLegendreNormalTangentialMoment) + +from FIAT.quadrature import make_quadrature + +import numpy + + +class ArnoldWintherNCDual(DualSet): + def __init__(self, cell, degree): + if not degree == 2: + raise ValueError("Nonconforming Arnold-Winther elements are" + "only defined for degree 2.") + dofs = [] + dof_ids = {} + dof_ids[0] = {0: [], 1: [], 2: []} + dof_ids[1] = {0: [], 1: [], 2: []} + dof_ids[2] = {0: []} + + dof_cur = 0 + # no vertex dofs + # proper edge dofs now (not the contraints) + # moments of normal . sigma against constants and linears. + for entity_id in range(3): # a triangle has 3 edges + for order in (0, 1): + dofs += [IntegralLegendreNormalNormalMoment(cell, entity_id, order, 6), + IntegralLegendreNormalTangentialMoment(cell, entity_id, order, 6)] + dof_ids[1][entity_id] = list(range(dof_cur, dof_cur+4)) + dof_cur += 4 + + # internal dofs: constant moments of three unique components + Q = make_quadrature(cell, 2) + + e1 = numpy.array([1.0, 0.0]) # euclidean basis 1 + e2 = numpy.array([0.0, 1.0]) # euclidean basis 2 + basis = [(e1, e1), (e1, e2), (e2, e2)] # basis for symmetric matrices + for (v1, v2) in basis: + v1v2t = numpy.outer(v1, v2) + fatqp = numpy.zeros((2, 2, len(Q.pts))) + for i, y in enumerate(v1v2t): + for j, x in enumerate(y): + for k in range(len(Q.pts)): + fatqp[i, j, k] = x + dofs.append(FIM(cell, Q, fatqp)) + dof_ids[2][0] = list(range(dof_cur, dof_cur + 3)) + dof_cur += 3 + + # put the constraint dofs last. 
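
One remark on the internal-dof block just above: the triple loop that fills fatqp simply replicates the constant 2x2 matrix v1 v2^T at every quadrature point, so with plain numpy the same array can be produced by a broadcast. A sketch, assuming only numpy and a hypothetical quadrature-point count standing in for len(Q.pts):

import numpy

e1 = numpy.array([1.0, 0.0])
e2 = numpy.array([0.0, 1.0])
num_qpts = 4  # hypothetical; the element uses len(Q.pts)

for (v1, v2) in [(e1, e1), (e1, e2), (e2, e2)]:
    v1v2t = numpy.outer(v1, v2)
    fatqp = numpy.repeat(v1v2t[:, :, None], num_qpts, axis=2)  # shape (2, 2, num_qpts)
    assert fatqp.shape == (2, 2, num_qpts)
    assert numpy.allclose(fatqp[:, :, 0], v1v2t)
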
+ for entity_id in range(3): + dof = IntegralLegendreNormalNormalMoment(cell, entity_id, 2, 6) + dofs.append(dof) + dof_ids[1][entity_id].append(dof_cur) + dof_cur += 1 + + super(ArnoldWintherNCDual, self).__init__(dofs, cell, dof_ids) + + +class ArnoldWintherNC(CiarletElement): + """The definition of the nonconforming Arnold-Winther element. + """ + def __init__(self, cell, degree): + assert degree == 2, "Only defined for degree 2" + Ps = ONSymTensorPolynomialSet(cell, degree) + Ls = ArnoldWintherNCDual(cell, degree) + mapping = "double contravariant piola" + + super(ArnoldWintherNC, self).__init__(Ps, Ls, degree, + mapping=mapping) + + +class ArnoldWintherDual(DualSet): + def __init__(self, cell, degree): + if not degree == 3: + raise ValueError("Arnold-Winther elements are" + "only defined for degree 3.") + dofs = [] + dof_ids = {} + dof_ids[0] = {0: [], 1: [], 2: []} + dof_ids[1] = {0: [], 1: [], 2: []} + dof_ids[2] = {0: []} + + dof_cur = 0 + + # vertex dofs + vs = cell.get_vertices() + e1 = numpy.array([1.0, 0.0]) + e2 = numpy.array([0.0, 1.0]) + basis = [(e1, e1), (e1, e2), (e2, e2)] + + dof_cur = 0 + + for entity_id in range(3): + node = tuple(vs[entity_id]) + for (v1, v2) in basis: + dofs.append(InnerProduct(cell, v1, v2, node)) + dof_ids[0][entity_id] = list(range(dof_cur, dof_cur + 3)) + dof_cur += 3 + + # edge dofs now + # moments of normal . sigma against constants and linears. + for entity_id in range(3): + for order in (0, 1): + dofs += [IntegralLegendreNormalNormalMoment(cell, entity_id, order, 6), + IntegralLegendreNormalTangentialMoment(cell, entity_id, order, 6)] + dof_ids[1][entity_id] = list(range(dof_cur, dof_cur+4)) + dof_cur += 4 + + # internal dofs: constant moments of three unique components + Q = make_quadrature(cell, 3) + + e1 = numpy.array([1.0, 0.0]) # euclidean basis 1 + e2 = numpy.array([0.0, 1.0]) # euclidean basis 2 + basis = [(e1, e1), (e1, e2), (e2, e2)] # basis for symmetric matrices + for (v1, v2) in basis: + v1v2t = numpy.outer(v1, v2) + fatqp = numpy.zeros((2, 2, len(Q.pts))) + for k in range(len(Q.pts)): + fatqp[:, :, k] = v1v2t + dofs.append(FIM(cell, Q, fatqp)) + dof_ids[2][0] = list(range(dof_cur, dof_cur + 3)) + dof_cur += 3 + + # Constraint dofs + + Q = make_quadrature(cell, 5) + + onp = ONPolynomialSet(cell, 2, (2,)) + pts = Q.get_points() + onpvals = onp.tabulate(pts)[0, 0] + + for i in list(range(3, 6)) + list(range(9, 12)): + dofs.append(IntegralMomentOfTensorDivergence(cell, Q, + onpvals[i, :, :])) + + dof_ids[2][0] += list(range(dof_cur, dof_cur+6)) + + super(ArnoldWintherDual, self).__init__(dofs, cell, dof_ids) + + +class ArnoldWinther(CiarletElement): + """The definition of the conforming Arnold-Winther element. + """ + def __init__(self, cell, degree): + assert degree == 3, "Only defined for degree 3" + Ps = ONSymTensorPolynomialSet(cell, degree) + Ls = ArnoldWintherDual(cell, degree) + mapping = "double contravariant piola" + super(ArnoldWinther, self).__init__(Ps, Ls, degree, mapping=mapping) diff --git a/FIAT/barycentric_interpolation.py b/FIAT/barycentric_interpolation.py new file mode 100644 index 000000000..ceee2f1a7 --- /dev/null +++ b/FIAT/barycentric_interpolation.py @@ -0,0 +1,46 @@ +# Copyright (C) 2021 Pablo D. Brubeck +# +# This file is part of FIAT (https://www.fenicsproject.org) +# +# SPDX-License-Identifier: LGPL-3.0-or-later +# +# Written by Pablo D. 
Brubeck (brubeck@protonmail.com), 2021 + +import numpy + + +def barycentric_interpolation(xsrc, xdst, order=0): + """Return tabulations of a 1D Lagrange nodal basis via the second barycentric interpolation formula + + See Berrut and Trefethen (2004) https://doi.org/10.1137/S0036144502417715 Eq. (4.2) & (9.4) + + :arg xsrc: a :class:`numpy.array` with the nodes defining the Lagrange polynomial basis + :arg xdst: a :class:`numpy.array` with the interpolation points + :arg order: the integer order of differentiation + :returns: dict of tabulations up to the given order (in the same format as :meth:`~.CiarletElement.tabulate`) + """ + + # w = barycentric weights + # D = spectral differentiation matrix (D.T : u(xsrc) -> u'(xsrc)) + # I = barycentric interpolation matrix (I.T : u(xsrc) -> u(xdst)) + + D = numpy.add.outer(-xsrc, xsrc) + numpy.fill_diagonal(D, 1.0E0) + w = 1.0E0 / numpy.prod(D, axis=0) + D = numpy.divide.outer(w, w) / D + numpy.fill_diagonal(D, numpy.diag(D) - numpy.sum(D, axis=0)) + + I = numpy.add.outer(-xsrc, xdst) + idx = numpy.argwhere(numpy.isclose(I, 0.0E0, 1E-14)) + I[idx[:, 0], idx[:, 1]] = 1.0E0 + I = 1.0E0 / I + I *= w[:, None] + I[:, idx[:, 1]] = 0.0E0 + I[idx[:, 0], idx[:, 1]] = 1.0E0 + I = (1.0E0 / numpy.sum(I, axis=0)) * I + + derivs = {(0,): I} + for k in range(0, order): + derivs[(k+1,)] = numpy.matmul(D, derivs[(k,)]) + + return derivs diff --git a/FIAT/brezzi_douglas_marini.py b/FIAT/brezzi_douglas_marini.py index 96afe4e36..64cc5bf52 100644 --- a/FIAT/brezzi_douglas_marini.py +++ b/FIAT/brezzi_douglas_marini.py @@ -7,10 +7,11 @@ from FIAT import (finite_element, quadrature, functional, dual_set, polynomial_set, nedelec) +from FIAT.check_format_variant import check_format_variant class BDMDualSet(dual_set.DualSet): - def __init__(self, ref_el, degree): + def __init__(self, ref_el, degree, variant, quad_deg): # Initialize containers for map: mesh_entity -> dof number and # dual basis @@ -20,28 +21,55 @@ def __init__(self, ref_el, degree): sd = ref_el.get_spatial_dimension() t = ref_el.get_topology() - # Define each functional for the dual set - # codimension 1 facets - for i in range(len(t[sd - 1])): - pts_cur = ref_el.make_points(sd - 1, i, sd + degree) - for j in range(len(pts_cur)): - pt_cur = pts_cur[j] - f = functional.PointScaledNormalEvaluation(ref_el, i, pt_cur) - nodes.append(f) - - # internal nodes - if degree > 1: - Q = quadrature.make_quadrature(ref_el, 2 * (degree + 1)) - qpts = Q.get_points() - Nedel = nedelec.Nedelec(ref_el, degree - 1) - Nedfs = Nedel.get_nodal_basis() - zero_index = tuple([0 for i in range(sd)]) - Ned_at_qpts = Nedfs.tabulate(qpts)[zero_index] - - for i in range(len(Ned_at_qpts)): - phi_cur = Ned_at_qpts[i, :] - l_cur = functional.FrobeniusIntegralMoment(ref_el, Q, phi_cur) - nodes.append(l_cur) + if variant == "integral": + facet = ref_el.get_facet_element() + # Facet nodes are \int_F v\cdot n p ds where p \in P_{q-1} + # degree is q - 1 + Q = quadrature.make_quadrature(facet, quad_deg) + Pq = polynomial_set.ONPolynomialSet(facet, degree) + Pq_at_qpts = Pq.tabulate(Q.get_points())[tuple([0]*(sd - 1))] + for f in range(len(t[sd - 1])): + for i in range(Pq_at_qpts.shape[0]): + phi = Pq_at_qpts[i, :] + nodes.append(functional.IntegralMomentOfScaledNormalEvaluation(ref_el, Q, phi, f)) + + # internal nodes + if degree > 1: + Q = quadrature.make_quadrature(ref_el, quad_deg) + qpts = Q.get_points() + Nedel = nedelec.Nedelec(ref_el, degree - 1, variant) + Nedfs = Nedel.get_nodal_basis() + zero_index = tuple([0 for i in range(sd)]) + 
Ned_at_qpts = Nedfs.tabulate(qpts)[zero_index] + + for i in range(len(Ned_at_qpts)): + phi_cur = Ned_at_qpts[i, :] + l_cur = functional.FrobeniusIntegralMoment(ref_el, Q, phi_cur) + nodes.append(l_cur) + + elif variant == "point": + # Define each functional for the dual set + # codimension 1 facets + for i in range(len(t[sd - 1])): + pts_cur = ref_el.make_points(sd - 1, i, sd + degree) + for j in range(len(pts_cur)): + pt_cur = pts_cur[j] + f = functional.PointScaledNormalEvaluation(ref_el, i, pt_cur) + nodes.append(f) + + # internal nodes + if degree > 1: + Q = quadrature.make_quadrature(ref_el, 2 * (degree + 1)) + qpts = Q.get_points() + Nedel = nedelec.Nedelec(ref_el, degree - 1, variant) + Nedfs = Nedel.get_nodal_basis() + zero_index = tuple([0 for i in range(sd)]) + Ned_at_qpts = Nedfs.tabulate(qpts)[zero_index] + + for i in range(len(Ned_at_qpts)): + phi_cur = Ned_at_qpts[i, :] + l_cur = functional.FrobeniusIntegralMoment(ref_el, Q, phi_cur) + nodes.append(l_cur) # sets vertices (and in 3d, edges) to have no nodes for i in range(sd - 1): @@ -71,16 +99,32 @@ def __init__(self, ref_el, degree): class BrezziDouglasMarini(finite_element.CiarletElement): - """The BDM element""" + """ + The BDM element + + :arg ref_el: The reference element. + :arg k: The degree. + :arg variant: optional variant specifying the types of nodes. + + variant can be chosen from ["point", "integral", "integral(quadrature_degree)"] + "point" -> dofs are evaluated by point evaluation. Note that this variant has suboptimal + convergence order in the H(div)-norm + "integral" -> dofs are evaluated by quadrature rule. The quadrature degree is chosen to integrate + polynomials of degree 5*k so that most expressions will be interpolated exactly. This is important + when you want to have (nearly) divergence-preserving interpolation. + "integral(quadrature_degree)" -> dofs are evaluated by quadrature rule of degree quadrature_degree + """ + + def __init__(self, ref_el, k, variant=None): - def __init__(self, ref_el, degree): + (variant, quad_deg) = check_format_variant(variant, k, "BDM") - if degree < 1: + if k < 1: raise Exception("BDM_k elements only valid for k >= 1") sd = ref_el.get_spatial_dimension() - poly_set = polynomial_set.ONPolynomialSet(ref_el, degree, (sd, )) - dual = BDMDualSet(ref_el, degree) + poly_set = polynomial_set.ONPolynomialSet(ref_el, k, (sd, )) + dual = BDMDualSet(ref_el, k, variant, quad_deg) formdegree = sd - 1 # (n-1)-form - super(BrezziDouglasMarini, self).__init__(poly_set, dual, degree, formdegree, + super(BrezziDouglasMarini, self).__init__(poly_set, dual, k, formdegree, mapping="contravariant piola") diff --git a/FIAT/check_format_variant.py b/FIAT/check_format_variant.py new file mode 100644 index 000000000..14c5ba594 --- /dev/null +++ b/FIAT/check_format_variant.py @@ -0,0 +1,25 @@ +import re +import warnings + + +def check_format_variant(variant, degree, element): + if variant is None: + variant = "point" + warnings.simplefilter('always', DeprecationWarning) + warnings.warn('Variant of ' + element + ' element will change from point evaluation to integral evaluation.' 
+ ' You should project into variant="integral"', DeprecationWarning) + + match = re.match(r"^integral(?:\((\d+)\))?$", variant) + if match: + variant = "integral" + quad_degree, = match.groups() + quad_degree = int(quad_degree) if quad_degree is not None else 5*(degree + 1) + if quad_degree < degree + 1: + raise ValueError("Warning, quadrature degree should be at least %s" % (degree + 1)) + elif variant == "point": + quad_degree = None + else: + raise ValueError('Choose either variant="point" or variant="integral"' + 'or variant="integral(Quadrature degree)"') + + return (variant, quad_degree) diff --git a/FIAT/dual_set.py b/FIAT/dual_set.py index 26759ba79..215048b74 100644 --- a/FIAT/dual_set.py +++ b/FIAT/dual_set.py @@ -1,10 +1,15 @@ # Copyright (C) 2008-2012 Robert C. Kirby (Texas Tech University) # +# Modified 2020 by the same at Baylor University. +# # This file is part of FIAT (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later import numpy +import collections + +from FIAT import polynomial_set class DualSet(object): @@ -39,18 +44,94 @@ def get_reference_element(self): return self.ref_el def to_riesz(self, poly_set): + r"""This method gives the action of the entire dual set + on each member of the expansion set underlying poly_set. + Then, applying the linear functionals of the dual set to an + arbitrary polynomial in poly_set is accomplished by (generalized) + matrix multiplication. + + For scalar-valued spaces, this produces a matrix + :\math:`R_{i, j}` such that + :\math:`\ell_i(f) = \sum_{j} a_j \ell_i(\phi_j)` + for :\math:`f=\sum_{j} a_j \phi_j`. + + More generally, it will have shape concatenating + the number of functionals in the dual set, the value shape + of functions it takes, and the number of members of the + expansion set. + """ + + # This rather technical code queries the low-level information + # in pt_dict and deriv_dict + # for each functional to find out where it evaluates its + # inputs and/or their derivatives. Then, it tabulates the + # expansion set one time for all the function values and + # another for all of the derivatives. This circumvents + # needing to call the to_riesz method of each functional and + # also limits the number of different calls to tabulate. 
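
As a toy illustration of that grouping strategy (made-up pt_dicts, not FIAT objects): each evaluation point is recorded once, together with the indices of the functionals that use it, so the expansion set is tabulated a single time for all of them.

import collections

# hypothetical pt_dicts of three functionals: point -> list of (weight, component) pairs
pt_dicts = [{(0.0,): [(1.0, ())]},
            {(0.5,): [(1.0, ())]},
            {(0.0,): [(0.5, ())], (1.0,): [(0.5, ())]}]

pts_to_ells = collections.OrderedDict()
for i, pt_dict in enumerate(pt_dicts):
    for pt in pt_dict:
        pts_to_ells.setdefault(pt, []).append(i)

assert list(pts_to_ells.items()) == [((0.0,), [0, 2]), ((0.5,), [1]), ((1.0,), [2])]
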
tshape = self.nodes[0].target_shape num_nodes = len(self.nodes) es = poly_set.get_expansion_set() + ed = poly_set.get_embedded_degree() num_exp = es.get_num_members(poly_set.get_embedded_degree()) riesz_shape = tuple([num_nodes] + list(tshape) + [num_exp]) self.mat = numpy.zeros(riesz_shape, "d") - for i in range(len(self.nodes)): - self.mat[i][:] = self.nodes[i].to_riesz(poly_set) + # Dictionaries mapping pts to which functionals they come from + pts_to_ells = collections.OrderedDict() + dpts_to_ells = collections.OrderedDict() + + for i, ell in enumerate(self.nodes): + for pt in ell.pt_dict: + if pt in pts_to_ells: + pts_to_ells[pt].append(i) + else: + pts_to_ells[pt] = [i] + + for pt in ell.deriv_dict: + if pt in dpts_to_ells: + dpts_to_ells[pt].append(i) + else: + dpts_to_ells[pt] = [i] + + # Now tabulate the function values + pts = list(pts_to_ells.keys()) + expansion_values = es.tabulate(ed, pts) + + for j, pt in enumerate(pts): + which_ells = pts_to_ells[pt] + + for k in which_ells: + pt_dict = self.nodes[k].pt_dict + wc_list = pt_dict[pt] + + for i in range(num_exp): + for (w, c) in wc_list: + self.mat[k][c][i] += w*expansion_values[i, j] + + # Tabulate the derivative values that are needed + max_deriv_order = max([ell.max_deriv_order for ell in self.nodes]) + if max_deriv_order > 0: + dpts = list(dpts_to_ells.keys()) + # It's easiest/most efficient to get derivatives of the + # expansion set through the polynomial set interface. + # This is creating a short-lived set to do just this. + expansion = polynomial_set.ONPolynomialSet(self.ref_el, ed) + dexpansion_values = expansion.tabulate(dpts, max_deriv_order) + + for j, pt in enumerate(dpts): + which_ells = dpts_to_ells[pt] + + for k in which_ells: + dpt_dict = self.nodes[k].deriv_dict + wac_list = dpt_dict[pt] + + for i in range(num_exp): + for (w, alpha, c) in wac_list: + self.mat[k][c][i] += w*dexpansion_values[alpha][i, j] return self.mat diff --git a/FIAT/expansions.py b/FIAT/expansions.py index c4b8a99c0..a5fc112ac 100644 --- a/FIAT/expansions.py +++ b/FIAT/expansions.py @@ -109,6 +109,34 @@ def xi_tetrahedron(eta): return xi1, xi2, xi3 +class PointExpansionSet(object): + """Evaluates the point basis on a point reference element.""" + + def __init__(self, ref_el): + if ref_el.get_spatial_dimension() != 0: + raise ValueError("Must have a point") + self.ref_el = ref_el + self.base_ref_el = reference_element.Point() + + def get_num_members(self, n): + return 1 + + def tabulate(self, n, pts): + """Returns a numpy array A[i,j] = phi_i(pts[j]) = 1.0.""" + assert n == 0 + return numpy.ones((1, len(pts))) + + def tabulate_derivatives(self, n, pts): + """Returns a numpy array of size A where A[i,j] = phi_i(pts[j]) + but where each element is an empty tuple (). 
This maintains + compatibility with the interfaces of the interval, triangle and + tetrahedron expansions.""" + deriv_vals = numpy.empty_like(self.tabulate(n, pts), dtype=tuple) + deriv_vals.fill(()) + + return deriv_vals + + class LineExpansionSet(object): """Evaluates the Legendre basis on a line reference element.""" @@ -377,7 +405,9 @@ def tabulate_jet(self, n, pts, order=1): def get_expansion_set(ref_el): """Returns an ExpansionSet instance appopriate for the given reference element.""" - if ref_el.get_shape() == reference_element.LINE: + if ref_el.get_shape() == reference_element.POINT: + return PointExpansionSet(ref_el) + elif ref_el.get_shape() == reference_element.LINE: return LineExpansionSet(ref_el) elif ref_el.get_shape() == reference_element.TRIANGLE: return TriangleExpansionSet(ref_el) @@ -390,11 +420,15 @@ def get_expansion_set(ref_el): def polynomial_dimension(ref_el, degree): """Returns the dimension of the space of polynomials of degree no greater than degree on the reference element.""" - if ref_el.get_shape() == reference_element.LINE: + if ref_el.get_shape() == reference_element.POINT: + if degree > 0: + raise ValueError("Only degree zero polynomials supported on point elements.") + return 1 + elif ref_el.get_shape() == reference_element.LINE: return max(0, degree + 1) elif ref_el.get_shape() == reference_element.TRIANGLE: return max((degree + 1) * (degree + 2) // 2, 0) elif ref_el.get_shape() == reference_element.TETRAHEDRON: return max(0, (degree + 1) * (degree + 2) * (degree + 3) // 6) else: - raise Exception("Unknown reference element type.") + raise ValueError("Unknown reference element type.") diff --git a/FIAT/functional.py b/FIAT/functional.py index 22db52cf5..44711ca6d 100644 --- a/FIAT/functional.py +++ b/FIAT/functional.py @@ -1,5 +1,7 @@ # Copyright (C) 2008 Robert C. Kirby (Texas Tech University) # +# Modified 2020 by the same from Baylor University +# # This file is part of FIAT (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later @@ -15,6 +17,10 @@ import numpy import sympy +from FIAT import polynomial_set +from FIAT.quadrature import GaussLegendreQuadratureLineRule, QuadratureRule +from FIAT.reference_element import UFCInterval as interval + def index_iterator(shp): """Constructs a generator iterating over all indices in @@ -31,19 +37,33 @@ def index_iterator(shp): for foo in index_iterator(shp_foo): yield [i] + foo -# also put in a "jet_dict" that maps -# pt --> {wt, multiindex, comp} -# the multiindex is an iterable of nonnegative -# integers - class Functional(object): - """Class implementing an abstract functional. - All functionals are discrete in the sense that - the are written as a weighted sum of (components of) their - argument evaluated at particular points.""" - - def __init__(self, ref_el, target_shape, pt_dict, deriv_dict, functional_type): + r"""Abstract class representing a linear functional. + All FIAT functionals are discrete in the sense that + they are written as a weighted sum of (derivatives of components of) their + argument evaluated at particular points. + + :arg ref_el: a :class:`Cell` + :arg target_shape: a tuple indicating the value shape of functions on + the functional operates (e.g. if the function eats 2-vectors + then target_shape is (2,) and if it eats scalars then + target_shape is () + :arg pt_dict: A dict mapping points to lists of information about + how the functional is evaluated. 
Each entry in the list takes + the form of a tuple (wt, comp) so that (at least if the + deriv_dict argument is empty), the functional takes the form + :math:`\ell(f) = \sum_{q=1}^{N_q} \sum_{k=1}^{K_q} w^q_k f_{c_k}(x_q)` + where :math:`f_{c_k}` indicates a particular vector or tensor component + :arg deriv_dict: A dict that is similar to `pt_dict`, although the entries + of each list are tuples (wt, alpha, comp) with alpha a tuple + of nonnegative integers corresponding to the order of partial + differentiation in each spatial direction. + :arg functional_type: a string labeling the kind of functional + this is. + """ + def __init__(self, ref_el, target_shape, pt_dict, deriv_dict, + functional_type): self.ref_el = ref_el self.target_shape = target_shape self.pt_dict = pt_dict @@ -51,7 +71,7 @@ def __init__(self, ref_el, target_shape, pt_dict, deriv_dict, functional_type): self.functional_type = functional_type if len(deriv_dict) > 0: per_point = list(chain(*deriv_dict.values())) - alphas = [foo[1] for foo in per_point] + alphas = [tuple(foo[1]) for foo in per_point] self.max_deriv_order = max([sum(foo) for foo in alphas]) else: self.max_deriv_order = 0 @@ -83,36 +103,70 @@ def get_type_tag(self): normal component, which is probably handy for clients of FIAT""" return self.functional_type - # overload me in subclasses to make life easier!! def to_riesz(self, poly_set): - """Constructs an array representation of the functional over - the base of the given polynomial_set so that f(phi) for any - phi in poly_set is given by a dot product.""" + r"""Constructs an array representation of the functional so + that the functional may be applied to a function expressed in + in terms of the expansion set underlying `poly_set` by means + of contracting coefficients. + + That is, `poly_set` will have members all expressed in the + form :math:`p = \sum_{i} \alpha^i \phi_i` + where :math:`\{\phi_i\}_{i}` is some orthonormal expansion set + and :math:`\alpha^i` are coefficients. Note: the orthonormal + expansion set is always scalar-valued but if the members of + `poly_set` are vector or tensor valued the :math:`\alpha^i` + will be scalars or vectors. + + This function constructs a tensor :math:`R` such that the + contraction of :math:`R` with the array of coefficients + :math:`\alpha` produces the effect of :math:`\ell(f)` + + In the case of scalar-value functions, :math:`R` is just a + vector of the same length as the expansion set, and + :math:`R_i = \ell(\phi_i)`. For vector-valued spaces, + :math:`R_{ij}` will be :math:`\ell(e^i \phi_j)` where + :math:`e^i` is the canonical unit vector nonzero only in one + entry :math:`i`. 
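A concrete sketch of the contraction described above (illustrative only, assuming a scalar point evaluation): the Riesz vector is the expansion set tabulated at the point, so contracting it with a member's coefficients reproduces the functional applied to that member.

    import numpy
    from FIAT import functional, polynomial_set
    from FIAT.reference_element import UFCInterval

    T = UFCInterval()
    P = polynomial_set.ONPolynomialSet(T, 2)
    ell = functional.PointEvaluation(T, (0.25,))
    R = ell.to_riesz(P)                    # one weight per expansion function
    alpha0 = P.coeffs[0]                   # coefficients of the first member
    assert numpy.isclose(numpy.dot(alpha0, R),
                         P.tabulate([(0.25,)])[(0,)][0, 0])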
+ """ es = poly_set.get_expansion_set() ed = poly_set.get_embedded_degree() + nexp = es.get_num_members(ed) + pt_dict = self.get_point_dict() pts = list(pt_dict.keys()) - - # bfs is matrix that is pdim rows by num_pts cols - # where pdim is the polynomial dimension + npts = len(pts) bfs = es.tabulate(ed, pts) - result = numpy.zeros(poly_set.coeffs.shape[1:], "d") # loop over points - for j in range(len(pts)): + for j in range(npts): pt_cur = pts[j] wc_list = pt_dict[pt_cur] # loop over expansion functions - for i in range(bfs.shape[0]): + for i in range(nexp): for (w, c) in wc_list: result[c][i] += w * bfs[i, j] if self.deriv_dict: - raise NotImplementedError("Generic to_riesz implementation does not support derivatives") + dpt_dict = self.deriv_dict + + # this makes things quicker since it uses dmats after + # instantiation + es_foo = polynomial_set.ONPolynomialSet(self.ref_el, ed) + dpts = list(dpt_dict.keys()) + + dbfs = es_foo.tabulate(dpts, self.max_deriv_order) + + ndpts = len(dpts) + for j in range(ndpts): + dpt_cur = dpts[j] + wac_list = dpt_dict[dpt_cur] + for i in range(nexp): + for (w, alpha, c) in wac_list: + result[c][i] += w * dbfs[tuple(alpha)][i, j] return result @@ -161,8 +215,8 @@ class PointDerivative(Functional): functions at a particular point x.""" def __init__(self, ref_el, x, alpha): - dpt_dict = {x: [(1.0, alpha, tuple())]} - self.alpha = alpha + dpt_dict = {x: [(1.0, tuple(alpha), tuple())]} + self.alpha = tuple(alpha) self.order = sum(self.alpha) Functional.__init__(self, ref_el, tuple(), {}, dpt_dict, "PointDeriv") @@ -180,28 +234,9 @@ def __call__(self, fn): return sympy.diff(fn(X), *dvars).evalf(subs=dict(zip(dX, x))) - def to_riesz(self, poly_set): - x = list(self.deriv_dict.keys())[0] - - X = sympy.DeferredVector('x') - dx = numpy.asarray([X[i] for i in range(len(x))]) - - es = poly_set.get_expansion_set() - ed = poly_set.get_embedded_degree() - - bfs = es.tabulate(ed, [dx])[:, 0] - - # Expand the multi-index as a series of variables to - # differentiate with respect to. 
- dvars = tuple(d for d, a in zip(dx, self.alpha) - for count in range(a)) - - return numpy.asarray([sympy.lambdify(X, sympy.diff(b, *dvars))(x) - for b in bfs]) - class PointNormalDerivative(Functional): - + """Represents d/dn at a point on a facet.""" def __init__(self, ref_el, facet_no, pt): n = ref_el.compute_normal(facet_no) self.n = n @@ -212,49 +247,51 @@ def __init__(self, ref_el, facet_no, pt): alpha = [0] * sd alpha[i] = 1 alphas.append(alpha) - dpt_dict = {pt: [(n[i], alphas[i], tuple()) for i in range(sd)]} + dpt_dict = {pt: [(n[i], tuple(alphas[i]), tuple()) for i in range(sd)]} Functional.__init__(self, ref_el, tuple(), {}, dpt_dict, "PointNormalDeriv") - def to_riesz(self, poly_set): - x = list(self.deriv_dict.keys())[0] - - X = sympy.DeferredVector('x') - dx = numpy.asarray([X[i] for i in range(len(x))]) - es = poly_set.get_expansion_set() - ed = poly_set.get_embedded_degree() +class PointNormalSecondDerivative(Functional): + """Represents d^/dn^2 at a point on a facet.""" + def __init__(self, ref_el, facet_no, pt): + n = ref_el.compute_normal(facet_no) + self.n = n + sd = ref_el.get_spatial_dimension() + tau = numpy.zeros((sd*(sd+1)//2,)) - bfs = es.tabulate(ed, [dx])[:, 0] + alphas = [] + cur = 0 + for i in range(sd): + for j in range(i, sd): + alpha = [0] * sd + alpha[i] += 1 + alpha[j] += 1 + alphas.append(tuple(alpha)) + tau[cur] = n[i]*n[j] + cur += 1 + + self.tau = tau + self.alphas = alphas + dpt_dict = {pt: [(n[i], alphas[i], tuple()) for i in range(sd)]} - # We need the gradient dotted with the normal. - return numpy.asarray( - [sympy.lambdify( - X, sum([sympy.diff(b, dxi)*ni - for dxi, ni in zip(dx, self.n)]))(x) - for b in bfs]) + Functional.__init__(self, ref_el, tuple(), {}, dpt_dict, "PointNormalDeriv") class IntegralMoment(Functional): - """An IntegralMoment is a functional""" + """Functional representing integral of the input against some tabulated function f. + + :arg ref_el: a :class:`Cell`. + :arg Q: a :class:`QuadratureRule`. + :arg f_at_qpts: an array tabulating the function f at the quadrature + points. + :arg comp: Optional argument indicating that only a particular + component of the input function should be integrated against f + :arg shp: Optional argument giving the value shape of input functions. + """ def __init__(self, ref_el, Q, f_at_qpts, comp=tuple(), shp=tuple()): - """ - Create IntegralMoment - - *Arguments* - - ref_el - The reference element (cell) - Q (QuadratureRule) - A quadrature rule for the integral - f_at_qpts - ??? - comp (tuple) - A component ??? (Optional) - shp (tuple) - The shape ??? 
(Optional) - """ + self.Q = Q qpts, qwts = Q.get_points(), Q.get_weights() pt_dict = OrderedDict() self.comp = comp @@ -273,21 +310,6 @@ def __call__(self, fn): result = result[self.comp] return result - def to_riesz(self, poly_set): - es = poly_set.get_expansion_set() - ed = poly_set.get_embedded_degree() - pts = list(self.pt_dict.keys()) - bfs = es.tabulate(ed, pts) - wts = numpy.array([foo[0][0] for foo in list(self.pt_dict.values())]) - result = numpy.zeros(poly_set.coeffs.shape[1:], "d") - - if len(self.comp) == 0: - result[:] = numpy.dot(bfs, wts) - else: - result[self.comp, :] = numpy.dot(bfs, wts) - - return result - class IntegralMomentOfNormalDerivative(Functional): """Functional giving normal derivative integrated against some function on a facet.""" @@ -309,62 +331,179 @@ def __init__(self, ref_el, facet_no, Q, f_at_qpts): dpt_dict = OrderedDict() - alphas = [[1 if j == i else 0 for j in range(sd)] for i in range(sd)] + alphas = [tuple([1 if j == i else 0 for j in range(sd)]) for i in range(sd)] for j, pt in enumerate(dpts): - dpt_dict[tuple(pt)] = [(qwts[j]*n[i], alphas[i], tuple()) for i in range(sd)] + dpt_dict[tuple(pt)] = [(qwts[j]*n[i]*f_at_qpts[j], alphas[i], tuple()) for i in range(sd)] Functional.__init__(self, ref_el, tuple(), {}, dpt_dict, "IntegralMomentOfNormalDerivative") - def to_riesz(self, poly_set): - es = poly_set.get_expansion_set() - ed = poly_set.get_embedded_degree() - result = numpy.zeros(es.get_num_members(ed)) - sd = self.ref_el.get_spatial_dimension() +class IntegralLegendreDirectionalMoment(Functional): + """Moment of v.s against a Legendre polynomial over an edge""" + def __init__(self, cell, s, entity, mom_deg, comp_deg, nm=""): + sd = cell.get_spatial_dimension() + assert sd == 2 + shp = (sd,) + quadpoints = comp_deg + 1 + Q = GaussLegendreQuadratureLineRule(interval(), quadpoints) + legendre = numpy.polynomial.legendre.legval(2*Q.get_points()-1, [0]*mom_deg + [1]) + f_at_qpts = numpy.array([s*legendre[i] for i in range(quadpoints)]) + fmap = cell.get_entity_transform(sd-1, entity) + mappedqpts = [fmap(pt) for pt in Q.get_points()] + mappedQ = QuadratureRule(cell, mappedqpts, Q.get_weights()) + qwts = mappedQ.wts + qpts = mappedQ.pts - X = sympy.DeferredVector('x') - dX = numpy.asarray([X[i] for i in range(sd)]) + pt_dict = OrderedDict() - # evaluate bfs symbolically - bfs = es.tabulate(ed, [dX])[:, 0] + for k in range(len(qpts)): + pt_cur = tuple(qpts[k]) + pt_dict[pt_cur] = [(qwts[k] * f_at_qpts[k, i], (i,)) + for i in range(2)] - n = self.n - qwts = self.Q.get_weights() + super().__init__(cell, shp, pt_dict, {}, nm) - for i in range(len(result)): - thing = sympy.lambdify( - X, sum([sympy.diff(bfs[i], dxi)*ni - for dxi, ni in zip(dX, n)])) - for j, pt in enumerate(self.deriv_dict.keys()): - result[i] += qwts[j] * self.f_at_qpts[j] * thing(pt) +class IntegralLegendreNormalMoment(IntegralLegendreDirectionalMoment): + """Moment of v.n against a Legendre polynomial over an edge""" + def __init__(self, cell, entity, mom_deg, comp_deg): + n = cell.compute_scaled_normal(entity) + super().__init__(cell, n, entity, mom_deg, comp_deg, + "IntegralLegendreNormalMoment") - return result + +class IntegralLegendreTangentialMoment(IntegralLegendreDirectionalMoment): + """Moment of v.t against a Legendre polynomial over an edge""" + def __init__(self, cell, entity, mom_deg, comp_deg): + t = cell.compute_edge_tangent(entity) + super().__init__(cell, t, entity, mom_deg, comp_deg, + "IntegralLegendreTangentialMoment") + + +class 
IntegralLegendreBidirectionalMoment(Functional): + """Moment of dot(s1, dot(tau, s2)) against Legendre on entity, multiplied by the size of the reference facet""" + def __init__(self, cell, s1, s2, entity, mom_deg, comp_deg, nm=""): + # mom_deg is degree of moment, comp_deg is the total degree of + # polynomial you might need to integrate (or something like that) + sd = cell.get_spatial_dimension() + shp = (sd, sd) + + s1s2T = numpy.outer(s1, s2) + quadpoints = comp_deg + 1 + Q = GaussLegendreQuadratureLineRule(interval(), quadpoints) + + # The volume squared gets the Jacobian mapping from line interval + # and the edge length into the functional. + legendre = numpy.polynomial.legendre.legval(2*Q.get_points()-1, [0]*mom_deg + [1]) * numpy.abs(cell.volume_of_subcomplex(1, entity))**2 + + f_at_qpts = numpy.array([s1s2T*legendre[i] for i in range(quadpoints)]) + + # Map the quadrature points + fmap = cell.get_entity_transform(sd-1, entity) + mappedqpts = [fmap(pt) for pt in Q.get_points()] + mappedQ = QuadratureRule(cell, mappedqpts, Q.get_weights()) + + pt_dict = OrderedDict() + + qpts = mappedQ.pts + qwts = mappedQ.wts + + for k in range(len(qpts)): + pt_cur = tuple(qpts[k]) + pt_dict[pt_cur] = [(qwts[k] * f_at_qpts[k, i, j], (i, j)) + for (i, j) in index_iterator(shp)] + + super().__init__(cell, shp, pt_dict, {}, nm) + + +class IntegralLegendreNormalNormalMoment(IntegralLegendreBidirectionalMoment): + """Moment of dot(n, dot(tau, n)) against Legendre on entity.""" + def __init__(self, cell, entity, mom_deg, comp_deg): + n = cell.compute_normal(entity) + super().__init__(cell, n, n, entity, mom_deg, comp_deg, + "IntegralNormalNormalLegendreMoment") + + +class IntegralLegendreNormalTangentialMoment(IntegralLegendreBidirectionalMoment): + """Moment of dot(n, dot(tau, t)) against Legendre on entity.""" + def __init__(self, cell, entity, mom_deg, comp_deg): + n = cell.compute_normal(entity) + t = cell.compute_normalized_edge_tangent(entity) + super().__init__(cell, n, t, entity, mom_deg, comp_deg, + "IntegralNormalTangentialLegendreMoment") + + +class IntegralMomentOfDivergence(Functional): + """Functional representing integral of the divergence of the input + against some tabulated function f.""" + def __init__(self, ref_el, Q, f_at_qpts): + self.f_at_qpts = f_at_qpts + self.Q = Q + + sd = ref_el.get_spatial_dimension() + + qpts, qwts = Q.get_points(), Q.get_weights() + dpts = qpts + self.dpts = dpts + + dpt_dict = OrderedDict() + + alphas = [tuple([1 if j == i else 0 for j in range(sd)]) for i in range(sd)] + for j, pt in enumerate(dpts): + dpt_dict[tuple(pt)] = [(qwts[j]*f_at_qpts[j], alphas[i], (i,)) for i in range(sd)] + + super().__init__(ref_el, tuple(), {}, dpt_dict, + "IntegralMomentOfDivergence") + + +class IntegralMomentOfTensorDivergence(Functional): + """Like IntegralMomentOfDivergence, but on symmetric tensors.""" + + def __init__(self, ref_el, Q, f_at_qpts): + self.f_at_qpts = f_at_qpts + self.Q = Q + qpts, qwts = Q.get_points(), Q.get_weights() + nqp = len(qpts) + dpts = qpts + self.dpts = dpts + + assert len(f_at_qpts.shape) == 2 + assert f_at_qpts.shape[0] == 2 + assert f_at_qpts.shape[1] == nqp + + sd = ref_el.get_spatial_dimension() + + dpt_dict = OrderedDict() + + alphas = [tuple([1 if j == i else 0 for j in range(sd)]) for i in range(sd)] + for q, pt in enumerate(dpts): + dpt_dict[tuple(pt)] = [(qwts[q]*f_at_qpts[i, q], alphas[j], (i, j)) for i in range(2) for j in range(2)] + + super().__init__(ref_el, tuple(), {}, dpt_dict, + "IntegralMomentOfDivergence") class 
FrobeniusIntegralMoment(Functional): def __init__(self, ref_el, Q, f_at_qpts): - # f_at_qpts is num components x num_qpts - if len(Q.get_points()) != f_at_qpts.shape[1]: + # f_at_qpts is (some shape) x num_qpts + shp = tuple(f_at_qpts.shape[:-1]) + if len(Q.get_points()) != f_at_qpts.shape[-1]: raise Exception("Mismatch in number of quadrature points and values") - # make sure that shp is same shape as f given - shp = (f_at_qpts.shape[0],) - qpts, qwts = Q.get_points(), Q.get_weights() pt_dict = {} - for i in range(len(qpts)): - pt_cur = tuple(qpts[i]) - pt_dict[pt_cur] = [(qwts[i] * f_at_qpts[j, i], (j,)) - for j in range(f_at_qpts.shape[0])] - Functional.__init__(self, ref_el, shp, pt_dict, {}, "FrobeniusIntegralMoment") + for i, (pt_cur, wt_cur) in enumerate(zip(map(tuple, qpts), qwts)): + pt_dict[pt_cur] = [] + for alfa in index_iterator(shp): + qpidx = tuple(alfa + [i]) + pt_dict[pt_cur].append((wt_cur * f_at_qpts[qpidx], tuple(alfa))) + + super().__init__(ref_el, shp, pt_dict, {}, "FrobeniusIntegralMoment") -# point normals happen on a d-1 dimensional facet -# pt is the "physical" point on that facet class PointNormalEvaluation(Functional): """Implements the evaluation of the normal component of a vector at a point on a facet of codimension 1.""" @@ -377,7 +516,7 @@ def __init__(self, ref_el, facet_no, pt): pt_dict = {pt: [(n[i], (i,)) for i in range(sd)]} shp = (sd,) - Functional.__init__(self, ref_el, shp, pt_dict, {}, "PointNormalEval") + super().__init__(ref_el, shp, pt_dict, {}, "PointNormalEval") class PointEdgeTangentEvaluation(Functional): @@ -390,17 +529,35 @@ def __init__(self, ref_el, edge_no, pt): sd = ref_el.get_spatial_dimension() pt_dict = {pt: [(t[i], (i,)) for i in range(sd)]} shp = (sd,) - Functional.__init__(self, ref_el, shp, pt_dict, {}, "PointEdgeTangent") + super().__init__(ref_el, shp, pt_dict, {}, "PointEdgeTangent") def tostr(self): x = list(map(str, list(self.pt_dict.keys())[0])) return "(u.t)(%s)" % (','.join(x),) - def to_riesz(self, poly_set): - # should be singleton - xs = list(self.pt_dict.keys()) - phis = poly_set.get_expansion_set().tabulate(poly_set.get_embedded_degree(), xs) - return numpy.outer(self.t, phis) + +class IntegralMomentOfEdgeTangentEvaluation(Functional): + r""" + \int_e v\cdot t p ds + + p \in Polynomials + + :arg ref_el: reference element for which e is a dim-1 entity + :arg Q: quadrature rule on the face + :arg P_at_qpts: polynomials evaluated at quad points + :arg edge: which edge. 
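A short usage sketch following the argument list above (cell, quadrature rule and degree chosen purely for illustration): the moment of v.t against the constant polynomial on edge 0 of the UFC triangle.

    from FIAT import functional, polynomial_set, quadrature
    from FIAT.reference_element import UFCTriangle, UFCInterval

    T = UFCTriangle()
    Q = quadrature.make_quadrature(UFCInterval(), 2)      # rule on the reference edge
    P0 = polynomial_set.ONPolynomialSet(UFCInterval(), 0)
    p_at_qpts = P0.tabulate(Q.get_points())[(0,)][0, :]   # the constant polynomial
    ell = functional.IntegralMomentOfEdgeTangentEvaluation(T, Q, p_at_qpts, 0)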
+ """ + def __init__(self, ref_el, Q, P_at_qpts, edge): + t = ref_el.compute_edge_tangent(edge) + sd = ref_el.get_spatial_dimension() + transform = ref_el.get_entity_transform(1, edge) + pts = tuple(map(lambda p: tuple(transform(p)), Q.get_points())) + weights = Q.get_weights() + pt_dict = OrderedDict() + for pt, wgt, phi in zip(pts, weights, P_at_qpts): + pt_dict[pt] = [(wgt*phi*t[i], (i, )) for i in range(sd)] + super().__init__(ref_el, (sd, ), pt_dict, {}, + "IntegralMomentOfEdgeTangentEvaluation") class PointFaceTangentEvaluation(Functional): @@ -420,10 +577,59 @@ def tostr(self): x = list(map(str, list(self.pt_dict.keys())[0])) return "(u.t%d)(%s)" % (self.tno, ','.join(x),) - def to_riesz(self, poly_set): - xs = list(self.pt_dict.keys()) - phis = poly_set.get_expansion_set().tabulate(poly_set.get_embedded_degree(), xs) - return numpy.outer(self.t, phis) + +class IntegralMomentOfFaceTangentEvaluation(Functional): + r""" + \int_F v \times n \cdot p ds + + p \in Polynomials + + :arg ref_el: reference element for which F is a codim-1 entity + :arg Q: quadrature rule on the face + :arg P_at_qpts: polynomials evaluated at quad points + :arg facet: which facet. + """ + def __init__(self, ref_el, Q, P_at_qpts, facet): + P_at_qpts = [[P_at_qpts[0][i], P_at_qpts[1][i], P_at_qpts[2][i]] + for i in range(P_at_qpts.shape[1])] + n = ref_el.compute_scaled_normal(facet) + sd = ref_el.get_spatial_dimension() + transform = ref_el.get_entity_transform(sd-1, facet) + pts = tuple(map(lambda p: tuple(transform(p)), Q.get_points())) + weights = Q.get_weights() + pt_dict = OrderedDict() + for pt, wgt, phi in zip(pts, weights, P_at_qpts): + phixn = [phi[1]*n[2] - phi[2]*n[1], + phi[2]*n[0] - phi[0]*n[2], + phi[0]*n[1] - phi[1]*n[0]] + pt_dict[pt] = [(wgt*(-n[2]*phixn[1]+n[1]*phixn[2]), (0, )), + (wgt*(n[2]*phixn[0]-n[0]*phixn[2]), (1, )), + (wgt*(-n[1]*phixn[0]+n[0]*phixn[1]), (2, ))] + super().__init__(ref_el, (sd, ), pt_dict, {}, + "IntegralMomentOfFaceTangentEvaluation") + + +class MonkIntegralMoment(Functional): + r""" + face nodes are \int_F v\cdot p dA where p \in P_{q-2}(f)^3 with p \cdot n = 0 + (cmp. Peter Monk - Finite Element Methods for Maxwell's equations p. 129) + Note that we don't scale by the area of the facet + + :arg ref_el: reference element for which F is a codim-1 entity + :arg Q: quadrature rule on the face + :arg P_at_qpts: polynomials evaluated at quad points + :arg facet: which facet. 
+ """ + + def __init__(self, ref_el, Q, P_at_qpts, facet): + sd = ref_el.get_spatial_dimension() + weights = Q.get_weights() + pt_dict = OrderedDict() + transform = ref_el.get_entity_transform(sd-1, facet) + pts = tuple(map(lambda p: tuple(transform(p)), Q.get_points())) + for pt, wgt, phi in zip(pts, weights, P_at_qpts): + pt_dict[pt] = [(wgt*phi[i], (i, )) for i in range(sd)] + super().__init__(ref_el, (sd, ), pt_dict, {}, "MonkIntegralMoment") class PointScaledNormalEvaluation(Functional): @@ -437,16 +643,34 @@ def __init__(self, ref_el, facet_no, pt): shp = (sd,) pt_dict = {pt: [(self.n[i], (i,)) for i in range(sd)]} - Functional.__init__(self, ref_el, shp, pt_dict, {}, "PointScaledNormalEval") + super().__init__(ref_el, shp, pt_dict, {}, "PointScaledNormalEval") def tostr(self): x = list(map(str, list(self.pt_dict.keys())[0])) return "(u.n)(%s)" % (','.join(x),) - def to_riesz(self, poly_set): - xs = list(self.pt_dict.keys()) - phis = poly_set.get_expansion_set().tabulate(poly_set.get_embedded_degree(), xs) - return numpy.outer(self.n, phis) + +class IntegralMomentOfScaledNormalEvaluation(Functional): + r""" + \int_F v\cdot n p ds + + p \in Polynomials + + :arg ref_el: reference element for which F is a codim-1 entity + :arg Q: quadrature rule on the face + :arg P_at_qpts: polynomials evaluated at quad points + :arg facet: which facet. + """ + def __init__(self, ref_el, Q, P_at_qpts, facet): + n = ref_el.compute_scaled_normal(facet) + sd = ref_el.get_spatial_dimension() + transform = ref_el.get_entity_transform(sd - 1, facet) + pts = tuple(map(lambda p: tuple(transform(p)), Q.get_points())) + weights = Q.get_weights() + pt_dict = OrderedDict() + for pt, wgt, phi in zip(pts, weights, P_at_qpts): + pt_dict[pt] = [(wgt*phi*n[i], (i, )) for i in range(sd)] + super().__init__(ref_el, (sd, ), pt_dict, {}, "IntegralMomentOfScaledNormalEvaluation") class PointwiseInnerProductEvaluation(Functional): @@ -469,4 +693,103 @@ def __init__(self, ref_el, v, w, p): for i, j in index_iterator((sd, sd))]} shp = (sd, sd) - Functional.__init__(self, ref_el, shp, pt_dict, {}, "PointwiseInnerProductEval") + super().__init__(ref_el, shp, pt_dict, {}, "PointwiseInnerProductEval") + + +class TensorBidirectionalMomentInnerProductEvaluation(Functional): + r""" + This is a functional on symmetric 2-tensor fields. Let u be such a + field, f a function tabulated at points, and v,w be vectors. This implements the evaluation + \int v^T u(x) w f(x). + + Clearly v^iu_{ij}w^j = u_{ij}v^iw^j. Thus the value can be computed + from the Frobenius inner product of u with wv^T. This gives the + correct weights. + """ + + def __init__(self, ref_el, v, w, Q, f_at_qpts, comp_deg): + sd = ref_el.get_spatial_dimension() + + wvT = numpy.outer(w, v) + + qpts, qwts = Q.get_points(), Q.get_weights() + + pt_dict = {} + for k, pt in enumerate(map(tuple(qpts))): + pt_dict[pt] = [] + for i, j in index_iterator((sd, sd)): + pt_dict[pt].append((qwts[k] * wvT[i][j] * f_at_qpts[i, j, k]), + (i, j)) + + shp = (sd, sd) + super().__init__(ref_el, shp, pt_dict, {}, "TensorBidirectionalMomentInnerProductEvaluation") + + +class IntegralMomentOfNormalEvaluation(Functional): + r""" + \int_F v\cdot n p ds + p \in Polynomials + :arg ref_el: reference element for which F is a codim-1 entity + :arg Q: quadrature rule on the face + :arg P_at_qpts: polynomials evaluated at quad points + :arg facet: which facet. 
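As written, the quadrature loop in TensorBidirectionalMomentInnerProductEvaluation above calls map with a single argument and hands two arguments to list.append, so the constructor would raise at run time. A working version of that loop would presumably read as follows (a sketch reusing the names defined in that constructor):

    for k, pt in enumerate(map(tuple, qpts)):
        pt_dict[pt] = [(qwts[k] * wvT[i][j] * f_at_qpts[i, j, k], (i, j))
                       for i, j in index_iterator((sd, sd))]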
+ """ + def __init__(self, ref_el, Q, P_at_qpts, facet): + # scaling on the normal is ok because edge length then weights + # the reference element quadrature appropriately + n = ref_el.compute_scaled_normal(facet) + sd = ref_el.get_spatial_dimension() + transform = ref_el.get_entity_transform(sd - 1, facet) + pts = tuple(map(lambda p: tuple(transform(p)), Q.get_points())) + weights = Q.get_weights() + pt_dict = OrderedDict() + for pt, wgt, phi in zip(pts, weights, P_at_qpts): + pt_dict[pt] = [(wgt*phi*n[i], (i, )) for i in range(sd)] + super().__init__(ref_el, (sd, ), pt_dict, {}, "IntegralMomentOfScaledNormalEvaluation") + + +class IntegralMomentOfTangentialEvaluation(Functional): + r""" + \int_F v\cdot n p ds + p \in Polynomials + :arg ref_el: reference element for which F is a codim-1 entity + :arg Q: quadrature rule on the face + :arg P_at_qpts: polynomials evaluated at quad points + :arg facet: which facet. + """ + def __init__(self, ref_el, Q, P_at_qpts, facet): + # scaling on the tangent is ok because edge length then weights + # the reference element quadrature appropriately + sd = ref_el.get_spatial_dimension() + assert sd == 2 + t = ref_el.compute_edge_tangent(facet) + transform = ref_el.get_entity_transform(sd - 1, facet) + pts = tuple(map(lambda p: tuple(transform(p)), Q.get_points())) + weights = Q.get_weights() + pt_dict = OrderedDict() + for pt, wgt, phi in zip(pts, weights, P_at_qpts): + pt_dict[pt] = [(wgt*phi*t[i], (i, )) for i in range(sd)] + super().__init__(ref_el, (sd, ), pt_dict, {}, "IntegralMomentOfScaledTangentialEvaluation") + + +class IntegralMomentOfNormalNormalEvaluation(Functional): + r""" + \int_F (n^T tau n) p ds + p \in Polynomials + :arg ref_el: reference element for which F is a codim-1 entity + :arg Q: quadrature rule on the face + :arg P_at_qpts: polynomials evaluated at quad points + :arg facet: which facet. + """ + def __init__(self, ref_el, Q, P_at_qpts, facet): + # scaling on the normal is ok because edge length then weights + # the reference element quadrature appropriately + n = ref_el.compute_scaled_normal(facet) + sd = ref_el.get_spatial_dimension() + transform = ref_el.get_entity_transform(sd - 1, facet) + pts = tuple(map(lambda p: tuple(transform(p)), Q.get_points())) + weights = Q.get_weights() + pt_dict = OrderedDict() + for pt, wgt, phi in zip(pts, weights, P_at_qpts): + pt_dict[pt] = [(wgt*phi*n[i], (i, )) for i in range(sd)] + super().__init__(ref_el, (sd, ), pt_dict, {}, "IntegralMomentOfScaledNormalEvaluation") diff --git a/FIAT/gauss_legendre.py b/FIAT/gauss_legendre.py index 3ed840e07..18f68cbb0 100644 --- a/FIAT/gauss_legendre.py +++ b/FIAT/gauss_legendre.py @@ -5,9 +5,14 @@ # SPDX-License-Identifier: LGPL-3.0-or-later # # Written by David A. Ham (david.ham@imperial.ac.uk), 2015 +# +# Modified by Pablo D. 
Brubeck (brubeck@protonmail.com), 2021 + +import numpy from FIAT import finite_element, polynomial_set, dual_set, functional, quadrature from FIAT.reference_element import LINE +from FIAT.barycentric_interpolation import barycentric_interpolation class GaussLegendreDualSet(dual_set.DualSet): @@ -31,3 +36,21 @@ def __init__(self, ref_el, degree): dual = GaussLegendreDualSet(ref_el, degree) formdegree = ref_el.get_spatial_dimension() # n-form super(GaussLegendre, self).__init__(poly_set, dual, degree, formdegree) + + def tabulate(self, order, points, entity=None): + # This overrides the default with a more numerically stable algorithm + + if entity is None: + entity = (self.ref_el.get_dimension(), 0) + + entity_dim, entity_id = entity + transform = self.ref_el.get_entity_transform(entity_dim, entity_id) + + xsrc = [] + for node in self.dual.nodes: + # Assert singleton point for each node. + (pt,), = node.get_point_dict().keys() + xsrc.append(pt) + xsrc = numpy.asarray(xsrc) + xdst = numpy.array(list(map(transform, points))).flatten() + return barycentric_interpolation(xsrc, xdst, order=order) diff --git a/FIAT/gauss_lobatto_legendre.py b/FIAT/gauss_lobatto_legendre.py index cb469c8b9..ee10c180d 100644 --- a/FIAT/gauss_lobatto_legendre.py +++ b/FIAT/gauss_lobatto_legendre.py @@ -5,9 +5,14 @@ # SPDX-License-Identifier: LGPL-3.0-or-later # # Written by David A. Ham (david.ham@imperial.ac.uk), 2015 +# +# Modified by Pablo D. Brubeck (brubeck@protonmail.com), 2021 + +import numpy from FIAT import finite_element, polynomial_set, dual_set, functional, quadrature from FIAT.reference_element import LINE +from FIAT.barycentric_interpolation import barycentric_interpolation class GaussLobattoLegendreDualSet(dual_set.DualSet): @@ -31,3 +36,21 @@ def __init__(self, ref_el, degree): dual = GaussLobattoLegendreDualSet(ref_el, degree) formdegree = 0 # 0-form super(GaussLobattoLegendre, self).__init__(poly_set, dual, degree, formdegree) + + def tabulate(self, order, points, entity=None): + # This overrides the default with a more numerically stable algorithm + + if entity is None: + entity = (self.ref_el.get_dimension(), 0) + + entity_dim, entity_id = entity + transform = self.ref_el.get_entity_transform(entity_dim, entity_id) + + xsrc = [] + for node in self.dual.nodes: + # Assert singleton point for each node. + (pt,), = node.get_point_dict().keys() + xsrc.append(pt) + xsrc = numpy.asarray(xsrc) + xdst = numpy.array(list(map(transform, points))).flatten() + return barycentric_interpolation(xsrc, xdst, order=order) diff --git a/FIAT/gauss_radau.py b/FIAT/gauss_radau.py new file mode 100644 index 000000000..89191756d --- /dev/null +++ b/FIAT/gauss_radau.py @@ -0,0 +1,36 @@ +# Copyright (C) 2015 Imperial College London and others. +# +# This file is part of FIAT (https://www.fenicsproject.org) +# +# SPDX-License-Identifier: LGPL-3.0-or-later +# +# Written by Robert C. Kirby (robert_kirby@baylor.edu), 2020 + + +from FIAT import finite_element, polynomial_set, dual_set, functional, quadrature +from FIAT.reference_element import LINE + + +class GaussRadauDualSet(dual_set.DualSet): + """The dual basis for 1D discontinuous elements with nodes at the + Gauss-Radau points.""" + def __init__(self, ref_el, degree, right=True): + # Do DG connectivity because it's bonkers to do one-sided assembly even + # though we have an endpoint in the point set! 
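Stepping back to the tabulate overrides added above for GaussLegendre and GaussLobattoLegendre, a quick sketch of what they provide (illustrative, degree and points chosen arbitrarily): tabulation at arbitrary points now goes through barycentric Lagrange interpolation on the element's own nodes rather than the orthonormal expansion, which is the "more numerically stable algorithm" referred to in the comment.

    import numpy
    from FIAT import GaussLobattoLegendre
    from FIAT.reference_element import UFCInterval

    el = GaussLobattoLegendre(UFCInterval(), 4)
    pts = [(x,) for x in numpy.linspace(0.0, 1.0, 7)]
    tab = el.tabulate(1, pts)
    # tab[(0,)] holds values, tab[(1,)] first derivatives, each of shape (5, 7)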
+ entity_ids = {0: {0: [], 1: []}, + 1: {0: list(range(0, degree+1))}} + lr = quadrature.RadauQuadratureLineRule(ref_el, degree+1, right) + nodes = [functional.PointEvaluation(ref_el, x) for x in lr.pts] + + super(GaussRadauDualSet, self).__init__(nodes, ref_el, entity_ids) + + +class GaussRadau(finite_element.CiarletElement): + """1D discontinuous element with nodes at the Gauss-Radau points.""" + def __init__(self, ref_el, degree): + if ref_el.shape != LINE: + raise ValueError("Gauss-Radau elements are only defined in one dimension.") + poly_set = polynomial_set.ONPolynomialSet(ref_el, degree) + dual = GaussRadauDualSet(ref_el, degree) + formdegree = ref_el.get_spatial_dimension() # n-form + super(GaussRadau, self).__init__(poly_set, dual, degree, formdegree) diff --git a/FIAT/hermite.py b/FIAT/hermite.py index 2f9487af5..e1368fea8 100644 --- a/FIAT/hermite.py +++ b/FIAT/hermite.py @@ -70,7 +70,9 @@ def __init__(self, ref_el): class CubicHermite(finite_element.CiarletElement): """The cubic Hermite finite element. It is what it is.""" - def __init__(self, ref_el): + def __init__(self, ref_el, deg=3): + assert deg == 3 poly_set = polynomial_set.ONPolynomialSet(ref_el, 3) dual = CubicHermiteDualSet(ref_el) + super(CubicHermite, self).__init__(poly_set, dual, 3) diff --git a/FIAT/kong_mulder_veldhuizen.py b/FIAT/kong_mulder_veldhuizen.py new file mode 100644 index 000000000..6a3ed15cc --- /dev/null +++ b/FIAT/kong_mulder_veldhuizen.py @@ -0,0 +1,178 @@ +# Copyright (C) 2020 Robert C. Kirby (Baylor University) +# +# contributions by Keith Roberts (University of São Paulo) +# +# This file is part of FIAT (https://www.fenicsproject.org) +# +# SPDX-License-Identifier: LGPL-3.0-or-later +from FIAT import ( + finite_element, + dual_set, + functional, + Bubble, + FacetBubble, + Lagrange, + NodalEnrichedElement, + RestrictedElement, + reference_element, +) +from FIAT.quadrature_schemes import create_quadrature + +TRIANGLE = reference_element.UFCTriangle() +TETRAHEDRON = reference_element.UFCTetrahedron() + + +def _get_entity_ids(ref_el, degree): + """The topological association in a dictionary""" + T = ref_el.topology + sd = ref_el.get_spatial_dimension() + if degree == 1: # works for any spatial dimension. 
+ entity_ids = {0: dict((i, [i]) for i in range(len(T[0])))} + for d in range(1, sd + 1): + entity_ids[d] = dict((i, []) for i in range(len(T[d]))) + elif degree == 2: + if sd == 2: + entity_ids = { + 0: dict((i, [i]) for i in range(3)), + 1: dict((i, [i + 3]) for i in range(3)), + 2: {0: [6]}, + } + elif sd == 3: + entity_ids = { + 0: dict((i, [i]) for i in range(4)), + 1: dict((i, [i + 4]) for i in range(6)), + 2: dict((i, [i + 10]) for i in range(4)), + 3: {0: [14]}, + } + elif degree == 3: + if sd == 2: + etop = [[3, 4], [6, 5], [7, 8]] + entity_ids = { + 0: dict((i, [i]) for i in range(3)), + 1: dict((i, etop[i]) for i in range(3)), + 2: {0: [9, 10, 11]}, + } + elif sd == 3: + etop = [[4, 5], [7, 6], [8, 9], [11, 10], [12, 13], [14, 15]] + ftop = [[16, 17, 18], [19, 20, 21], [22, 23, 24], [25, 26, 27]] + entity_ids = { + 0: dict((i, [i]) for i in range(4)), + 1: dict((i, etop[i]) for i in range(6)), + 2: dict((i, ftop[i]) for i in range(4)), + 3: {0: [28, 29, 30, 31]}, + } + elif degree == 4: + if sd == 2: + etop = [[6, 3, 7], [9, 4, 8], [10, 5, 11]] + entity_ids = { + 0: dict((i, [i]) for i in range(3)), + 1: dict((i, etop[i]) for i in range(3)), + 2: {0: [i for i in range(12, 18)]}, + } + elif degree == 5: + if sd == 2: + etop = [[9, 3, 4, 10], [12, 6, 5, 11], [13, 7, 8, 14]] + entity_ids = { + 0: dict((i, [i]) for i in range(3)), + 1: dict((i, etop[i]) for i in range(3)), + 2: {0: [i for i in range(15, 30)]}, + } + return entity_ids + + +def bump(T, deg): + """Increase degree of polynomial along face/edges""" + sd = T.get_spatial_dimension() + if deg == 1: + return (0, 0) + else: + if sd == 2: + if deg < 5: + return (1, 1) + elif deg == 5: + return (2, 2) + else: + raise ValueError("Degree not supported") + elif sd == 3: + if deg < 4: + return (1, 2) + else: + raise ValueError("Degree not supported") + else: + raise ValueError("Dimension of element is not supported") + + +def KongMulderVeldhuizenSpace(T, deg): + sd = T.get_spatial_dimension() + if deg == 1: + return Lagrange(T, 1).poly_set + else: + L = Lagrange(T, deg) + # Toss the bubble from Lagrange since it's dependent + # on the higher-dimensional bubbles + if sd == 2: + inds = [ + i + for i in range(L.space_dimension()) + if i not in L.dual.entity_ids[sd][0] + ] + elif sd == 3: + not_inds = [L.dual.entity_ids[sd][0]] + [ + L.dual.entity_ids[sd - 1][f] for f in L.dual.entity_ids[sd - 1] + ] + not_inds = [item for sublist in not_inds for item in sublist] + inds = [i for i in range(L.space_dimension()) if i not in not_inds] + RL = RestrictedElement(L, inds) + # interior cell bubble + bubs = Bubble(T, deg + bump(T, deg)[1]) + if sd == 2: + return NodalEnrichedElement(RL, bubs).poly_set + elif sd == 3: + # bubble on the facet + fbubs = FacetBubble(T, deg + bump(T, deg)[0]) + return NodalEnrichedElement(RL, bubs, fbubs).poly_set + + +class KongMulderVeldhuizenDualSet(dual_set.DualSet): + """The dual basis for KMV simplical elements.""" + + def __init__(self, ref_el, degree): + entity_ids = {} + entity_ids = _get_entity_ids(ref_el, degree) + lr = create_quadrature(ref_el, degree, scheme="KMV") + nodes = [functional.PointEvaluation(ref_el, x) for x in lr.pts] + super(KongMulderVeldhuizenDualSet, self).__init__(nodes, ref_el, entity_ids) + + +class KongMulderVeldhuizen(finite_element.CiarletElement): + """The "lumped" simplical finite element (NB: requires custom quad. "KMV" points to achieve a diagonal mass matrix). 
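A brief sketch of the lumping property this element is built for (assuming the "KMV" scheme name registered in quadrature_schemes later in this patch): pairing the element with the matching quadrature rule, whose points coincide with the element's nodes, yields a diagonal mass matrix.

    import numpy
    from FIAT.kong_mulder_veldhuizen import KongMulderVeldhuizen, TRIANGLE
    from FIAT.quadrature_schemes import create_quadrature

    el = KongMulderVeldhuizen(TRIANGLE, 2)
    Q = create_quadrature(TRIANGLE, 2, scheme="KMV")
    phi = el.tabulate(0, Q.get_points())[(0, 0)]     # 7 basis functions at 7 points
    M = numpy.dot(phi * Q.get_weights(), phi.T)      # lumped mass matrix
    assert numpy.allclose(M, numpy.diag(numpy.diag(M)))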
+ + References + ---------- + + Higher-order triangular and tetrahedral finite elements with mass + lumping for solving the wave equation + M. J. S. CHIN-JOE-KONG, W. A. MULDER and M. VAN VELDHUIZEN + + HIGHER-ORDER MASS-LUMPED FINITE ELEMENTS FOR THE WAVE EQUATION + W.A. MULDER + + NEW HIGHER-ORDER MASS-LUMPED TETRAHEDRAL ELEMENTS + S. GEEVERS, W.A. MULDER, AND J.J.W. VAN DER VEGT + + """ + + def __init__(self, ref_el, degree): + if ref_el != TRIANGLE and ref_el != TETRAHEDRON: + raise ValueError("KMV is only valid for triangles and tetrahedrals") + if degree > 5 and ref_el == TRIANGLE: + raise NotImplementedError("Only P < 6 for triangles are implemented.") + if degree > 3 and ref_el == TETRAHEDRON: + raise NotImplementedError("Only P < 4 for tetrahedrals are implemented.") + S = KongMulderVeldhuizenSpace(ref_el, degree) + + dual = KongMulderVeldhuizenDualSet(ref_el, degree) + formdegree = 0 # 0-form + super(KongMulderVeldhuizen, self).__init__( + S, dual, degree + max(bump(ref_el, degree)), formdegree + ) diff --git a/FIAT/mardal_tai_winther.py b/FIAT/mardal_tai_winther.py new file mode 100644 index 000000000..0dbc76262 --- /dev/null +++ b/FIAT/mardal_tai_winther.py @@ -0,0 +1,161 @@ +# -*- coding: utf-8 -*- +"""Implementation of the Mardal-Tai-Winther finite elements.""" + +# Copyright (C) 2020 by Robert C. Kirby (Baylor University) +# +# This file is part of FIAT (https://www.fenicsproject.org) +# +# SPDX-License-Identifier: LGPL-3.0-or-later + + +from FIAT.finite_element import CiarletElement +from FIAT.dual_set import DualSet +from FIAT.polynomial_set import ONPolynomialSet +from FIAT.functional import (IntegralMomentOfNormalEvaluation, + IntegralMomentOfTangentialEvaluation, + IntegralLegendreNormalMoment, + IntegralMomentOfDivergence) + +from FIAT.quadrature import make_quadrature + + +def DivergenceDubinerMoments(cell, start_deg, stop_deg, comp_deg): + onp = ONPolynomialSet(cell, stop_deg) + Q = make_quadrature(cell, comp_deg) + + pts = Q.get_points() + onp = onp.tabulate(pts, 0)[0, 0] + + ells = [] + + for ii in range((start_deg)*(start_deg+1)//2, + (stop_deg+1)*(stop_deg+2)//2): + ells.append(IntegralMomentOfDivergence(cell, Q, onp[ii, :])) + + return ells + + +class MardalTaiWintherDual(DualSet): + """Degrees of freedom for Mardal-Tai-Winther elements.""" + def __init__(self, cell, degree): + dim = cell.get_spatial_dimension() + if not dim == 2: + raise ValueError("Mardal-Tai-Winther elements are only" + "defined in dimension 2.") + + if not degree == 3: + raise ValueError("Mardal-Tai-Winther elements are only defined" + "for degree 3.") + + # construct the degrees of freedoms + dofs = [] # list of functionals + + # dof_ids[i][j] contains the indices of dofs that are associated with + # entity j in dim i + dof_ids = {} + + # no vertex dof + dof_ids[0] = {i: [] for i in range(dim + 1)} + + # edge dofs + (_dofs, _dof_ids) = self._generate_edge_dofs(cell, degree) + dofs.extend(_dofs) + dof_ids[1] = _dof_ids + + # no cell dofs + dof_ids[2] = {} + dof_ids[2][0] = [] + + # extra dofs for enforcing div(v) constant over the cell and + # v.n linear on edges + (_dofs, _edge_dof_ids, _cell_dof_ids) = self._generate_constraint_dofs(cell, degree, len(dofs)) + dofs.extend(_dofs) + + for entity_id in range(3): + dof_ids[1][entity_id] = dof_ids[1][entity_id] + _edge_dof_ids[entity_id] + + dof_ids[2][0] = dof_ids[2][0] + _cell_dof_ids + + super(MardalTaiWintherDual, self).__init__(dofs, cell, dof_ids) + + @staticmethod + def _generate_edge_dofs(cell, degree): + """Generate dofs on edges. 
+ On each edge, let n be its normal. We need to integrate + u.n and u.t against the first Legendre polynomial (constant) + and u.n against the second (linear). + """ + dofs = [] + dof_ids = {} + offset = 0 + sd = 2 + + facet = cell.get_facet_element() + # Facet nodes are \int_F v\cdot n p ds where p \in P_{q-1} + # degree is q - 1 + Q = make_quadrature(facet, 6) + Pq = ONPolynomialSet(facet, 1) + Pq_at_qpts = Pq.tabulate(Q.get_points())[tuple([0]*(sd - 1))] + for f in range(3): + phi0 = Pq_at_qpts[0, :] + dofs.append(IntegralMomentOfNormalEvaluation(cell, Q, phi0, f)) + dofs.append(IntegralMomentOfTangentialEvaluation(cell, Q, phi0, f)) + phi1 = Pq_at_qpts[1, :] + dofs.append(IntegralMomentOfNormalEvaluation(cell, Q, phi1, f)) + + num_new_dofs = 3 + dof_ids[f] = list(range(offset, offset + num_new_dofs)) + offset += num_new_dofs + + return (dofs, dof_ids) + + @staticmethod + def _generate_constraint_dofs(cell, degree, offset): + """ + Generate constraint dofs on the cell and edges + * div(v) must be constant on the cell. Since v is a cubic and + div(v) is quadratic, we need the integral of div(v) against the + linear and quadratic Dubiner polynomials to vanish. + There are two linear and three quadratics, so these are five + constraints + * v.n must be linear on each edge. Since v.n is cubic, we need + the integral of v.n against the cubic and quadratic Legendre + polynomial to vanish on each edge. + + So we introduce functionals whose kernel describes this property, + as described in the FIAT paper. + """ + dofs = [] + + edge_dof_ids = {} + for entity_id in range(3): + dofs += [IntegralLegendreNormalMoment(cell, entity_id, 2, 6), + IntegralLegendreNormalMoment(cell, entity_id, 3, 6)] + + edge_dof_ids[entity_id] = [offset, offset+1] + offset += 2 + + cell_dofs = DivergenceDubinerMoments(cell, 1, 2, 6) + dofs.extend(cell_dofs) + cell_dof_ids = list(range(offset, offset+len(cell_dofs))) + + return (dofs, edge_dof_ids, cell_dof_ids) + + +class MardalTaiWinther(CiarletElement): + """The definition of the Mardal-Tai-Winther element. 
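A minimal construction sketch (the degree is fixed at 3 and the mapping is contravariant Piola, as the class below asserts): each edge carries the three moment dofs from _generate_edge_dofs plus the two constraint dofs appended afterwards.

    from FIAT.mardal_tai_winther import MardalTaiWinther
    from FIAT.reference_element import UFCTriangle

    el = MardalTaiWinther(UFCTriangle())
    print(el.mapping()[0])       # 'contravariant piola'
    print(el.entity_dofs()[1])   # five dof ids per edge: 3 moments + 2 constraints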
+ """ + def __init__(self, cell, degree=3): + assert degree == 3, "Only defined for degree 3" + assert cell.get_spatial_dimension() == 2, "Only defined for dimension 2" + # polynomial space + Ps = ONPolynomialSet(cell, degree, (2,)) + + # degrees of freedom + Ls = MardalTaiWintherDual(cell, degree) + + # mapping under affine transformation + mapping = "contravariant piola" + + super(MardalTaiWinther, self).__init__(Ps, Ls, degree, + mapping=mapping) diff --git a/FIAT/morley.py b/FIAT/morley.py index 8eed2f8da..8db46f55a 100644 --- a/FIAT/morley.py +++ b/FIAT/morley.py @@ -45,7 +45,7 @@ def __init__(self, ref_el): entity_ids[2] = {0: []} - super(MorleyDualSet, self).__init__(nodes, ref_el, entity_ids) + super().__init__(nodes, ref_el, entity_ids) class Morley(finite_element.CiarletElement): @@ -54,4 +54,4 @@ class Morley(finite_element.CiarletElement): def __init__(self, ref_el): poly_set = polynomial_set.ONPolynomialSet(ref_el, 2) dual = MorleyDualSet(ref_el) - super(Morley, self).__init__(poly_set, dual, 2) + super().__init__(poly_set, dual, 2) diff --git a/FIAT/nedelec.py b/FIAT/nedelec.py index bb7bdb218..b35c81f27 100644 --- a/FIAT/nedelec.py +++ b/FIAT/nedelec.py @@ -9,6 +9,7 @@ finite_element, functional) from itertools import chain import numpy +from FIAT.check_format_variant import check_format_variant def NedelecSpace2D(ref_el, k): @@ -143,7 +144,7 @@ def NedelecSpace3D(ref_el, k): class NedelecDual2D(dual_set.DualSet): """Dual basis for first-kind Nedelec in 2D.""" - def __init__(self, ref_el, degree): + def __init__(self, ref_el, degree, variant, quad_deg): sd = ref_el.get_spatial_dimension() if sd != 2: raise Exception("Nedelec2D only works on triangles") @@ -152,29 +153,56 @@ def __init__(self, ref_el, degree): t = ref_el.get_topology() - num_edges = len(t[1]) + if variant == "integral": + # edge nodes are \int_F v\cdot t p ds where p \in P_{q-1}(edge) + # degree is q - 1 + edge = ref_el.get_facet_element() + Q = quadrature.make_quadrature(edge, quad_deg) + Pq = polynomial_set.ONPolynomialSet(edge, degree) + Pq_at_qpts = Pq.tabulate(Q.get_points())[tuple([0]*(sd - 1))] + for e in range(len(t[sd - 1])): + for i in range(Pq_at_qpts.shape[0]): + phi = Pq_at_qpts[i, :] + nodes.append(functional.IntegralMomentOfEdgeTangentEvaluation(ref_el, Q, phi, e)) + + # internal nodes. 
These are \int_T v \cdot p dx where p \in P_{q-2}^2 + if degree > 0: + Q = quadrature.make_quadrature(ref_el, quad_deg) + qpts = Q.get_points() + Pkm1 = polynomial_set.ONPolynomialSet(ref_el, degree - 1) + zero_index = tuple([0 for i in range(sd)]) + Pkm1_at_qpts = Pkm1.tabulate(qpts)[zero_index] + + for d in range(sd): + for i in range(Pkm1_at_qpts.shape[0]): + phi_cur = Pkm1_at_qpts[i, :] + l_cur = functional.IntegralMoment(ref_el, Q, phi_cur, (d,), (sd,)) + nodes.append(l_cur) + + elif variant == "point": + num_edges = len(t[1]) + + # edge tangents + for i in range(num_edges): + pts_cur = ref_el.make_points(1, i, degree + 2) + for j in range(len(pts_cur)): + pt_cur = pts_cur[j] + f = functional.PointEdgeTangentEvaluation(ref_el, i, pt_cur) + nodes.append(f) - # edge tangents - for i in range(num_edges): - pts_cur = ref_el.make_points(1, i, degree + 2) - for j in range(len(pts_cur)): - pt_cur = pts_cur[j] - f = functional.PointEdgeTangentEvaluation(ref_el, i, pt_cur) - nodes.append(f) + # internal moments + if degree > 0: + Q = quadrature.make_quadrature(ref_el, 2 * (degree + 1)) + qpts = Q.get_points() + Pkm1 = polynomial_set.ONPolynomialSet(ref_el, degree - 1) + zero_index = tuple([0 for i in range(sd)]) + Pkm1_at_qpts = Pkm1.tabulate(qpts)[zero_index] - # internal moments - if degree > 0: - Q = quadrature.make_quadrature(ref_el, 2 * (degree + 1)) - qpts = Q.get_points() - Pkm1 = polynomial_set.ONPolynomialSet(ref_el, degree - 1) - zero_index = tuple([0 for i in range(sd)]) - Pkm1_at_qpts = Pkm1.tabulate(qpts)[zero_index] - - for d in range(sd): - for i in range(Pkm1_at_qpts.shape[0]): - phi_cur = Pkm1_at_qpts[i, :] - l_cur = functional.IntegralMoment(ref_el, Q, phi_cur, (d,)) - nodes.append(l_cur) + for d in range(sd): + for i in range(Pkm1_at_qpts.shape[0]): + phi_cur = Pkm1_at_qpts[i, :] + l_cur = functional.IntegralMoment(ref_el, Q, phi_cur, (d,), (sd,)) + nodes.append(l_cur) entity_ids = {} @@ -204,7 +232,7 @@ def __init__(self, ref_el, degree): class NedelecDual3D(dual_set.DualSet): """Dual basis for first-kind Nedelec in 3D.""" - def __init__(self, ref_el, degree): + def __init__(self, ref_el, degree, variant, quad_deg): sd = ref_el.get_spatial_dimension() if sd != 3: raise Exception("NedelecDual3D only works on tetrahedra") @@ -213,40 +241,85 @@ def __init__(self, ref_el, degree): t = ref_el.get_topology() - # how many edges - num_edges = len(t[1]) - - for i in range(num_edges): - # points to specify P_k on each edge - pts_cur = ref_el.make_points(1, i, degree + 2) - for j in range(len(pts_cur)): - pt_cur = pts_cur[j] - f = functional.PointEdgeTangentEvaluation(ref_el, i, pt_cur) - nodes.append(f) - - if degree > 0: # face tangents - num_faces = len(t[2]) - for i in range(num_faces): # loop over faces - pts_cur = ref_el.make_points(2, i, degree + 2) - for j in range(len(pts_cur)): # loop over points + if variant == "integral": + # edge nodes are \int_F v\cdot t p ds where p \in P_{q-1}(edge) + # degree is q - 1 + edge = ref_el.get_facet_element().get_facet_element() + Q = quadrature.make_quadrature(edge, quad_deg) + Pq = polynomial_set.ONPolynomialSet(edge, degree) + Pq_at_qpts = Pq.tabulate(Q.get_points())[tuple([0]*(1))] + for e in range(len(t[1])): + for i in range(Pq_at_qpts.shape[0]): + phi = Pq_at_qpts[i, :] + nodes.append(functional.IntegralMomentOfEdgeTangentEvaluation(ref_el, Q, phi, e)) + + # face nodes are \int_F v\cdot p dA where p \in P_{q-2}(f)^3 with p \cdot n = 0 (cmp. 
Monk) + # these are equivalent to dofs from Fenics book defined by + # \int_F v\times n \cdot p ds where p \in P_{q-2}(f)^2 + if degree > 0: + facet = ref_el.get_facet_element() + Q = quadrature.make_quadrature(facet, quad_deg) + Pq = polynomial_set.ONPolynomialSet(facet, degree-1, (sd,)) + Pq_at_qpts = Pq.tabulate(Q.get_points())[(0, 0)] + + for f in range(len(t[2])): + # R is used to map [1,0,0] to tangent1 and [0,1,0] to tangent2 + R = ref_el.compute_face_tangents(f) + + # Skip last functionals because we only want p with p \cdot n = 0 + for i in range(2 * Pq.get_num_members() // 3): + phi = Pq_at_qpts[i, ...] + phi = numpy.matmul(phi[:-1, ...].T, R) + nodes.append(functional.MonkIntegralMoment(ref_el, Q, phi, f)) + + # internal nodes. These are \int_T v \cdot p dx where p \in P_{q-3}^3(T) + if degree > 1: + Q = quadrature.make_quadrature(ref_el, quad_deg) + qpts = Q.get_points() + Pkm2 = polynomial_set.ONPolynomialSet(ref_el, degree - 2) + zero_index = tuple([0 for i in range(sd)]) + Pkm2_at_qpts = Pkm2.tabulate(qpts)[zero_index] + + for d in range(sd): + for i in range(Pkm2_at_qpts.shape[0]): + phi_cur = Pkm2_at_qpts[i, :] + l_cur = functional.IntegralMoment(ref_el, Q, phi_cur, (d,), (sd,)) + nodes.append(l_cur) + + elif variant == "point": + num_edges = len(t[1]) + + for i in range(num_edges): + # points to specify P_k on each edge + pts_cur = ref_el.make_points(1, i, degree + 2) + for j in range(len(pts_cur)): pt_cur = pts_cur[j] - for k in range(2): # loop over tangents - f = functional.PointFaceTangentEvaluation(ref_el, i, k, pt_cur) - nodes.append(f) - - if degree > 1: # internal moments - Q = quadrature.make_quadrature(ref_el, 2 * (degree + 1)) - qpts = Q.get_points() - Pkm2 = polynomial_set.ONPolynomialSet(ref_el, degree - 2) - zero_index = tuple([0 for i in range(sd)]) - Pkm2_at_qpts = Pkm2.tabulate(qpts)[zero_index] - - for d in range(sd): - for i in range(Pkm2_at_qpts.shape[0]): - phi_cur = Pkm2_at_qpts[i, :] - f = functional.IntegralMoment(ref_el, Q, phi_cur, (d,)) + f = functional.PointEdgeTangentEvaluation(ref_el, i, pt_cur) nodes.append(f) + if degree > 0: # face tangents + num_faces = len(t[2]) + for i in range(num_faces): # loop over faces + pts_cur = ref_el.make_points(2, i, degree + 2) + for j in range(len(pts_cur)): # loop over points + pt_cur = pts_cur[j] + for k in range(2): # loop over tangents + f = functional.PointFaceTangentEvaluation(ref_el, i, k, pt_cur) + nodes.append(f) + + if degree > 1: # internal moments + Q = quadrature.make_quadrature(ref_el, 2 * (degree + 1)) + qpts = Q.get_points() + Pkm2 = polynomial_set.ONPolynomialSet(ref_el, degree - 2) + zero_index = tuple([0 for i in range(sd)]) + Pkm2_at_qpts = Pkm2.tabulate(qpts)[zero_index] + + for d in range(sd): + for i in range(Pkm2_at_qpts.shape[0]): + phi_cur = Pkm2_at_qpts[i, :] + f = functional.IntegralMoment(ref_el, Q, phi_cur, (d,), (sd,)) + nodes.append(f) + entity_ids = {} # set to empty for i in range(sd + 1): @@ -277,18 +350,34 @@ def __init__(self, ref_el, degree): class Nedelec(finite_element.CiarletElement): - """Nedelec finite element""" + """ + Nedelec finite element + + :arg ref_el: The reference element. + :arg k: The degree. + :arg variant: optional variant specifying the types of nodes. + + variant can be chosen from ["point", "integral", "integral(quadrature_degree)"] + "point" -> dofs are evaluated by point evaluation. Note that this variant has suboptimal + convergence order in the H(curl)-norm + "integral" -> dofs are evaluated by quadrature rule. 
The quadrature degree is chosen to integrate + polynomials of degree 5*k so that most expressions will be interpolated exactly. This is important + when you want to have (nearly) curl-preserving interpolation. + "integral(quadrature_degree)" -> dofs are evaluated by quadrature rule of degree quadrature_degree + """ + + def __init__(self, ref_el, k, variant=None): - def __init__(self, ref_el, q): + degree = k - 1 - degree = q - 1 + (variant, quad_deg) = check_format_variant(variant, degree, "Nedelec") if ref_el.get_spatial_dimension() == 3: poly_set = NedelecSpace3D(ref_el, degree) - dual = NedelecDual3D(ref_el, degree) + dual = NedelecDual3D(ref_el, degree, variant, quad_deg) elif ref_el.get_spatial_dimension() == 2: poly_set = NedelecSpace2D(ref_el, degree) - dual = NedelecDual2D(ref_el, degree) + dual = NedelecDual2D(ref_el, degree, variant, quad_deg) else: raise Exception("Not implemented") formdegree = 1 # 1-form diff --git a/FIAT/nedelec_second_kind.py b/FIAT/nedelec_second_kind.py index ab6b64cb2..49b3c8b7e 100644 --- a/FIAT/nedelec_second_kind.py +++ b/FIAT/nedelec_second_kind.py @@ -15,6 +15,9 @@ from FIAT.raviart_thomas import RaviartThomas from FIAT.quadrature import make_quadrature, UFCTetrahedronFaceQuadratureRule from FIAT.reference_element import UFCTetrahedron +from FIAT.check_format_variant import check_format_variant + +from FIAT import polynomial_set, quadrature, functional class NedelecSecondKindDual(DualSet): @@ -46,15 +49,14 @@ class NedelecSecondKindDual(DualSet): these elements coincide with the CG_k elements.) """ - def __init__(self, cell, degree): + def __init__(self, cell, degree, variant, quad_deg): # Define degrees of freedom - (dofs, ids) = self.generate_degrees_of_freedom(cell, degree) - + (dofs, ids) = self.generate_degrees_of_freedom(cell, degree, variant, quad_deg) # Call init of super-class super(NedelecSecondKindDual, self).__init__(dofs, cell, ids) - def generate_degrees_of_freedom(self, cell, degree): + def generate_degrees_of_freedom(self, cell, degree, variant, quad_deg): "Generate dofs and geometry-to-dof maps (ids)." 
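Both Nedelec kinds now accept the same variant strings; a brief usage sketch (cell and degrees chosen for illustration, with the parenthesised form following the "integral(quadrature_degree)" convention documented above):

    from FIAT import Nedelec, NedelecSecondKind
    from FIAT.reference_element import UFCTetrahedron

    T = UFCTetrahedron()
    N1_pt = Nedelec(T, 2, variant="point")
    N1_int = Nedelec(T, 2, variant="integral")
    N2_int4 = NedelecSecondKind(T, 2, variant="integral(4)")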
dofs = [] @@ -68,25 +70,25 @@ def generate_degrees_of_freedom(self, cell, degree): ids[0] = dict(list(zip(list(range(d + 1)), ([] for i in range(d + 1))))) # (d+1) degrees of freedom per entity of codimension 1 (edges) - (edge_dofs, edge_ids) = self._generate_edge_dofs(cell, degree, 0) + (edge_dofs, edge_ids) = self._generate_edge_dofs(cell, degree, 0, variant, quad_deg) dofs.extend(edge_dofs) ids[1] = edge_ids # Include face degrees of freedom if 3D if d == 3: (face_dofs, face_ids) = self._generate_face_dofs(cell, degree, - len(dofs)) + len(dofs), variant, quad_deg) dofs.extend(face_dofs) ids[2] = face_ids # Varying degrees of freedom (possibly zero) per cell - (cell_dofs, cell_ids) = self._generate_cell_dofs(cell, degree, len(dofs)) + (cell_dofs, cell_ids) = self._generate_cell_dofs(cell, degree, len(dofs), variant, quad_deg) dofs.extend(cell_dofs) ids[d] = cell_ids return (dofs, ids) - def _generate_edge_dofs(self, cell, degree, offset): + def _generate_edge_dofs(self, cell, degree, offset, variant, quad_deg): """Generate degrees of freedoms (dofs) for entities of codimension 1 (edges).""" @@ -94,21 +96,35 @@ def _generate_edge_dofs(self, cell, degree, offset): # freedom per entity of codimension 1 (edges) dofs = [] ids = {} - for edge in range(len(cell.get_topology()[1])): - # Create points for evaluation of tangential components - points = cell.make_points(1, edge, degree + 2) + if variant == "integral": + edge = cell.construct_subelement(1) + Q = quadrature.make_quadrature(edge, quad_deg) + Pq = polynomial_set.ONPolynomialSet(edge, degree) + Pq_at_qpts = Pq.tabulate(Q.get_points())[tuple([0]*(1))] + for e in range(len(cell.get_topology()[1])): + for i in range(Pq_at_qpts.shape[0]): + phi = Pq_at_qpts[i, :] + dofs.append(functional.IntegralMomentOfEdgeTangentEvaluation(cell, Q, phi, e)) + jj = Pq_at_qpts.shape[0] * e + ids[e] = list(range(offset + jj, offset + jj + Pq_at_qpts.shape[0])) + + elif variant == "point": + for edge in range(len(cell.get_topology()[1])): - # A tangential component evaluation for each point - dofs += [Tangent(cell, edge, point) for point in points] + # Create points for evaluation of tangential components + points = cell.make_points(1, edge, degree + 2) - # Associate these dofs with this edge - i = len(points) * edge - ids[edge] = list(range(offset + i, offset + i + len(points))) + # A tangential component evaluation for each point + dofs += [Tangent(cell, edge, point) for point in points] + + # Associate these dofs with this edge + i = len(points) * edge + ids[edge] = list(range(offset + i, offset + i + len(points))) return (dofs, ids) - def _generate_face_dofs(self, cell, degree, offset): + def _generate_face_dofs(self, cell, degree, offset, variant, quad_deg): """Generate degrees of freedoms (dofs) for faces.""" # Initialize empty dofs and identifiers (ids) @@ -134,7 +150,7 @@ def _generate_face_dofs(self, cell, degree, offset): # Construct Raviart-Thomas of (degree - 1) on the # reference face reference_face = Q_face.reference_rule().ref_el - RT = RaviartThomas(reference_face, degree - 1) + RT = RaviartThomas(reference_face, degree - 1, variant) num_rts = RT.space_dimension() # Evaluate RT basis functions at reference quadrature @@ -168,7 +184,7 @@ def _generate_face_dofs(self, cell, degree, offset): return (dofs, ids) - def _generate_cell_dofs(self, cell, degree, offset): + def _generate_cell_dofs(self, cell, degree, offset, variant, quad_deg): """Generate degrees of freedoms (dofs) for entities of codimension d (cells).""" @@ -182,7 +198,7 @@ def 
_generate_cell_dofs(self, cell, degree, offset): qs = Q.get_points() # Create Raviart-Thomas nodal basis - RT = RaviartThomas(cell, degree + 1 - d) + RT = RaviartThomas(cell, degree + 1 - d, variant) phi = RT.get_nodal_basis() # Evaluate Raviart-Thomas basis at quadrature points @@ -203,21 +219,35 @@ class NedelecSecondKind(CiarletElement): tetrahedra: the polynomial space described by the full polynomials of degree k, with a suitable set of degrees of freedom to ensure H(curl) conformity. + + :arg ref_el: The reference element. + :arg k: The degree. + :arg variant: optional variant specifying the types of nodes. + + variant can be chosen from ["point", "integral", "integral(quadrature_degree)"] + "point" -> dofs are evaluated by point evaluation. Note that this variant has suboptimal + convergence order in the H(curl)-norm + "integral" -> dofs are evaluated by quadrature rule. The quadrature degree is chosen to integrate + polynomials of degree 5*k so that most expressions will be interpolated exactly. This is important + when you want to have (nearly) curl-preserving interpolation. + "integral(quadrature_degree)" -> dofs are evaluated by quadrature rule of degree quadrature_degree """ - def __init__(self, cell, degree): + def __init__(self, cell, k, variant=None): + + (variant, quad_deg) = check_format_variant(variant, k, "Nedelec Second Kind") # Check degree - assert degree >= 1, "Second kind Nedelecs start at 1!" + assert k >= 1, "Second kind Nedelecs start at 1!" # Get dimension d = cell.get_spatial_dimension() # Construct polynomial basis for d-vector fields - Ps = ONPolynomialSet(cell, degree, (d, )) + Ps = ONPolynomialSet(cell, k, (d, )) # Construct dual space - Ls = NedelecSecondKindDual(cell, degree) + Ls = NedelecSecondKindDual(cell, k, variant, quad_deg) # Set form degree formdegree = 1 # 1-form @@ -226,4 +256,4 @@ def __init__(self, cell, degree): mapping = "covariant piola" # Call init of super-class - super(NedelecSecondKind, self).__init__(Ps, Ls, degree, formdegree, mapping=mapping) + super(NedelecSecondKind, self).__init__(Ps, Ls, k, formdegree, mapping=mapping) diff --git a/FIAT/pointwise_dual.py b/FIAT/pointwise_dual.py new file mode 100644 index 000000000..d403d64d1 --- /dev/null +++ b/FIAT/pointwise_dual.py @@ -0,0 +1,70 @@ +# Copyright (C) 2020 Robert C. Kirby (Baylor University) +# +# This file is part of FIAT (https://www.fenicsproject.org) +# +# SPDX-License-Identifier: LGPL-3.0-or-later +# +import numpy as np +from FIAT.functional import Functional +from FIAT.dual_set import DualSet +from collections import defaultdict +from itertools import zip_longest + + +def compute_pointwise_dual(el, pts): + """Constructs a dual basis to the basis for el as a linear combination + of a set of pointwise evaluations. This is useful when the + prescribed finite element isn't Ciarlet (e.g. the basis functions + are provided explicitly as formulae). Alternately, the element's + given dual basis may involve differentiation, making run-time + interpolation difficult in FIAT clients. The pointwise dual, + consisting only of pointwise evaluations, will effectively replace + these derivatives with (automatically determined) finite + differences. This is exact on the polynomial space, but is an + approximation if applied to functions outside the space. + + :param el: a :class:`FiniteElement`. + :param pts: an iterable of points with the same length as el's + dimension. 
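A sketch mirroring the intended use described above (element and point set are illustrative): replace the derivative-based dual of the cubic Hermite element with pointwise evaluations at a unisolvent set of points.

    from FIAT.hermite import CubicHermite
    from FIAT.pointwise_dual import compute_pointwise_dual
    from FIAT.reference_element import UFCTriangle

    T = UFCTriangle()
    el = CubicHermite(T)
    pts = T.make_lattice(3)                  # 10 lattice points, unisolvent for P3
    dual = compute_pointwise_dual(el, pts)   # same space, point evaluations only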
These points must be unisolvent for the + polynomial space. + :returns: a :class:`DualSet` + """ + nbf = el.space_dimension() + + T = el.ref_el + sd = T.get_spatial_dimension() + + assert np.asarray(pts).shape == (int(nbf / np.prod(el.value_shape())), sd) + + z = tuple([0] * sd) + + nds = [] + + V = el.tabulate(0, pts)[z] + + # Make a square system, invert, and then put it back in the right + # shape so we have (nbf, ..., npts) with more dimensions + # for vector or tensor-valued elements. + alphas = np.linalg.inv(V.reshape((nbf, -1)).T).reshape(V.shape) + + # Each row of alphas gives the coefficients of a functional, + # represented, as elsewhere in FIAT, as a summation of + # components of the input at particular points. + + # This logic picks out the points and components for which the + # weights are actually nonzero to construct the functional. + + pts = np.asarray(pts) + for coeffs in alphas: + pt_dict = defaultdict(list) + nonzero = np.where(np.abs(coeffs) > 1.e-12) + *comp, pt_index = nonzero + + for pt, coeff_comp in zip(pts[pt_index], + zip_longest(coeffs[nonzero], + zip(*comp), fillvalue=())): + pt_dict[tuple(pt)].append(coeff_comp) + + nds.append(Functional(T, el.value_shape(), dict(pt_dict), {}, "node")) + + return DualSet(nds, T, el.entity_dofs()) diff --git a/FIAT/polynomial_set.py b/FIAT/polynomial_set.py index 4f1111ae1..9bf0b71dd 100644 --- a/FIAT/polynomial_set.py +++ b/FIAT/polynomial_set.py @@ -75,7 +75,12 @@ def tabulate(self, pts, jet_order=0): for i in range(jet_order + 1): alphas = mis(self.ref_el.get_spatial_dimension(), i) for alpha in alphas: - D = form_matrix_product(self.dmats, alpha) + if len(self.dmats) > 0: + D = form_matrix_product(self.dmats, alpha) + else: + # special for vertex without defined point location + assert pts == [()] + D = numpy.eye(1) result[alpha] = numpy.dot(self.coeffs, numpy.dot(numpy.transpose(D), base_vals)) diff --git a/FIAT/quadrature.py b/FIAT/quadrature.py index 46ea01cb5..a93b73c1a 100644 --- a/FIAT/quadrature.py +++ b/FIAT/quadrature.py @@ -123,6 +123,63 @@ def __init__(self, ref_el, m): QuadratureRule.__init__(self, ref_el, xs, ws) + +class RadauQuadratureLineRule(QuadratureRule): + """Produce the Gauss--Radau quadrature rules on the interval using + an adaptation of Winkel's Matlab code. + + The quadrature rule uses m points for a degree of precision of 2m-1.
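A minimal usage sketch for this rule (an illustration, assuming the UFC interval cell and the ``get_points``/``get_weights`` accessors that every ``QuadratureRule`` exposes): a three-point right-Radau rule integrates quartics over [0, 1] exactly::

    from FIAT.reference_element import UFCInterval
    from FIAT.quadrature import RadauQuadratureLineRule

    rule = RadauQuadratureLineRule(UFCInterval(), 3)   # three points on [0, 1], right endpoint included
    xs, ws = rule.get_points(), rule.get_weights()
    approx = sum(w * x[0]**4 for x, w in zip(xs, ws))  # quadrature for the integral of x**4 on [0, 1]
    assert abs(approx - 1.0/5.0) < 1e-12               # matches the exact value 1/5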
+ """ + def __init__(self, ref_el, m, right=True): + assert m >= 1 + N = m - 1 + # Use Chebyshev-Gauss-Radau nodes as initial guess for LGR nodes + x = -numpy.cos(2 * numpy.pi * numpy.linspace(0, N, m) / (2 * N + 1)) + + P = numpy.zeros((N + 1, N + 2)) + + xold = 2 + + free = numpy.arange(1, N + 1, dtype='int') + + while numpy.max(numpy.abs(x - xold)) > 5e-16: + xold = x.copy() + + P[0, :] = (-1) ** numpy.arange(0, N + 2) + P[free, 0] = 1 + P[free, 1] = x[free] + + for k in range(2, N + 2): + P[free, k] = ((2 * k - 1) * x[free] * P[free, k - 1] - (k - 1) * P[free, k - 2]) / k + + x[free] = xold[free] - ((1 - xold[free]) / (N + 1)) * (P[free, N] + P[free, N + 1]) / (P[free, N] - P[free, N + 1]) + + # The Legendre-Gauss-Radau Vandermonde + P = P[:, :-1] + # Compute the weights + w = numpy.zeros(N + 1) + w[0] = 2 / (N + 1) ** 2 + w[free] = (1 - x[free])/((N + 1) * P[free, -1])**2 + + if right: + x = numpy.flip(-x) + w = numpy.flip(w) + + xs_ref = x + ws_ref = w + + A, b = reference_element.make_affine_mapping(((-1.,), (1.)), + ref_el.get_vertices()) + + mapping = lambda x: numpy.dot(A, x) + b + + scale = numpy.linalg.det(A) + + xs = tuple([tuple(mapping(x_ref)[0]) for x_ref in xs_ref]) + ws = tuple([scale * w for w in ws_ref]) + + QuadratureRule.__init__(self, ref_el, xs, ws) + + class CollapsedQuadratureTriangleRule(QuadratureRule): """Implements the collapsed quadrature rules defined in Karniadakis & Sherwin by mapping products of Gauss-Jacobi rules diff --git a/FIAT/quadrature_element.py b/FIAT/quadrature_element.py index 58e0933fd..d71e59c7c 100644 --- a/FIAT/quadrature_element.py +++ b/FIAT/quadrature_element.py @@ -19,7 +19,7 @@ class QuadratureElement(FiniteElement): """A set of quadrature points pretending to be a finite element.""" - def __init__(self, ref_el, points): + def __init__(self, ref_el, points, weights=None): # Create entity dofs. entity_dofs = {dim: {entity: [] for entity in entities} for dim, entities in ref_el.get_topology().items()} @@ -33,7 +33,8 @@ def __init__(self, ref_el, points): dual = DualSet(nodes, ref_el, entity_dofs) super(QuadratureElement, self).__init__(ref_el, dual, order=None) - self._points = points # save the quadrature points + self._points = points # save the quadrature points & weights + self._weights = weights def value_shape(self): "The QuadratureElement is scalar valued" diff --git a/FIAT/quadrature_schemes.py b/FIAT/quadrature_schemes.py index de9470361..fc83a68d0 100644 --- a/FIAT/quadrature_schemes.py +++ b/FIAT/quadrature_schemes.py @@ -77,6 +77,8 @@ def create_quadrature(ref_el, degree, scheme="default"): return _fiat_scheme(ref_el, degree) elif scheme == "canonical": return _fiat_scheme(ref_el, degree) + elif scheme == "KMV": # Kong-Mulder-Veldhuizen scheme + return _kmv_lump_scheme(ref_el, degree) else: raise ValueError("Unknown quadrature scheme: %s." 
% scheme) @@ -91,6 +93,238 @@ def _fiat_scheme(ref_el, degree): return make_quadrature(ref_el, num_points_per_axis) +def _kmv_lump_scheme(ref_el, degree): + """Specialized quadrature schemes for P < 6 for KMV simplical elements.""" + + sd = ref_el.get_spatial_dimension() + # set the unit element + if sd == 2: + T = UFCTriangle() + elif sd == 3: + T = UFCTetrahedron() + else: + raise ValueError("Dimension not supported") + + if degree == 1: + x = ref_el.vertices + w = arange(sd + 1, dtype=float64) + if sd == 2: + w[:] = 1.0 / 6.0 + elif sd == 3: + w[:] = 1.0 / 24.0 + else: + raise ValueError("Dimension not supported") + elif degree == 2: + if sd == 2: + x = list(ref_el.vertices) + for e in range(3): + x.extend(ref_el.make_points(1, e, 2)) # edge midpoints + x.extend(ref_el.make_points(2, 0, 3)) # barycenter + w = arange(7, dtype=float64) + w[0:3] = 1.0 / 40.0 + w[3:6] = 1.0 / 15.0 + w[6] = 9.0 / 40.0 + elif sd == 3: + x = list(ref_el.vertices) + x.extend( + [ + (0.0, 0.50, 0.50), + (0.50, 0.0, 0.50), + (0.50, 0.50, 0.0), + (0.0, 0.0, 0.50), + (0.0, 0.50, 0.0), + (0.50, 0.0, 0.0), + ] + ) + # in facets + x.extend( + [ + (0.33333333333333337, 0.3333333333333333, 0.3333333333333333), + (0.0, 0.3333333333333333, 0.3333333333333333), + (0.3333333333333333, 0.0, 0.3333333333333333), + (0.3333333333333333, 0.3333333333333333, 0.0), + ] + ) + # in the cell + x.extend([(1 / 4, 1 / 4, 1 / 4)]) + w = arange(15, dtype=float64) + w[0:4] = 17.0 / 5040.0 + w[4:10] = 2.0 / 315.0 + w[10:14] = 9.0 / 560.0 + w[14] = 16.0 / 315.0 + else: + raise ValueError("Dimension not supported") + + elif degree == 3: + if sd == 2: + alpha = 0.2934695559090401 + beta = 0.2073451756635909 + x = list(ref_el.vertices) + x.extend( + [ + (1 - alpha, alpha), + (alpha, 1 - alpha), + (0.0, 1 - alpha), + (0.0, alpha), + (alpha, 0.0), + (1 - alpha, 0.0), + ] # edge points + ) + x.extend( + [(beta, beta), (1 - 2 * beta, beta), (beta, 1 - 2 * beta)] + ) # points in center of cell + w = arange(12, dtype=float64) + w[0:3] = 0.007436456512410291 + w[3:9] = 0.02442084061702551 + w[9:12] = 0.1103885289202054 + elif sd == 3: + x = list(ref_el.vertices) + x.extend( + [ + (0, 0.685789657581967, 0.314210342418033), + (0, 0.314210342418033, 0.685789657581967), + (0.314210342418033, 0, 0.685789657581967), + (0.685789657581967, 0, 0.314210342418033), + (0.685789657581967, 0.314210342418033, 0.0), + (0.314210342418033, 0.685789657581967, 0.0), + (0, 0, 0.685789657581967), + (0, 0, 0.314210342418033), + (0, 0.314210342418033, 0.0), + (0, 0.685789657581967, 0.0), + (0.314210342418033, 0, 0.0), + (0.685789657581967, 0, 0.0), + ] + ) # 12 points on edges of facets (0-->1-->2) + + x.extend( + [ + (0.21548220313557542, 0.5690355937288492, 0.21548220313557542), + (0.21548220313557542, 0.21548220313557542, 0.5690355937288492), + (0.5690355937288492, 0.21548220313557542, 0.21548220313557542), + (0.0, 0.5690355937288492, 0.21548220313557542), + (0.0, 0.21548220313557542, 0.5690355937288492), + (0.0, 0.21548220313557542, 0.21548220313557542), + (0.5690355937288492, 0.0, 0.21548220313557542), + (0.21548220313557542, 0.0, 0.5690355937288492), + (0.21548220313557542, 0.0, 0.21548220313557542), + (0.5690355937288492, 0.21548220313557542, 0.0), + (0.21548220313557542, 0.5690355937288492, 0.0), + (0.21548220313557542, 0.21548220313557542, 0.0), + ] + ) # 12 points (3 points on each facet, 1st two parallel to edge 0) + alpha = 1 / 6 + x.extend( + [ + (alpha, alpha, 0.5), + (0.5, alpha, alpha), + (alpha, 0.5, alpha), + (alpha, alpha, alpha), + ] + ) # 4 points inside 
the cell + w = arange(32, dtype=float64) + w[0:4] = 0.00068688236002531922325120561367839 + w[4:16] = 0.0015107814913526136472998739890272 + w[16:28] = 0.0050062894680040258624242888174649 + w[28:32] = 0.021428571428571428571428571428571 + else: + raise ValueError("Dimension not supported") + elif degree == 4: + if sd == 2: + alpha = 0.2113248654051871 # 0.2113248654051871 + beta1 = 0.4247639617258106 # 0.4247639617258106 + beta2 = 0.130791593829745 # 0.130791593829745 + x = list(ref_el.vertices) + for e in range(3): + x.extend(ref_el.make_points(1, e, 2)) # edge midpoints + x.extend( + [ + (1 - alpha, alpha), + (alpha, 1 - alpha), + (0.0, 1 - alpha), + (0.0, alpha), + (alpha, 0.0), + (1 - alpha, 0.0), + ] # edge points + ) + x.extend( + [(beta1, beta1), (1 - 2 * beta1, beta1), (beta1, 1 - 2 * beta1)] + ) # points in center of cell + x.extend( + [(beta2, beta2), (1 - 2 * beta2, beta2), (beta2, 1 - 2 * beta2)] + ) # points in center of cell + w = arange(18, dtype=float64) + w[0:3] = 0.003174603174603175 # chk + w[3:6] = 0.0126984126984127 # chk 0.0126984126984127 + w[6:12] = 0.01071428571428571 # chk 0.01071428571428571 + w[12:15] = 0.07878121446939182 # chk 0.07878121446939182 + w[15:18] = 0.05058386489568756 # chk 0.05058386489568756 + else: + raise ValueError("Dimension not supported") + + elif degree == 5: + if sd == 2: + alpha1 = 0.3632980741536860e-00 + alpha2 = 0.1322645816327140e-00 + beta1 = 0.4578368380791611e-00 + beta2 = 0.2568591072619591e-00 + beta3 = 0.5752768441141011e-01 + gamma1 = 0.7819258362551702e-01 + delta1 = 0.2210012187598900e-00 + x = list(ref_el.vertices) + x.extend( + [ + (1 - alpha1, alpha1), + (alpha1, 1 - alpha1), + (0.0, 1 - alpha1), + (0.0, alpha1), + (alpha1, 0.0), + (1 - alpha1, 0.0), + ] # edge points + ) + x.extend( + [ + (1 - alpha2, alpha2), + (alpha2, 1 - alpha2), + (0.0, 1 - alpha2), + (0.0, alpha2), + (alpha2, 0.0), + (1 - alpha2, 0.0), + ] # edge points + ) + x.extend( + [(beta1, beta1), (1 - 2 * beta1, beta1), (beta1, 1 - 2 * beta1)] + ) # points in center of cell + x.extend( + [(beta2, beta2), (1 - 2 * beta2, beta2), (beta2, 1 - 2 * beta2)] + ) # points in center of cell + x.extend( + [(beta3, beta3), (1 - 2 * beta3, beta3), (beta3, 1 - 2 * beta3)] + ) # points in center of cell + x.extend( + [ + (gamma1, delta1), + (1 - gamma1 - delta1, delta1), + (gamma1, 1 - gamma1 - delta1), + (delta1, gamma1), + (1 - gamma1 - delta1, gamma1), + (delta1, 1 - gamma1 - delta1), + ] # edge points + ) + w = arange(30, dtype=float64) + w[0:3] = 0.7094239706792450e-03 + w[3:9] = 0.6190565003676629e-02 + w[9:15] = 0.3480578640489211e-02 + w[15:18] = 0.3453043037728279e-01 + w[18:21] = 0.4590123763076286e-01 + w[21:24] = 0.1162613545961757e-01 + w[24:30] = 0.2727857596999626e-01 + else: + raise ValueError("Dimension not supported") + + # Return scheme + return QuadratureRule(T, x, w) + + def _triangle_scheme(degree): """Return a quadrature scheme on a triangle of specified order. 
Falls back on canonical rule for higher orders.""" diff --git a/FIAT/raviart_thomas.py b/FIAT/raviart_thomas.py index 01ce4dc2a..26816831c 100644 --- a/FIAT/raviart_thomas.py +++ b/FIAT/raviart_thomas.py @@ -9,6 +9,7 @@ finite_element, functional) import numpy from itertools import chain +from FIAT.check_format_variant import check_format_variant def RTSpace(ref_el, deg): @@ -65,41 +66,56 @@ class RTDualSet(dual_set.DualSet): evaluation of normals on facets of codimension 1 and internal moments against polynomials""" - def __init__(self, ref_el, degree): + def __init__(self, ref_el, degree, variant, quad_deg): entity_ids = {} nodes = [] sd = ref_el.get_spatial_dimension() t = ref_el.get_topology() - # codimension 1 facets - for i in range(len(t[sd - 1])): - pts_cur = ref_el.make_points(sd - 1, i, sd + degree) - for j in range(len(pts_cur)): - pt_cur = pts_cur[j] - f = functional.PointScaledNormalEvaluation(ref_el, i, pt_cur) - nodes.append(f) - - # internal nodes. Let's just use points at a lattice - if degree > 0: - cpe = functional.ComponentPointEvaluation - pts = ref_el.make_points(sd, 0, degree + sd) - for d in range(sd): - for i in range(len(pts)): - l_cur = cpe(ref_el, d, (sd,), pts[i]) - nodes.append(l_cur) - - # Q = quadrature.make_quadrature(ref_el, 2 * ( degree + 1 )) - # qpts = Q.get_points() - # Pkm1 = polynomial_set.ONPolynomialSet(ref_el, degree - 1) - # zero_index = tuple([0 for i in range(sd)]) - # Pkm1_at_qpts = Pkm1.tabulate(qpts)[zero_index] - - # for d in range(sd): - # for i in range(Pkm1_at_qpts.shape[0]): - # phi_cur = Pkm1_at_qpts[i, :] - # l_cur = functional.IntegralMoment(ref_el, Q, phi_cur, (d,), (sd,)) - # nodes.append(l_cur) + if variant == "integral": + facet = ref_el.get_facet_element() + # Facet nodes are \int_F v\cdot n p ds where p \in P_{q-1} + # degree is q - 1 + Q = quadrature.make_quadrature(facet, quad_deg) + Pq = polynomial_set.ONPolynomialSet(facet, degree) + Pq_at_qpts = Pq.tabulate(Q.get_points())[tuple([0]*(sd - 1))] + for f in range(len(t[sd - 1])): + for i in range(Pq_at_qpts.shape[0]): + phi = Pq_at_qpts[i, :] + nodes.append(functional.IntegralMomentOfScaledNormalEvaluation(ref_el, Q, phi, f)) + + # internal nodes. These are \int_T v \cdot p dx where p \in P_{q-2}^d + if degree > 0: + Q = quadrature.make_quadrature(ref_el, quad_deg) + qpts = Q.get_points() + Pkm1 = polynomial_set.ONPolynomialSet(ref_el, degree - 1) + zero_index = tuple([0 for i in range(sd)]) + Pkm1_at_qpts = Pkm1.tabulate(qpts)[zero_index] + + for d in range(sd): + for i in range(Pkm1_at_qpts.shape[0]): + phi_cur = Pkm1_at_qpts[i, :] + l_cur = functional.IntegralMoment(ref_el, Q, phi_cur, (d,), (sd,)) + nodes.append(l_cur) + + elif variant == "point": + # codimension 1 facets + for i in range(len(t[sd - 1])): + pts_cur = ref_el.make_points(sd - 1, i, sd + degree) + for j in range(len(pts_cur)): + pt_cur = pts_cur[j] + f = functional.PointScaledNormalEvaluation(ref_el, i, pt_cur) + nodes.append(f) + + # internal nodes. Let's just use points at a lattice + if degree > 0: + cpe = functional.ComponentPointEvaluation + pts = ref_el.make_points(sd, 0, degree + sd) + for d in range(sd): + for i in range(len(pts)): + l_cur = cpe(ref_el, d, (sd,), pts[i]) + nodes.append(l_cur) # sets vertices (and in 3d, edges) to have no nodes for i in range(sd - 1): @@ -128,13 +144,30 @@ def __init__(self, ref_el, degree): class RaviartThomas(finite_element.CiarletElement): - """The Raviart-Thomas finite element""" + """ + The Raviart Thomas element + + :arg ref_el: The reference element. 
+ :arg k: The degree. + :arg variant: optional variant specifying the types of nodes. + + variant can be chosen from ["point", "integral", "integral(quadrature_degree)"] + "point" -> dofs are evaluated by point evaluation. Note that this variant has suboptimal + convergence order in the H(div)-norm + "integral" -> dofs are evaluated by quadrature rule. The quadrature degree is chosen to integrate + polynomials of degree 5*k so that most expressions will be interpolated exactly. This is important + when you want to have (nearly) divergence-preserving interpolation. + "integral(quadrature_degree)" -> dofs are evaluated by quadrature rule of degree quadrature_degree + """ + + def __init__(self, ref_el, k, variant=None): + + degree = k - 1 - def __init__(self, ref_el, q): + (variant, quad_deg) = check_format_variant(variant, degree, "Raviart Thomas") - degree = q - 1 poly_set = RTSpace(ref_el, degree) - dual = RTDualSet(ref_el, degree) + dual = RTDualSet(ref_el, degree, variant, quad_deg) formdegree = ref_el.get_spatial_dimension() - 1 # (n-1)-form super(RaviartThomas, self).__init__(poly_set, dual, degree, formdegree, mapping="contravariant piola") diff --git a/FIAT/reference_element.py b/FIAT/reference_element.py index 85a405890..662a668c6 100644 --- a/FIAT/reference_element.py +++ b/FIAT/reference_element.py @@ -6,6 +6,7 @@ # # Modified by David A. Ham (david.ham@imperial.ac.uk), 2014 # Modified by Lizao Li (lzlarryli@gmail.com), 2016 + """ Abstract class and particular implementations of finite element reference simplex geometry/topology. @@ -482,6 +483,15 @@ def __init__(self): topology = {0: {0: (0,)}} super(Point, self).__init__(POINT, verts, topology) + def construct_subelement(self, dimension): + """Constructs the reference element of a cell subentity + specified by subelement dimension. + + :arg dimension: subentity dimension (integer). Must be zero. + """ + assert dimension == 0 + return self + class DefaultLine(Simplex): """This is the reference line with vertices (-1.0,) and (1.0,).""" @@ -968,6 +978,8 @@ def ufc_cell(cell): return UFCQuadrilateral() elif celltype == "hexahedron": return UFCHexahedron() + elif celltype == "vertex": + return ufc_simplex(0) elif celltype == "interval": return ufc_simplex(1) elif celltype == "triangle": diff --git a/FIAT/serendipity.py b/FIAT/serendipity.py index e5304ad2e..74b158e62 100644 --- a/FIAT/serendipity.py +++ b/FIAT/serendipity.py @@ -6,7 +6,7 @@ # # Modified by David A. 
Ham (david.ham@imperial.ac.uk), 2019 -from sympy import symbols, legendre, Array, diff +from sympy import symbols, legendre, Array, diff, lambdify import numpy as np from FIAT.finite_element import FiniteElement from FIAT.lagrange import Lagrange @@ -14,6 +14,9 @@ from FIAT.polynomial_set import mis from FIAT.reference_element import (compute_unflattening_map, flatten_reference_cube) +from FIAT.reference_element import make_lattice + +from FIAT.pointwise_dual import compute_pointwise_dual x, y, z = symbols('x y z') variables = (x, y, z) @@ -101,7 +104,8 @@ def __init__(self, ref_el, degree): super(Serendipity, self).__init__(ref_el=ref_el, dual=None, order=degree, formdegree=formdegree) self.basis = {(0,)*dim: Array(s_list)} - + self.basis_callable = {(0,)*dim: lambdify(variables[:dim], Array(s_list), + modules="numpy", dummify=True)} topology = ref_el.get_topology() unflattening_map = compute_unflattening_map(topology) unflattened_entity_ids = {} @@ -122,6 +126,8 @@ def __init__(self, ref_el, degree): self._degree = degree self.flat_el = flat_el + self.dual = compute_pointwise_dual(self, unisolvent_pts(ref_el, degree)) + def degree(self): return self._degree + 1 @@ -137,7 +143,7 @@ def get_coeffs(self): def tabulate(self, order, points, entity=None): if entity is None: - entity = (self.ref_el.get_spatial_dimension(), 0) + entity = (self.ref_el.get_dimension(), 0) entity_dim, entity_id = entity transform = self.ref_el.get_entity_transform(entity_dim, entity_id) @@ -149,21 +155,22 @@ def tabulate(self, order, points, entity=None): raise NotImplementedError('no tabulate method for serendipity elements of dimension 1 or less.') if dim >= 4: raise NotImplementedError('tabulate does not support higher dimensions than 3.') + points = np.asarray(points) + npoints, pointdim = points.shape for o in range(order + 1): alphas = mis(dim, o) for alpha in alphas: try: - polynomials = self.basis[alpha] + callable = self.basis_callable[alpha] except KeyError: polynomials = diff(self.basis[(0,)*dim], *zip(variables, alpha)) + callable = lambdify(variables[:dim], polynomials, modules="numpy", dummify=True) self.basis[alpha] = polynomials - T = np.zeros((len(polynomials), len(points))) - for i in range(len(points)): - subs = {v: points[i][k] for k, v in enumerate(variables[:dim])} - for j, f in enumerate(polynomials): - T[j, i] = f.evalf(subs=subs) + self.basis_callable[alpha] = callable + tabulation = callable(*(points[:, i] for i in range(pointdim))) + T = np.asarray([np.broadcast_to(tab, (npoints, )) + for tab in tabulation]) phivals[alpha] = T - return phivals def entity_dofs(self): @@ -235,3 +242,75 @@ def i_lambda_0(i, dx, dy, dz, x_mid, y_mid, z_mid): for l in range(6, i + 1) for j in range(l-5) for k in range(j+1)]) return IL + + +def unisolvent_pts(K, deg): + flat_el = flatten_reference_cube(K) + dim = flat_el.get_spatial_dimension() + if dim == 2: + return unisolvent_pts_quad(flat_el, deg) + elif dim == 3: + return unisolvent_pts_hex(flat_el, deg) + else: + raise ValueError("Serendipity only defined for quads and hexes") + + +def unisolvent_pts_quad(K, deg): + """Gives a set of unisolvent points for the quad serendipity space of order deg. 
+ The S element is not dual to these nodes, but a dual basis can be constructed from them.""" + L = K.construct_subelement(1) + vs = np.asarray(K.vertices) + pts = [pt for pt in K.vertices] + Lpts = make_lattice(L.vertices, deg, 1) + for e in K.topology[1]: + Fmap = K.get_entity_transform(1, e) + epts = [tuple(Fmap(pt)) for pt in Lpts] + pts.extend(epts) + if deg > 3: + dx0 = (vs[1, :] - vs[0, :]) / (deg-2) + dx1 = (vs[2, :] - vs[0, :]) / (deg-2) + + internal_nodes = [tuple(vs[0, :] + dx0 * i + dx1 * j) + for i in range(1, deg-2) + for j in range(1, deg-1-i)] + pts.extend(internal_nodes) + + return pts + + +def unisolvent_pts_hex(K, deg): + """Gives a set of unisolvent points for the hex serendipity space of order deg. + The S element is not dual to these nodes, but a dual basis can be constructed from them.""" + L = K.construct_subelement(1) + F = K.construct_subelement(2) + vs = np.asarray(K.vertices) + pts = [pt for pt in K.vertices] + Lpts = make_lattice(L.vertices, deg, 1) + for e in K.topology[1]: + Fmap = K.get_entity_transform(1, e) + epts = [tuple(Fmap(pt)) for pt in Lpts] + pts.extend(epts) + if deg > 3: + fvs = np.asarray(F.vertices) + # Planar points to map to each face + dx0 = (fvs[1, :] - fvs[0, :]) / (deg-2) + dx1 = (fvs[2, :] - fvs[0, :]) / (deg-2) + + Fpts = [tuple(fvs[0, :] + dx0 * i + dx1 * j) + for i in range(1, deg-2) + for j in range(1, deg-1-i)] + for f in K.topology[2]: + Fmap = K.get_entity_transform(2, f) + pts.extend([tuple(Fmap(pt)) for pt in Fpts]) + if deg > 5: + dx0 = np.asarray([1., 0, 0]) / (deg-4) + dx1 = np.asarray([0, 1., 0]) / (deg-4) + dx2 = np.asarray([0, 0, 1.]) / (deg-4) + + Ipts = [tuple(vs[0, :] + dx0 * i + dx1 * j + dx2 * k) + for i in range(1, deg-4) + for j in range(1, deg-3-i) + for k in range(1, deg-2-i-j)] + pts.extend(Ipts) + + return pts diff --git a/README.rst b/README.rst index 011d89856..4d9257539 100644 --- a/README.rst +++ b/README.rst @@ -16,37 +16,22 @@ FIAT is part of the FEniCS Project. For more information, visit http://www.fenicsproject.org -Documentation -============= +.. image:: https://github.com/FEniCS/fiat/workflows/FIAT%20CI/badge.svg + :target: https://github.com/FEniCS/fiat/actions?query=workflow%3A%22FIAT+CI%22 + :alt: Build Status -Documentation can be viewed at http://fenics-fiat.readthedocs.org/. +.. image:: https://coveralls.io/repos/github/FEniCS/fiat/badge.svg?branch=master + :target: https://coveralls.io/github/FEniCS/fiat?branch=master + :alt: Coverage Status .. image:: https://readthedocs.org/projects/fenics-fiat/badge/?version=latest :target: http://fenics.readthedocs.io/projects/fiat/en/latest/?badge=latest :alt: Documentation Status +Documentation +============= -Automated Testing ------------------ - -We use CircleCI to perform automated -testing. - -.. image:: https://circleci.com/gh/FEniCS/fiat.svg?style=shield - :target: https://circleci.com/gh/FEniCS/fiat - :alt: Build Status - - -Code Coverage -------------- - -Code coverage reports can be viewed at -https://coveralls.io/github/FEniCS/fiat. - -.. image:: https://coveralls.io/repos/github/FEniCS/fiat/badge.svg?branch=master - :target: https://coveralls.io/github/FEniCS/fiat?branch=master - :alt: Coverage Status - +Documentation can be viewed at http://fenics-fiat.readthedocs.org/. 
License ======= diff --git a/bitbucket-pipelines.yml b/bitbucket-pipelines.yml index 2227f7921..d8204a0a0 100644 --- a/bitbucket-pipelines.yml +++ b/bitbucket-pipelines.yml @@ -8,7 +8,7 @@ pipelines: - apt-get install -y git python3-minimal python3-pip python3-setuptools python3-wheel --no-install-recommends - - pip3 install flake8 pytest pytest-cases + - pip3 install flake8 pytest - pip3 install . - python3 -m flake8 - DATA_REPO_GIT="" python3 -m pytest -v test/ diff --git a/doc/sphinx/source/conf.py b/doc/sphinx/source/conf.py index 4a1dae1eb..cf9642bc2 100644 --- a/doc/sphinx/source/conf.py +++ b/doc/sphinx/source/conf.py @@ -275,7 +275,7 @@ def run_apidoc(_): os.path.pardir, os.path.pardir)) apidoc_dir = os.path.join(sphinx_source_dir, "api-doc") - from sphinx.apidoc import main + from sphinx.ext.apidoc import main for module in modules: # Generate .rst files ready for autodoc module_dir = os.path.join(repo_dir, module) diff --git a/doc/sphinx/source/manual.rst b/doc/sphinx/source/manual.rst index 36288f22f..ca9a26c30 100644 --- a/doc/sphinx/source/manual.rst +++ b/doc/sphinx/source/manual.rst @@ -11,7 +11,7 @@ FIAT (FInite element Automatic Tabulator) is a Python package for defining and evaluating a wide range of different finite element basis functions for numerical partial differential equations. It is intended to make ``difficult'' elements such as high-order -Brezzi-Douglas-Marini~\cite{} elements usable by providing +Brezzi-Douglas-Marini [BDM]_ elements usable by providing abstractions so that they may be implemented succinctly and hence treated as a black box. FIAT is intended for use at two different levels. For one, it is designed to provide a standard API for finite @@ -21,7 +21,7 @@ rapidly deploy new kinds of finite elements without expensive symbolic computation or tedious algebraic manipulation. It is my goal that a large number of people use FIAT without ever knowing it. Thanks to several ongoing projects such as -Sundance~\cite{}, FFC~\cite{}, and PETSc~\cite{}, it is becoming +Sundance [Sundance]_, FFC [FFC]_, and PETSc [PETSc]_, it is becoming possible to to define finite element methods using mathematical notation in some high-level or domain-specific language. The primary shortcoming of these projects is their lack of support for general @@ -29,8 +29,8 @@ elements. It is one thing to ``provide hooks'' for general elements, but absent a tool such as FIAT, these hooks remain mainly empty. As these projects mature, I hope to expose users of the finite element method to the exotic world of potentially high-degree finite element -on unstructured grids using the best elements in $H^1$, -$H(\mathrm{div})$, and $H(\mathrm{curl})$. +on unstructured grids using the best elements in :math:`H^1`, +:math:`H(\mathrm{div})`, and :math:`H(\mathrm{curl})`. In this brief (and still developing) guide, I will first present the high-level API for users who wish to instantiate a finite @@ -39,155 +39,150 @@ derivatives at some quadrature points. Then, I will explain some of the underlying infrastructure so as to demonstrate how to add new elements. -\chapter{Using FIAT: A tutorial with Lagrange elements} -\section{Importing FIAT} +Using FIAT: A tutorial with Lagrange elements +============================================= +Importing FIAT +-------------- FIAT is organized as a package in Python, consisting of several -modules. In order to get some of the packages, we use the line -\begin{verbatim} -from FIAT import Lagrange, quadrature, shapes -\end{verbatim} +modules. 
In order to get some of the packages, we use the line :: + from FIAT import Lagrange, quadrature, shapes This loads several modules for the Lagrange elements, quadrature rules, and the simplicial element shapes which FIAT implements. The roles each of these plays will become clear shortly. -\section{Important note} +Important note +-------------- Throughout, FIAT defines the reference elements based on the interval -$(-1,1)$ rather than the more common $(0,1)$. So, the one-dimensional -reference element is $(-1,1)$, the three vertices of the reference -triangle are $(-1,-1),(1,-1),(1,-1)$, and the four vertices of the -reference tetrahedron are $(-1,-1,-1),(1,-1,-1),(-1,1,-1),(-1,-1,1)$. +:math:`(-1,1)` rather than the more common :math:`(0,1)`. So, the one-dimensional +reference element is :math:`(-1,1)`, the three vertices of the reference +triangle are :math:`(-1,-1),(1,-1),(1,-1)`, and the four vertices of the +reference tetrahedron are :math:`(-1,-1,-1),(1,-1,-1),(-1,1,-1),(-1,-1,1)`. -\section{Instantiating elements} +Instantiating elements +---------------------- FIAT uses a lightweight object-oriented infrastructure to define -finite elements. The \verb.Lagrange. module contains a class -\verb.Lagrange. modeling the Lagrange finite element family. This -class is a subclass of some \verb.FiniteElement. class contained in -another module (\verb.polynomial. to be precise). So, having imported -the \verb.Lagrange. module, we can create the Lagrange element of -degree \verb.2. on triangles by -\begin{verbatim} -shape = shapes.TRIANGLE -degree = 2 -U = Lagrange.Lagrange( shape , degree ) -\end{verbatim} -Here, \verb/shapes.TRIANGLE/ is an integer code indicating the two -dimensional simplex. \verb.shapes. also defines -\verb.LINE. and \verb.TETRAHEDRON.. Most of the +finite elements. The ``Lagrange`` module contains a class +``Lagrange`` modeling the Lagrange finite element family. This +class is a subclass of some ``FiniteElement`` class contained in +another module (``polynomial`` to be precise). So, having imported +the ``Lagrange`` module, we can create the Lagrange element of +degree ``2`` on triangles by :: + shape = shapes.TRIANGLE + degree = 2 + U = Lagrange.Lagrange( shape , degree ) +Here, ``shapes.TRIANGLE`` is an integer code indicating the two +dimensional simplex. ``shapes`` also defines +``LINE`` and ``TETRAHEDRON``. Most of the upper-level interface to FIAT is dimensionally abstracted over element shape. -The class \verb.FiniteElement. supports three methods, modeled on the +The class ``FiniteElement`` supports three methods, modeled on the abstract definition of Ciarlet. These methods are -\verb.domain_shape()., \verb.function_space()., and \verb.dual_basis().. +``domain_shape()``, ``function_space()``, and ``dual_basis()``. The first of these returns the code for the shape and the second returns the nodes of the finite element (including information related to topological association of nodes with mesh entities, needed for creating degree of freedom orderings). -\section{Quadrature rules} +Quadrature rules +================ FIAT implements arbitrary-order collapsed quadrature, as discussed in Karniadakis and Sherwin~\cite{}, for the simplex of dimension one, two, or three. The simplest way to get a quadrature rule is through -the function \verb.make_quadrature(shape,m)., which takes a shape code +the function ```make_quadrature(shape,m)```, which takes a shape code and an integer indicating the number of points per direction. 
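(Recent FIAT also provides the lumped Kong-Mulder-Veldhuizen rules added in ``quadrature_schemes.py`` earlier in this diff; a small sketch, assuming the UFC triangle and the ``create_quadrature`` entry point used in the KMV tests::

    from FIAT import create_quadrature
    from FIAT.reference_element import UFCTriangle

    rule = create_quadrature(UFCTriangle(), 2, scheme="KMV")   # 7 points: vertices, edge midpoints, barycenter
    assert abs(sum(rule.get_weights()) - 0.5) < 1e-12          # weights sum to the reference triangle's area

The weights returned here are the lumped values 1/40, 1/15 and 9/40 tabulated in ``_kmv_lump_scheme`` above.)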
For building element matrices using quadratics, we will typically need a -second or third order integration rule, so we can get such a rule by -\begin{verbatim} ->>> Q = quadrature.make_quadrature( shape , 2 ) -\end{verbatim} +second or third order integration rule, so we can get such a rule by :: + >>> Q = quadrature.make_quadrature( shape , 2 ) This uses two points in each direction on the reference square, then maps them to the reference triangle. We may get a -\verb/Numeric.array/ of the quadrature weights with the method -\verb/Q.get_weights()/ and a list of tuples storing the quadrature -points with the method \verb/Q.get_points()/. +``Numeric.array`` of the quadrature weights with the method +``Q.get_weights()`` and a list of tuples storing the quadrature +points with the method ``Q.get_points()``. -\section{Tabulation} +Tabulation +========== FIAT provides functions for tabulating the element basis functions and -their derivatives. To get the \verb.FunctionSpace. object, we do -\begin{verbatim} ->>> Ufs = U.function_space() -\end{verbatim} +their derivatives. To get the ``FunctionSpace`` object, we do :: + >>> Ufs = U.function_space() To get the values of each basis function at each of the quadrature -points, we use the \verb.tabulate(). method -\begin{verbatim} ->>> Ufs.tabulate( Q.get_points() ) -array([[ 0.22176167, -0.12319761, -0.11479229, -0.06377178], - [-0.11479229, -0.06377178, 0.22176167, -0.12319761], - [-0.10696938, 0.18696938, -0.10696938, 0.18696938], - [ 0.11074286, 0.19356495, 0.41329796, 0.72239423], - [ 0.41329796, 0.72239423, 0.11074286, 0.19356495], - [ 0.47595918, 0.08404082, 0.47595918, 0.08404082]]) -\end{verbatim} -This returns a two-dimensional \verb/Numeric.array/ with rows for each +points, we use the ``tabulate()`` method + >>> Ufs.tabulate( Q.get_points() ) + array([[ 0.22176167, -0.12319761, -0.11479229, -0.06377178], + [-0.11479229, -0.06377178, 0.22176167, -0.12319761], + [-0.10696938, 0.18696938, -0.10696938, 0.18696938], + [ 0.11074286, 0.19356495, 0.41329796, 0.72239423], + [ 0.41329796, 0.72239423, 0.11074286, 0.19356495], + [ 0.47595918, 0.08404082, 0.47595918, 0.08404082]]) +This returns a two-dimensional ``Numeric.array`` with rows for each basis function and columns for each input point. Also, finite element codes require tabulation of the basis functions' -derivatives. Each \verb/FunctionSpace/ object also provides a method -\verb/tabulate_jet(i,xs)/ that returns a list of Python dictionaries. -The \verb.i.th entry of the list is a dictionary storing the values of -all \verb.i.th order derivatives. Each dictionary maps a multiindex -(a tuple of length \verb.i.) to the table of the associated partial -derivatives of the basis functions at those points. For example, -\begin{verbatim} ->>> Ufs_jet = Ufs.tabulate_jet( 1 , Q.get_points() ) -\end{verbatim} +derivatives. Each ``FunctionSpace`` object also provides a method +``tabulate_jet(i,xs)`` that returns a list of Python dictionaries. +The ``i``th entry of the list is a dictionary storing the values of +all ``i``th order derivatives. Each dictionary maps a multiindex +(a tuple of length ``i``) to the table of the associated partial +derivatives of the basis functions at those points. For example, :: + >>> Ufs_jet = Ufs.tabulate_jet( 1 , Q.get_points() ) tabulates the zeroth and first partial derivatives of the function -space at the quadrature points. 
Then, -\begin{verbatim} ->>> Ufs_jet[0] -{(0, 0): array([[ 0.22176167, -0.12319761, -0.11479229, -0.06377178], - [-0.11479229, -0.06377178, 0.22176167, -0.12319761], - [-0.10696938, 0.18696938, -0.10696938, 0.18696938], - [ 0.11074286, 0.19356495, 0.41329796, 0.72239423], - [ 0.41329796, 0.72239423, 0.11074286, 0.19356495], - [ 0.47595918, 0.08404082, 0.47595918, 0.08404082]])} -\end{verbatim} +space at the quadrature points. Then, :: + >>> Ufs_jet[0] + {(0, 0): array([[ 0.22176167, -0.12319761, -0.11479229, -0.06377178], + [-0.11479229, -0.06377178, 0.22176167, -0.12319761], + [-0.10696938, 0.18696938, -0.10696938, 0.18696938], + [ 0.11074286, 0.19356495, 0.41329796, 0.72239423], + [ 0.41329796, 0.72239423, 0.11074286, 0.19356495], + [ 0.47595918, 0.08404082, 0.47595918, 0.08404082]])} gives us a dictionary mapping the only zeroth-order partial derivative to the values of the basis functions at the quadrature points. More interestingly, we may get the first derivatives in the x- and y- -directions with -\begin{verbatim} ->>> Ufs_jet[1][(1,0)] -array([[-0.83278049, -0.06003983, 0.14288254, 0.34993778], - [-0.14288254, -0.34993778, 0.83278049, 0.06003983], - [ 0. , 0. , 0. , 0. ], - [ 0.31010205, 1.28989795, 0.31010205, 1.28989795], - [-0.31010205, -1.28989795, -0.31010205, -1.28989795], - [ 0.97566304, 0.40997761, -0.97566304, -0.40997761]]) ->>> Ufs_jet[1][(0,1)] -array([[ -8.32780492e-01, -6.00398310e-02, 1.42882543e-01, 3.49937780e-01], - [ 7.39494156e-17, 4.29608279e-17, 4.38075188e-17, 7.47961065e-17], - [ -1.89897949e-01, 7.89897949e-01, -1.89897949e-01, 7.89897949e-01], - [ 3.57117457e-01, 1.50062220e-01, 1.33278049e+00, 5.60039831e-01], - [ 1.02267844e+00, -7.29858118e-01, 4.70154051e-02, -1.13983573e+00], - [ -3.57117457e-01, -1.50062220e-01, -1.33278049e+00, -5.60039831e-01]]) -\end{verbatim} - -\chapter{Lower-level API} +directions with :: + >>> Ufs_jet[1][(1,0)] + array([[-0.83278049, -0.06003983, 0.14288254, 0.34993778], + [-0.14288254, -0.34993778, 0.83278049, 0.06003983], + [ 0. , 0. , 0. , 0. ], + [ 0.31010205, 1.28989795, 0.31010205, 1.28989795], + [-0.31010205, -1.28989795, -0.31010205, -1.28989795], + [ 0.97566304, 0.40997761, -0.97566304, -0.40997761]]) + >>> Ufs_jet[1][(0,1)] + array([[ -8.32780492e-01, -6.00398310e-02, 1.42882543e-01, 3.49937780e-01], + [ 7.39494156e-17, 4.29608279e-17, 4.38075188e-17, 7.47961065e-17], + [ -1.89897949e-01, 7.89897949e-01, -1.89897949e-01, 7.89897949e-01], + [ 3.57117457e-01, 1.50062220e-01, 1.33278049e+00, 5.60039831e-01], + [ 1.02267844e+00, -7.29858118e-01, 4.70154051e-02, -1.13983573e+00], + [ -3.57117457e-01, -1.50062220e-01, -1.33278049e+00, -5.60039831e-01]]) + +Lower-level API +=============== Not only does FIAT provide a high-level library interface for users to evaluate existing finite element bases, but it also provides lower-level tools. Here, we survey these tools module-by-module. -\section{shapes.py} +shapes.py +--------- FIAT currenly only supports simplicial reference elements, but does so in a fairly dimensionally-independent way (up to tetrahedra). -\section{jacobi.py} +jacobi.py +--------- This is a low-level module that tabulates the Jacobi polynomials and their derivatives, and also provides Gauss-Jacobi points. This module will seldom if ever be imported directly by users. For more information, consult the documentation strings and source code. -\section{expansions.py} +expansions.py +------------- FIAT relies on orthonormal polynomial bases. 
These are constructed by mapping appropriate Jacobi polynomials from the reference cube to the reference simplex, as described in the reference of Karniadakis and -Sherwin~\cite{}. The module \texttt{expansions.py} implements these +Sherwin~\cite{}. The module ``expansions.py`` implements these orthonormal expansions. This is also a low-level module that will infrequently be used directly, but it forms the backbone for the -module \texttt{polynomial.py} +module ``polynomial.py``. -\section{quadrature.py} +quadrature.py +------------- FIAT makes heavy use of numerical quadrature, both internally and in the user interface. Internally, many function spaces or degrees of freedom are defined in terms of integral quantities having certain @@ -201,42 +196,49 @@ rules. In the future, we hope to have the best symmetric existing rules integrated into FIAT. Unless one is modifying the quadrature rules available, all of the -functionality of \texttt{quadrature.py} may be accessed through the -single function \verb.make_quadrature.. +functionality of ``quadrature.py`` may be accessed through the +single function ``make_quadrature``. This function takes the code for a shape and the number of points in each coordinate direction and returns a quadrature rule. Internally, there is a lightweight class hierarchy rooted at an abstract -\texttt{QuadratureRule} class, where the quadrature rules for +``QuadratureRule`` class, where the quadrature rules for different shapes are actually different classes. However, the dynamic typing of Python relieves the user from these considerations. The -interface to an instance consists in the following methods -\begin{itemize} -\item \verb.get_points()., which returns a list of the quadrature +interface to an instance consists in the following methods. + +- ``get_points()``, which returns a list of the quadrature points, each stored as a tuple. For dimensional uniformity, one-dimensional quadrature rules are stored as lists of 1-tuples rather than as lists of numbers. -\item \verb.get_weights()., which returns a \texttt{Numeric.array} +- ``get_weights()``, which returns a ``Numeric.array`` of quadrature weights. -\item \verb.integrate(f)., which takes a callable object \texttt{f} +- ``integrate(f)``, which takes a callable object ``f`` and returns the (approximate) integral over the domain -\item Also, the \verb.__call__. method is overloaded so that a +- Also, the ``__call__`` method is overloaded so that a quadrature rule may be applied to a callable object. This is - syntactic sugar on top of the \texttt{integrate} method. -\end{itemize} + syntactic sugar on top of the ``integrate`` method. -\section{polynomial.py} -The \texttt{polynomial} module provides the bulk of the classes +polynomial.py +------------- +The ``polynomial`` module provides the bulk of the classes needed to represent polynomial bases and finite element spaces. -The class \texttt{PolynomialBase} provides a high-level access to +The class ``PolynomialBase`` provides a high-level access to the orthonormal expansion bases; it is typically not instantiated directly in an application, but all other kinds of polynomial bases are constructed as linear combinations of the members of a -\texttt{PolynomialBase} instance. The module provides classes for +``PolynomialBase`` instance. The module provides classes for scalar and vector-valued polynomial sets, as well as an interface to individual polynomials and finite element spaces. 
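In current FIAT the workhorse for such bases is ``ONPolynomialSet`` in ``polynomial_set.py``; a brief sketch of the tabulation pattern used throughout this diff (for instance in ``RTDualSet`` above)::

    from FIAT.reference_element import UFCTriangle
    from FIAT.polynomial_set import ONPolynomialSet
    from FIAT.quadrature import make_quadrature

    T = UFCTriangle()
    P2 = ONPolynomialSet(T, 2)                   # orthonormal basis for degree-2 polynomials on the triangle
    Q = make_quadrature(T, 3)                    # collapsed Gauss rule, 3 points per direction
    vals = P2.tabulate(Q.get_points())[(0, 0)]   # rows: the 6 basis functions, columns: quadrature points
    assert vals.shape == (6, len(Q.get_points()))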
-\subsection{\texttt{PolynomialBase}} +PolynomialBase +^^^^^^^^^^^^^^ -\subsection{\texttt{PolynomialSet}} -The \texttt{PolynomialSet} function is a factory function interface into +PolynomialSet +^^^^^^^^^^^^^ +The ``PolynomialSet`` function is a factory function interface into the hierarchy + +.. [BDM] Brezzi, Franco; Douglas, Jim, Jr.; Marini, L. D. "Two families of mixed finite elements for second order elliptic problems". Numerische Mathematik. vol 47. no. 2. June 1985. 217—235. doi:10.1007/BF01389710 +.. [Sundance] http://www.math.ttu.edu/~klong/Sundance/html/index.html +.. [FFC] https://bitbucket.org/fenics-project/ffc/src/master/ +.. [PETSc] https://www.mcs.anl.gov/petsc/ \ No newline at end of file diff --git a/setup.cfg b/setup.cfg index 77b2a6048..54d937f11 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,6 @@ [flake8] -ignore = E501,E226,E731,W504 +ignore = E501,E226,E731,W504, + E741 # ambiguous variable name exclude = .git,__pycache__,doc/sphinx/source/conf.py,build,dist min-version = 3.0 diff --git a/test/unit/test_awc.py b/test/unit/test_awc.py new file mode 100644 index 000000000..b876c493a --- /dev/null +++ b/test/unit/test_awc.py @@ -0,0 +1,145 @@ +import numpy as np +from FIAT import ufc_simplex, ArnoldWinther, make_quadrature, expansions + + +def test_dofs(): + line = ufc_simplex(1) + T = ufc_simplex(2) + T.vertices = np.random.rand(3, 2) + AW = ArnoldWinther(T, 3) + + # check Kronecker property at vertices + + bases = [[[1, 0], [0, 0]], [[0, 1], [1, 0]], [[0, 0], [0, 1]]] + + vert_vals = AW.tabulate(0, T.vertices)[(0, 0)] + for i in range(3): + for j in range(3): + assert np.allclose(vert_vals[3*i+j, :, :, i], bases[j]) + for k in (1, 2): + assert np.allclose(vert_vals[3*i+j, :, :, (i+k) % 3], np.zeros((2, 2))) + + # check edge moments + Qline = make_quadrature(line, 6) + + linebfs = expansions.LineExpansionSet(line) + linevals = linebfs.tabulate(1, Qline.pts) + + # n, n moments + for ed in range(3): + n = T.compute_scaled_normal(ed) + wts = np.asarray(Qline.wts) + nqpline = len(wts) + + vals = AW.tabulate(0, Qline.pts, (1, ed))[(0, 0)] + nnvals = np.zeros((30, nqpline)) + for i in range(30): + for j in range(len(wts)): + nnvals[i, j] = n @ vals[i, :, :, j] @ n + + nnmoments = np.zeros((30, 2)) + + for bf in range(30): + for k in range(nqpline): + for m in (0, 1): + nnmoments[bf, m] += wts[k] * nnvals[bf, k] * linevals[m, k] + + for bf in range(30): + if bf != AW.dual.entity_ids[1][ed][0] and bf != AW.dual.entity_ids[1][ed][2]: + assert np.allclose(nnmoments[bf, :], np.zeros(2)) + + # n, t moments + for ed in range(3): + n = T.compute_scaled_normal(ed) + t = T.compute_edge_tangent(ed) + wts = np.asarray(Qline.wts) + nqpline = len(wts) + + vals = AW.tabulate(0, Qline.pts, (1, ed))[(0, 0)] + ntvals = np.zeros((30, nqpline)) + for i in range(30): + for j in range(len(wts)): + ntvals[i, j] = n @ vals[i, :, :, j] @ t + + ntmoments = np.zeros((30, 2)) + + for bf in range(30): + for k in range(nqpline): + for m in (0, 1): + ntmoments[bf, m] += wts[k] * ntvals[bf, k] * linevals[m, k] + + for bf in range(30): + if bf != AW.dual.entity_ids[1][ed][1] and bf != AW.dual.entity_ids[1][ed][3]: + assert np.allclose(ntmoments[bf, :], np.zeros(2)) + + # check internal dofs + Q = make_quadrature(T, 6) + qpvals = AW.tabulate(0, Q.pts)[(0, 0)] + const_moms = qpvals @ Q.wts + assert np.allclose(const_moms[:21], np.zeros((21, 2, 2))) + assert np.allclose(const_moms[24:], np.zeros((6, 2, 2))) + assert np.allclose(const_moms[21:24, 0, 0], np.asarray([1, 0, 0])) + assert 
np.allclose(const_moms[21:24, 0, 1], np.asarray([0, 1, 0])) + assert np.allclose(const_moms[21:24, 1, 0], np.asarray([0, 1, 0])) + assert np.allclose(const_moms[21:24, 1, 1], np.asarray([0, 0, 1])) + + +def frob(a, b): + return a.ravel() @ b.ravel() + + +def test_projection(): + T = ufc_simplex(2) + T.vertices = np.asarray([(0.0, 0.0), (1.0, 0.0), (0.5, 2.1)]) + + AW = ArnoldWinther(T, 3) + + Q = make_quadrature(T, 4) + qpts = np.asarray(Q.pts) + qwts = np.asarray(Q.wts) + nqp = len(Q.wts) + + nbf = 24 + m = np.zeros((nbf, nbf)) + b = np.zeros((24,)) + rhs_vals = np.zeros((2, 2, nqp)) + + bfvals = AW.tabulate(0, qpts)[(0, 0)][:nbf, :, :, :] + + for i in range(nbf): + for j in range(nbf): + for k in range(nqp): + m[i, j] += qwts[k] * frob(bfvals[i, :, :, k], + bfvals[j, :, :, k]) + + assert np.linalg.cond(m) < 1.e12 + + comps = [(0, 0), (0, 1), (0, 0)] + + # loop over monomials up to degree 2 + for deg in range(3): + for jj in range(deg+1): + ii = deg-jj + for comp in comps: + b[:] = 0.0 + # set RHS (symmetrically) to be the monomial in + # the proper component. + rhs_vals[comp] = qpts[:, 0]**ii * qpts[:, 1]**jj + rhs_vals[tuple(reversed(comp))] = rhs_vals[comp] + for i in range(nbf): + for k in range(nqp): + b[i] += qwts[k] * frob(bfvals[i, :, :, k], + rhs_vals[:, :, k]) + x = np.linalg.solve(m, b) + + sol_at_qpts = np.zeros(rhs_vals.shape) + for i in range(nbf): + for k in range(nqp): + sol_at_qpts[:, :, k] += x[i] * bfvals[i, :, :, k] + + diff = sol_at_qpts - rhs_vals + err = 0.0 + for k in range(nqp): + err += qwts[k] * frob(diff[:, :, k], diff[:, :, k]) + + assert np.sqrt(err) < 1.e-12 diff --git a/test/unit/test_awnc.py b/test/unit/test_awnc.py new file mode 100644 index 000000000..c2672ff9d --- /dev/null +++ b/test/unit/test_awnc.py @@ -0,0 +1,72 @@ +import numpy as np +from FIAT import ufc_simplex, ArnoldWintherNC, make_quadrature, expansions + + +def test_dofs(): + line = ufc_simplex(1) + T = ufc_simplex(2) + T.vertices = np.random.rand(3, 2) + AW = ArnoldWintherNC(T, 2) + + Qline = make_quadrature(line, 6) + + linebfs = expansions.LineExpansionSet(line) + linevals = linebfs.tabulate(1, Qline.pts) + + # n, n moments + for ed in range(3): + n = T.compute_scaled_normal(ed) + wts = np.asarray(Qline.wts) + nqpline = len(wts) + + vals = AW.tabulate(0, Qline.pts, (1, ed))[(0, 0)] + nnvals = np.zeros((18, nqpline)) + for i in range(18): + for j in range(len(wts)): + nnvals[i, j] = n @ vals[i, :, :, j] @ n + + nnmoments = np.zeros((18, 2)) + + for bf in range(18): + for k in range(nqpline): + for m in (0, 1): + nnmoments[bf, m] += wts[k] * nnvals[bf, k] * linevals[m, k] + + for bf in range(18): + if bf != AW.dual.entity_ids[1][ed][0] and bf != AW.dual.entity_ids[1][ed][2]: + assert np.allclose(nnmoments[bf, :], np.zeros(2)) + + # n, t moments + for ed in range(3): + n = T.compute_scaled_normal(ed) + t = T.compute_edge_tangent(ed) + wts = np.asarray(Qline.wts) + nqpline = len(wts) + + vals = AW.tabulate(0, Qline.pts, (1, ed))[(0, 0)] + ntvals = np.zeros((18, nqpline)) + for i in range(18): + for j in range(len(wts)): + ntvals[i, j] = n @ vals[i, :, :, j] @ t + + ntmoments = np.zeros((18, 2)) + + for bf in range(18): + for k in range(nqpline): + for m in (0, 1): + ntmoments[bf, m] += wts[k] * ntvals[bf, k] * linevals[m, k] + + for bf in range(18): + if bf != AW.dual.entity_ids[1][ed][1] and bf != AW.dual.entity_ids[1][ed][3]: + assert np.allclose(ntmoments[bf, :], np.zeros(2), atol=1.e-7) + + # check internal dofs + Q = make_quadrature(T, 6) + qpvals = AW.tabulate(0, Q.pts)[(0, 0)] + 
const_moms = qpvals @ Q.wts + assert np.allclose(const_moms[:12], np.zeros((12, 2, 2))) + assert np.allclose(const_moms[15:], np.zeros((3, 2, 2))) + assert np.allclose(const_moms[12:15, 0, 0], np.asarray([1, 0, 0])) + assert np.allclose(const_moms[12:15, 0, 1], np.asarray([0, 1, 0])) + assert np.allclose(const_moms[12:15, 1, 0], np.asarray([0, 1, 0])) + assert np.allclose(const_moms[12:15, 1, 1], np.asarray([0, 0, 1])) diff --git a/test/unit/test_fiat.py b/test/unit/test_fiat.py index a2b2c794d..463c5fc3c 100644 --- a/test/unit/test_fiat.py +++ b/test/unit/test_fiat.py @@ -20,7 +20,7 @@ import pytest from FIAT.reference_element import LINE, ReferenceElement -from FIAT.reference_element import UFCInterval, UFCTriangle, UFCTetrahedron +from FIAT.reference_element import Point, UFCInterval, UFCTriangle, UFCTetrahedron from FIAT.lagrange import Lagrange from FIAT.discontinuous_lagrange import DiscontinuousLagrange # noqa: F401 from FIAT.discontinuous_taylor import DiscontinuousTaylor # noqa: F401 @@ -49,7 +49,7 @@ from FIAT.enriched import EnrichedElement # noqa: F401 from FIAT.nodal_enriched import NodalEnrichedElement - +P = Point() I = UFCInterval() # noqa: E741 T = UFCTriangle() S = UFCTetrahedron() @@ -110,6 +110,7 @@ def __init__(self, a, b): "P0(I)", "P0(T)", "P0(S)", + "DiscontinuousLagrange(P, 0)", "DiscontinuousLagrange(I, 0)", "DiscontinuousLagrange(I, 1)", "DiscontinuousLagrange(I, 2)", @@ -137,6 +138,24 @@ def __init__(self, a, b): "RaviartThomas(S, 1)", "RaviartThomas(S, 2)", "RaviartThomas(S, 3)", + 'RaviartThomas(T, 1, variant="integral")', + 'RaviartThomas(T, 2, variant="integral")', + 'RaviartThomas(T, 3, variant="integral")', + 'RaviartThomas(S, 1, variant="integral")', + 'RaviartThomas(S, 2, variant="integral")', + 'RaviartThomas(S, 3, variant="integral")', + 'RaviartThomas(T, 1, variant="integral(2)")', + 'RaviartThomas(T, 2, variant="integral(3)")', + 'RaviartThomas(T, 3, variant="integral(4)")', + 'RaviartThomas(S, 1, variant="integral(2)")', + 'RaviartThomas(S, 2, variant="integral(3)")', + 'RaviartThomas(S, 3, variant="integral(4)")', + 'RaviartThomas(T, 1, variant="point")', + 'RaviartThomas(T, 2, variant="point")', + 'RaviartThomas(T, 3, variant="point")', + 'RaviartThomas(S, 1, variant="point")', + 'RaviartThomas(S, 2, variant="point")', + 'RaviartThomas(S, 3, variant="point")', "DiscontinuousRaviartThomas(T, 1)", "DiscontinuousRaviartThomas(T, 2)", "DiscontinuousRaviartThomas(T, 3)", @@ -149,18 +168,72 @@ def __init__(self, a, b): "BrezziDouglasMarini(S, 1)", "BrezziDouglasMarini(S, 2)", "BrezziDouglasMarini(S, 3)", + 'BrezziDouglasMarini(T, 1, variant="integral")', + 'BrezziDouglasMarini(T, 2, variant="integral")', + 'BrezziDouglasMarini(T, 3, variant="integral")', + 'BrezziDouglasMarini(S, 1, variant="integral")', + 'BrezziDouglasMarini(S, 2, variant="integral")', + 'BrezziDouglasMarini(S, 3, variant="integral")', + 'BrezziDouglasMarini(T, 1, variant="integral(2)")', + 'BrezziDouglasMarini(T, 2, variant="integral(3)")', + 'BrezziDouglasMarini(T, 3, variant="integral(4)")', + 'BrezziDouglasMarini(S, 1, variant="integral(2)")', + 'BrezziDouglasMarini(S, 2, variant="integral(3)")', + 'BrezziDouglasMarini(S, 3, variant="integral(4)")', + 'BrezziDouglasMarini(T, 1, variant="point")', + 'BrezziDouglasMarini(T, 2, variant="point")', + 'BrezziDouglasMarini(T, 3, variant="point")', + 'BrezziDouglasMarini(S, 1, variant="point")', + 'BrezziDouglasMarini(S, 2, variant="point")', + 'BrezziDouglasMarini(S, 3, variant="point")', "Nedelec(T, 1)", "Nedelec(T, 2)", "Nedelec(T, 
3)", "Nedelec(S, 1)", "Nedelec(S, 2)", "Nedelec(S, 3)", + 'Nedelec(T, 1, variant="integral")', + 'Nedelec(T, 2, variant="integral")', + 'Nedelec(T, 3, variant="integral")', + 'Nedelec(S, 1, variant="integral")', + 'Nedelec(S, 2, variant="integral")', + 'Nedelec(S, 3, variant="integral")', + 'Nedelec(T, 1, variant="integral(2)")', + 'Nedelec(T, 2, variant="integral(3)")', + 'Nedelec(T, 3, variant="integral(4)")', + 'Nedelec(S, 1, variant="integral(2)")', + 'Nedelec(S, 2, variant="integral(3)")', + 'Nedelec(S, 3, variant="integral(4)")', + 'Nedelec(T, 1, variant="point")', + 'Nedelec(T, 2, variant="point")', + 'Nedelec(T, 3, variant="point")', + 'Nedelec(S, 1, variant="point")', + 'Nedelec(S, 2, variant="point")', + 'Nedelec(S, 3, variant="point")', "NedelecSecondKind(T, 1)", "NedelecSecondKind(T, 2)", "NedelecSecondKind(T, 3)", "NedelecSecondKind(S, 1)", "NedelecSecondKind(S, 2)", "NedelecSecondKind(S, 3)", + 'NedelecSecondKind(T, 1, variant="integral")', + 'NedelecSecondKind(T, 2, variant="integral")', + 'NedelecSecondKind(T, 3, variant="integral")', + 'NedelecSecondKind(S, 1, variant="integral")', + 'NedelecSecondKind(S, 2, variant="integral")', + 'NedelecSecondKind(S, 3, variant="integral")', + 'NedelecSecondKind(T, 1, variant="integral(2)")', + 'NedelecSecondKind(T, 2, variant="integral(3)")', + 'NedelecSecondKind(T, 3, variant="integral(4)")', + 'NedelecSecondKind(S, 1, variant="integral(2)")', + 'NedelecSecondKind(S, 2, variant="integral(3)")', + 'NedelecSecondKind(S, 3, variant="integral(4)")', + 'NedelecSecondKind(T, 1, variant="point")', + 'NedelecSecondKind(T, 2, variant="point")', + 'NedelecSecondKind(T, 3, variant="point")', + 'NedelecSecondKind(S, 1, variant="point")', + 'NedelecSecondKind(S, 2, variant="point")', + 'NedelecSecondKind(S, 3, variant="point")', "Regge(T, 0)", "Regge(T, 1)", "Regge(T, 2)", @@ -423,6 +496,15 @@ def test_facet_nodality_tabulate(element): assert np.isclose(basis[i], 1.0 if i == j else 0.0) +@pytest.mark.parametrize('element', [ + 'Nedelec(S, 3, variant="integral(2)")', + 'NedelecSecondKind(S, 3, variant="integral(3)")' +]) +def test_error_quadrature_degree(element): + with pytest.raises(ValueError): + eval(element) + + if __name__ == '__main__': import os pytest.main(os.path.abspath(__file__)) diff --git a/test/unit/test_gauss_radau.py b/test/unit/test_gauss_radau.py new file mode 100644 index 000000000..78d0ea8ed --- /dev/null +++ b/test/unit/test_gauss_radau.py @@ -0,0 +1,48 @@ +# Copyright (C) 2016 Imperial College London and others +# +# This file is part of FIAT. +# +# FIAT is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# FIAT is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with FIAT. If not, see . +# +# Authors: +# +# Robert Kirby, based on work of David A. 
Ham +# + +import pytest +import numpy as np + + +@pytest.mark.parametrize("degree", range(1, 7)) +def test_gll_basis_values(degree): + """Ensure that integrating a simple monomial produces the expected results.""" + from FIAT import ufc_simplex, GaussRadau, make_quadrature + + s = ufc_simplex(1) + q = make_quadrature(s, degree + 1) + + fe = GaussRadau(s, degree) + tab = fe.tabulate(0, q.pts)[(0,)] + + for test_degree in range(degree + 1): + coefs = [n(lambda x: x[0]**test_degree) for n in fe.dual.nodes] + integral = np.dot(coefs, np.dot(tab, q.wts)) + reference = np.dot([x[0]**test_degree + for x in q.pts], q.wts) + assert np.allclose(integral, reference, rtol=1e-14) + + +if __name__ == '__main__': + import os + pytest.main(os.path.abspath(__file__)) diff --git a/test/unit/test_kong_mulder_veldhuizen.py b/test/unit/test_kong_mulder_veldhuizen.py new file mode 100644 index 000000000..5fce238ec --- /dev/null +++ b/test/unit/test_kong_mulder_veldhuizen.py @@ -0,0 +1,148 @@ +import numpy as np +import pytest + +from FIAT.reference_element import UFCInterval, UFCTriangle, UFCTetrahedron +from FIAT import create_quadrature, make_quadrature, polynomial_set +from FIAT.kong_mulder_veldhuizen import KongMulderVeldhuizen as KMV + +I = UFCInterval() +T = UFCTriangle() +Te = UFCTetrahedron() + + +@pytest.mark.parametrize("p_d", [(1, 1), (2, 3), (3, 4)]) +def test_kmv_quad_tet_schemes(p_d): # noqa: W503 + fct = np.math.factorial + p, d = p_d + q = create_quadrature(Te, p, "KMV") + for i in range(d + 1): + for j in range(d + 1 - i): + for k in range(d + 1 - i - j): + trueval = fct(i) * fct(j) * fct(k) / fct(i + j + k + 3) + assert ( + np.abs( + trueval - + q.integrate(lambda x: x[0] ** i * x[1] ** j * x[2] ** k) + ) < + 1.0e-10 + ) + + +@pytest.mark.parametrize("p_d", [(1, 1), (2, 3), (3, 5), (4, 7), (5, 9)]) +def test_kmv_quad_tri_schemes(p_d): + fct = np.math.factorial + p, d = p_d + q = create_quadrature(T, p, "KMV") + for i in range(d + 1): + for j in range(d + 1 - i): + trueval = fct(i) * fct(j) / fct(i + j + 2) + assert ( + np.abs(trueval - q.integrate(lambda x: x[0] ** i * x[1] ** j)) < 1.0e-10 + ) + + +@pytest.mark.parametrize( + "element_degree", + [(KMV(T, 1), 1), (KMV(T, 2), 2), (KMV(T, 3), 3), (KMV(T, 4), 4), (KMV(T, 5), 5)], +) +def test_Kronecker_property_tris(element_degree): + """ + Evaluating the nodal basis at the special quadrature points should + have a Kronecker property. Also checks that the basis functions + and quadrature points are given the same ordering. + """ + element, degree = element_degree + qr = create_quadrature(T, degree, scheme="KMV") + (basis,) = element.tabulate(0, qr.get_points()).values() + assert np.allclose(basis, np.eye(*basis.shape)) + + +@pytest.mark.parametrize( + "element_degree", [(KMV(Te, 1), 1), (KMV(Te, 2), 2), (KMV(Te, 3), 3)] +) +def test_Kronecker_property_tets(element_degree): + """ + Evaluating the nodal basis at the special quadrature points should + have a Kronecker property. Also checks that the basis functions + and quadrature points are given the same ordering. + """ + element, degree = element_degree + qr = create_quadrature(Te, degree, scheme="KMV") + (basis,) = element.tabulate(0, qr.get_points()).values() + assert np.allclose(basis, np.eye(*basis.shape)) + + +@pytest.mark.parametrize("degree", [2, 3, 4]) +def test_edge_degree(degree): + """Verify that the outer edges of a degree KMV element + are indeed of degree and the interior is of degree+1""" + # create a degree+1 polynomial + I = UFCInterval() + # an exact quad. 
rule for a degree+1 polynomial on the UFCInterval + qr = make_quadrature(I, degree + 1) + W = np.diag(qr.wts) + sd = I.get_spatial_dimension() + pset = polynomial_set.ONPolynomialSet(I, degree + 1, (sd,)) + pset = pset.take([degree + 1]) + # tabulate at the quadrature points + interval_vals = pset.tabulate(qr.get_points())[(0,)] + interval_vals = np.squeeze(interval_vals) + # create a degree KMV element (edge functions of the nominal degree, interior bubble of degree+1) + T = UFCTriangle() + element = KMV(T, degree) + # tabulate values on an edge of the KMV element + for e in range(3): + edge_values = element.tabulate(0, qr.get_points(), (1, e))[(0, 0)] + # degree edge should be orthogonal to degree+1 ONpoly edge values + result = edge_values @ W @ interval_vals.T + assert np.allclose(np.sum(result), 0.0) + + +@pytest.mark.parametrize( + "element_degree", + [(KMV(T, 1), 1), (KMV(T, 2), 2), (KMV(T, 3), 3), (KMV(T, 4), 4), (KMV(T, 5), 5)], +) +def test_interpolate_monomials_tris(element_degree): + element, degree = element_degree + + # ordered the same way as KMV nodes + pts = create_quadrature(T, degree, "KMV").pts + + Q = make_quadrature(T, 2 * degree) + phis = element.tabulate(0, Q.pts)[0, 0] + print("deg", degree) + for i in range(degree + 1): + for j in range(degree + 1 - i): + m = lambda x: x[0] ** i * x[1] ** j + dofs = np.array([m(pt) for pt in pts]) + interp = phis.T @ dofs + matqp = np.array([m(pt) for pt in Q.pts]) + err = 0.0 + for k in range(phis.shape[1]): + err += Q.wts[k] * (interp[k] - matqp[k]) ** 2 + assert np.sqrt(err) <= 1.0e-12 + + +@pytest.mark.parametrize( + "element_degree", [(KMV(Te, 1), 1), (KMV(Te, 2), 2), (KMV(Te, 3), 3)] +) +def test_interpolate_monomials_tets(element_degree): + element, degree = element_degree + + # ordered the same way as KMV nodes + pts = create_quadrature(Te, degree, "KMV").pts + + Q = make_quadrature(Te, 2 * degree) + phis = element.tabulate(0, Q.pts)[0, 0, 0] + print("deg", degree) + for i in range(degree + 1): + for j in range(degree + 1 - i): + for k in range(degree + 1 - i - j): + m = lambda x: x[0] ** i * x[1] ** j * x[2] ** k + dofs = np.array([m(pt) for pt in pts]) + interp = phis.T @ dofs + matqp = np.array([m(pt) for pt in Q.pts]) + err = 0.0 + for kk in range(phis.shape[1]): + err += Q.wts[kk] * (interp[kk] - matqp[kk]) ** 2 + assert np.sqrt(err) <= 1.0e-12 diff --git a/test/unit/test_mtw.py b/test/unit/test_mtw.py new file mode 100644 index 000000000..bef9f2fe8 --- /dev/null +++ b/test/unit/test_mtw.py @@ -0,0 +1,43 @@ +import numpy as np +from FIAT import ufc_simplex, MardalTaiWinther, make_quadrature, expansions + + +def test_dofs(): + line = ufc_simplex(1) + T = ufc_simplex(2) + T.vertices = np.random.rand(3, 2) + MTW = MardalTaiWinther(T, 3) + + Qline = make_quadrature(line, 6) + + linebfs = expansions.LineExpansionSet(line) + linevals = linebfs.tabulate(1, Qline.pts) + + for ed in range(3): + n = T.compute_scaled_normal(ed) + wts = np.asarray(Qline.wts) + + vals = MTW.tabulate(0, Qline.pts, (1, ed))[(0, 0)] + nvals = np.dot(np.transpose(vals, (0, 2, 1)), n) + normal_moments = np.zeros((9, 2)) + for bf in range(9): + for k in range(len(Qline.wts)): + for m in (0, 1): + normal_moments[bf, m] += wts[k] * nvals[bf, k] * linevals[m, k] + right = np.zeros((9, 2)) + right[3*ed, 0] = 1.0 + right[3*ed+2, 1] = 1.0 + assert np.allclose(normal_moments, right) + for ed in range(3): + t = T.compute_edge_tangent(ed) + wts = np.asarray(Qline.wts) + + vals = MTW.tabulate(0, Qline.pts, (1, ed))[(0, 0)] + tvals = np.dot(np.transpose(vals, (0, 2, 1)),
t) + tangent_moments = np.zeros(9) + for bf in range(9): + for k in range(len(Qline.wts)): + tangent_moments[bf] += wts[k] * tvals[bf, k] * linevals[0, k] + right = np.zeros(9) + right[3*ed + 1] = 1.0 + assert np.allclose(tangent_moments, right) diff --git a/test/unit/test_pointwise_dual.py b/test/unit/test_pointwise_dual.py new file mode 100644 index 000000000..4d2698408 --- /dev/null +++ b/test/unit/test_pointwise_dual.py @@ -0,0 +1,34 @@ +# Copyright (C) 2020 Robert C Kirby (Baylor University) +# +# This file is part of FIAT (https://www.fenicsproject.org) +# +# SPDX-License-Identifier: LGPL-3.0-or-later + +import pytest +import numpy + +from FIAT import ( + BrezziDouglasMarini, Morley, QuinticArgyris, CubicHermite) + +from FIAT.reference_element import ( + UFCTriangle, + make_lattice) + +from FIAT.pointwise_dual import compute_pointwise_dual as cpd + +T = UFCTriangle() + + +@pytest.mark.parametrize("element", + [CubicHermite(T), + Morley(T), + QuinticArgyris(T), + BrezziDouglasMarini(T, 1, variant="integral")]) +def test_pw_dual(element): + deg = element.degree() + ref_el = element.ref_el + poly_set = element.poly_set + pts = make_lattice(ref_el.vertices, deg) + + assert numpy.allclose(element.dual.to_riesz(poly_set), + cpd(element, pts).to_riesz(poly_set)) diff --git a/test/unit/test_quadrature.py b/test/unit/test_quadrature.py index e800bed6c..8b90d19ba 100644 --- a/test/unit/test_quadrature.py +++ b/test/unit/test_quadrature.py @@ -19,7 +19,6 @@ import numpy import pytest -from pytest_cases import pytest_parametrize_plus, fixture_ref import FIAT from FIAT.reference_element import UFCInterval, UFCTriangle, UFCTetrahedron from FIAT.reference_element import UFCQuadrilateral, UFCHexahedron, TensorProductCell @@ -50,6 +49,22 @@ def hexahedron(): return UFCHexahedron() + +# This unified fixture enables tests parametrised over different cells. +@pytest.fixture(params=["interval", + "triangle", + "quadrilateral", + "hexahedron"]) +def cell(request): + if request.param == "interval": + return UFCInterval() + elif request.param == "triangle": + return UFCTriangle() + elif request.param == "quadrilateral": + return UFCQuadrilateral() + elif request.param == "hexahedron": + return UFCHexahedron() + + @pytest.fixture(scope='module') def extr_interval(): """Extruded interval = interval x interval""" return TensorProductCell(UFCInterval(), UFCInterval()) @@ -68,6 +83,19 @@ def extr_quadrilateral(): return TensorProductCell(UFCQuadrilateral(), UFCInterval()) + +# This unified fixture enables tests parametrised over different extruded cells.
+@pytest.fixture(params=["extr_interval", + "extr_triangle", + "extr_quadrilateral"]) +def extr_cell(request): + if request.param == "extr_interval": + return TensorProductCell(UFCInterval(), UFCInterval()) + elif request.param == "extr_triangle": + return TensorProductCell(UFCTriangle(), UFCInterval()) + elif request.param == "extr_quadrilateral": + return TensorProductCell(UFCQuadrilateral(), UFCInterval()) + + @pytest.fixture(params=["canonical", "default"]) def scheme(request): return request.param @@ -135,21 +163,14 @@ def test_create_quadrature_extr_quadrilateral(extr_quadrilateral, basedeg, extrd (2**(basedeg + 2) - 2) / ((basedeg + 1)*(basedeg + 2)) * 1/(extrdeg + 1)) -@pytest_parametrize_plus("cell", [fixture_ref(interval), - fixture_ref(triangle), - fixture_ref(tetrahedron), - fixture_ref(quadrilateral)]) def test_invalid_quadrature_degree(cell, scheme): with pytest.raises(ValueError): FIAT.create_quadrature(cell, -1, scheme) -@pytest_parametrize_plus("cell", [fixture_ref(extr_interval), - fixture_ref(extr_triangle), - fixture_ref(extr_quadrilateral)]) -def test_invalid_quadrature_degree_tensor_prod(cell): +def test_invalid_quadrature_degree_tensor_prod(extr_cell): with pytest.raises(ValueError): - FIAT.create_quadrature(cell, (-1, -1)) + FIAT.create_quadrature(extr_cell, (-1, -1)) def test_tensor_product_composition(interval, triangle, extr_triangle, scheme): @@ -172,6 +193,18 @@ def test_gauss_lobatto_legendre_quadrature(interval, points, degree): assert numpy.round(q.integrate(lambda x: x[0]**degree) - 1./(degree+1), 14) == 0. +@pytest.mark.parametrize(("points, degree"), tuple((p, d) + for p in range(2, 10) + for d in range(2*p - 1))) +def test_radau_legendre_quadrature(interval, points, degree): + """Check that the quadrature rules correctly integrate all the right + polynomial degrees.""" + + q = FIAT.quadrature.RadauQuadratureLineRule(interval, points) + + assert numpy.round(q.integrate(lambda x: x[0]**degree) - 1./(degree+1), 14) == 0. + + @pytest.mark.parametrize(("points, degree"), tuple((p, d) for p in range(2, 10) for d in range(2*p))) diff --git a/test/unit/test_serendipity.py b/test/unit/test_serendipity.py new file mode 100644 index 000000000..c6a39b074 --- /dev/null +++ b/test/unit/test_serendipity.py @@ -0,0 +1,46 @@ +from FIAT.reference_element import ( + UFCQuadrilateral, UFCInterval, TensorProductCell) +from FIAT import Serendipity +import numpy as np +import sympy + + +def test_serendipity_derivatives(): + cell = UFCQuadrilateral() + S = Serendipity(cell, 2) + + x = sympy.DeferredVector("X") + X, Y = x[0], x[1] + basis_functions = [ + (1 - X)*(1 - Y), + Y*(1 - X), + X*(1 - Y), + X*Y, + Y*(1 - X)*(Y - 1), + X*Y*(Y - 1), + X*(1 - Y)*(X - 1), + X*Y*(X - 1), + ] + points = [[0.5, 0.5], [0.25, 0.75]] + for alpha, actual in S.tabulate(2, points).items(): + expect = list(sympy.diff(basis, *zip([X, Y], alpha)) + for basis in basis_functions) + expect = list([basis.subs(dict(zip([X, Y], point))) + for point in points] + for basis in expect) + assert actual.shape == (8, 2) + assert np.allclose(np.asarray(expect, dtype=float), + actual.reshape(8, 2)) + + +def test_dual_tensor_versus_ufc(): + K0 = UFCQuadrilateral() + ell = UFCInterval() + K1 = TensorProductCell(ell, ell) + S0 = Serendipity(K0, 2) + S1 = Serendipity(K1, 2) + # since both elements go through the flattened cell to produce the + # dual basis, they ought to do *exactly* the same calculations and + # hence form exactly the same nodes. 
+ for i in range(S0.space_dimension()): + assert S0.dual.nodes[i].pt_dict == S1.dual.nodes[i].pt_dict
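A note on the derivative indexing used by test_serendipity_derivatives above: tabulate() is keyed by a derivative multi-index alpha, and the test builds the matching symbolic derivative with sympy.diff(basis, *zip([X, Y], alpha)). A minimal sketch of how that call expands, using plain sympy Symbols in place of the DeferredVector components (an illustrative assumption, not part of the patch):

import sympy

# For alpha = (1, 1), zip([X, Y], alpha) yields (X, 1), (Y, 1), so the call below
# is sympy.diff(f, (X, 1), (Y, 1)), i.e. the mixed second derivative d^2 f / dX dY.
X, Y = sympy.symbols("X Y")
f = X * Y * (Y - 1)        # one of the serendipity basis functions listed in the test
alpha = (1, 1)             # the multi-index key returned by tabulate(2, points)
d = sympy.diff(f, *zip([X, Y], alpha))
print(d)                   # 2*Y - 1
assert d.subs({X: 0.5, Y: 0.5}) == 0   # the value tabulated at the point [0.5, 0.5]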