New Upstream Release - fenics-ffcx

Ready changes

Summary

Merged new upstream version: 0.6.0 (was: 0.5.0).
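
For anyone checking reverse dependencies of this upload, the most visible API change in 0.6.0 is the rename of "parameters" to "options" throughout the Python interface (see e.g. the ffcx/__init__.py, ffcx/analysis.py and ffcx/codegeneration hunks below). A minimal sketch of the new spelling, assuming FFCx 0.6.0 is installed and that get_options() accepts an optional override dict the same way get_parameters() did:

    # Hedged sketch: the names come from the diff below; the override-dict
    # behaviour is assumed to mirror the old get_parameters() call.
    import ffcx

    # 0.5.0 spelling was: ffcx.get_parameters({"scalar_type": "double"})
    opts = ffcx.get_options({"scalar_type": "double"})
    print(opts["scalar_type"])  # prints the active scalar type, e.g. "double"

The same rename applies to keyword arguments of the JIT helpers (compile_elements(..., options=...) instead of parameters=...), so downstream packages that pass a parameters dict will need a corresponding patch.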

Diff

diff --git a/.github/workflows/build-wheels.yml b/.github/workflows/build-wheels.yml
index 0cab006..b8795af 100644
--- a/.github/workflows/build-wheels.yml
+++ b/.github/workflows/build-wheels.yml
@@ -40,7 +40,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout FFCx
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           ref: ${{ github.event.inputs.ffcx_ref }}
 
@@ -59,13 +59,13 @@ jobs:
     needs: [build]
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/download-artifact@v2
+      - uses: actions/download-artifact@v3
         with:
           name: artifact
           path: dist
 
       - name: Publish to PyPI
-        uses: pypa/gh-action-pypi-publish@v1.5.0
+        uses: pypa/gh-action-pypi-publish@v1.5.1
         if: ${{ github.event.inputs.pypi_publish == 'true' }}
         with:
           user: __token__
@@ -73,7 +73,7 @@ jobs:
           repository_url: https://upload.pypi.org/legacy/
 
       - name: Publish to Test PyPI
-        uses: pypa/gh-action-pypi-publish@v1.5.0
+        uses: pypa/gh-action-pypi-publish@v1.5.1
         if: ${{ github.event.inputs.test_pypi_publish == 'true' }}
         with:
           user: __token__
diff --git a/.github/workflows/dolfin-tests.yml b/.github/workflows/dolfin-tests.yml
index 388cdb4..6b04002 100644
--- a/.github/workflows/dolfin-tests.yml
+++ b/.github/workflows/dolfin-tests.yml
@@ -41,7 +41,7 @@ jobs:
       OMPI_MCA_hwloc_base_binding_policy: none
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Install dependencies (Python)
         run: |
           python3 -m pip install --upgrade pip
@@ -63,13 +63,13 @@ jobs:
 
       - name: Get DOLFINx source (default branch/tag)
         if: github.event_name != 'workflow_dispatch'
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           path: ./dolfinx
           repository: FEniCS/dolfinx
       - name: Get DOLFINx source (specified branch/tag)
         if: github.event_name == 'workflow_dispatch'
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           path: ./dolfinx
           repository: FEniCS/dolfinx
diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml
index 2014df2..b4778ed 100644
--- a/.github/workflows/pythonapp.yml
+++ b/.github/workflows/pythonapp.yml
@@ -14,13 +14,15 @@ on:
     branches:
       - main
 
+  workflow_dispatch:
+
 jobs:
   build:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
         os: [ubuntu-latest]
-        python-version: ['3.7', '3.8', '3.9', '3.10']
+        python-version: ['3.7', '3.8', '3.9', '3.10', "3.11"]
 
     env:
       CC: gcc-10
@@ -28,10 +30,10 @@ jobs:
 
     steps:
       - name: Checkout FFCx
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
 
       - name: Set up Python
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
 
@@ -60,6 +62,12 @@ jobs:
       - name: Static check with mypy
         run: |
           python -m mypy ffcx/
+        if: matrix.python-version != '3.11'
+
+      - name: isort checks (non-blocking)
+        continue-on-error: true
+        run: |
+          python3 -m isort --check .
 
       - name: Check documentation style
         run: |
@@ -78,7 +86,7 @@ jobs:
         continue-on-error: true
 
       - name: Upload pytest results
-        uses: actions/upload-artifact@main
+        uses: actions/upload-artifact@v3
         with:
           name: pytest-results-${{ matrix.os }}-${{ matrix.python-version }}
           path: junit/test-results-${{ matrix.os }}-${{ matrix.python-version }}.xml
@@ -87,7 +95,7 @@ jobs:
         if: always()
 
       - name: Get UFL
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           path: ./ufl
           repository: FEniCS/ufl
@@ -105,7 +113,7 @@ jobs:
           make html
 
       - name: Upload documentation artifact
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: doc-${{ matrix.os }}-${{ matrix.python-version }}
           path: doc/build/html/
@@ -114,7 +122,7 @@ jobs:
 
       - name: Checkout FEniCS/docs
         if: ${{ github.repository == 'FEniCS/ffcx' && ( github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') ) && runner.os == 'Linux' && matrix.python-version == 3.8 }}
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           repository: "FEniCS/docs"
           path: "docs"
diff --git a/.isort.cfg b/.isort.cfg
new file mode 100644
index 0000000..719d27f
--- /dev/null
+++ b/.isort.cfg
@@ -0,0 +1,5 @@
+[settings]
+src_paths = ffcx,test,demo
+known_first_party = basix,ufl
+known_third_party = numpy,pytest
+sections=FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
diff --git a/ChangeLog.rst b/ChangeLog.rst
index 797ff9e..cb6bb49 100644
--- a/ChangeLog.rst
+++ b/ChangeLog.rst
@@ -1,6 +1,24 @@
 Changelog
 =========
 
+
+0.5.0
+-----
+See: https://github.com/FEniCS/ffcx/compare/v0.5.0...v0.4.0 for details
+
+0.4.0
+-----
+See: https://github.com/FEniCS/ffcx/compare/v0.4.0...v0.3.0 for details
+
+0.3.0
+-----
+See: https://github.com/FEniCS/ffcx/compare/v0.3.0...v0.2.0 for details
+
+0.2.0
+-----
+
+- No changes
+
 0.1.0
 -----
 Alpha release of ffcx
diff --git a/cmake/CMakeLists.txt b/cmake/CMakeLists.txt
index ecb4fec..7392614 100644
--- a/cmake/CMakeLists.txt
+++ b/cmake/CMakeLists.txt
@@ -1,6 +1,6 @@
-cmake_minimum_required(VERSION 3.16)
+cmake_minimum_required(VERSION 3.19)
 
-project(ufcx VERSION 0.5.0 DESCRIPTION "UFCx interface header for finite element kernels"
+project(ufcx VERSION 0.6.0 DESCRIPTION "UFCx interface header for finite element kernels"
   LANGUAGES C
   HOMEPAGE_URL https://github.com/fenics/ffcx)
 include(GNUInstallDirs)
@@ -23,7 +23,7 @@ install(TARGETS ${PROJECT_NAME}
 include(CMakePackageConfigHelpers)
 write_basic_package_version_file("${PROJECT_NAME}ConfigVersion.cmake"
                                  VERSION ${PROJECT_VERSION}
-                                 COMPATIBILITY SameMinorVersion)
+                                 COMPATIBILITY AnyNewerVersion)
 configure_package_config_file("${PROJECT_NAME}Config.cmake.in" "${PROJECT_NAME}Config.cmake"
   INSTALL_DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/cmake)
 install(EXPORT ${PROJECT_NAME}_Targets FILE ${PROJECT_NAME}Targets.cmake
diff --git a/debian/changelog b/debian/changelog
index 716d9ce..135401f 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,10 @@
+fenics-ffcx (1:0.6.0-1) UNRELEASED; urgency=low
+
+  * New upstream release.
+  * Drop patch fix_constant_expression_PR532.patch, present upstream.
+
+ -- Debian Janitor <janitor@jelmer.uk>  Mon, 12 Jun 2023 05:28:17 -0000
+
 fenics-ffcx (1:0.5.0-3) unstable; urgency=medium
 
   * debian patch fix_constant_expression_PR532.patch applies upstream
diff --git a/debian/patches/fix_constant_expression_PR532.patch b/debian/patches/fix_constant_expression_PR532.patch
deleted file mode 100644
index 476e155..0000000
--- a/debian/patches/fix_constant_expression_PR532.patch
+++ /dev/null
@@ -1,23 +0,0 @@
-From b4c7628c079da1e72e2e8beaea62c82f3b69c534 Mon Sep 17 00:00:00 2001
-From: jorgensd <dokken92@gmail.com>
-Date: Thu, 1 Sep 2022 14:11:17 +0000
-Subject: [PATCH] Resolve https://github.com/FEniCS/dolfinx/issues/2342
-
----
- ffcx/naming.py | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/ffcx/naming.py b/ffcx/naming.py
-index dc05b51a2..80722871f 100644
---- a/ffcx/naming.py
-+++ b/ffcx/naming.py
-@@ -54,7 +54,8 @@ def compute_signature(ufl_objects: typing.List[
-                 domains.append(*arg.ufl_function_space().ufl_domains())
-             for gc in ufl.algorithms.analysis.extract_type(expr, ufl.classes.GeometricQuantity):
-                 domains.append(*gc.ufl_domains())
--
-+            for const in consts:
-+                domains.append(const.ufl_domain())
-             domains = ufl.algorithms.analysis.unique_tuple(domains)
-             rn.update(dict((d, i) for i, d in enumerate(domains)))
- 
diff --git a/debian/patches/make_doc_clean.patch b/debian/patches/make_doc_clean.patch
index 2e3b5d0..8af4ab4 100644
--- a/debian/patches/make_doc_clean.patch
+++ b/debian/patches/make_doc_clean.patch
@@ -1,8 +1,8 @@
-Index: ffcx/doc/Makefile
+Index: fenics-ffcx.git/doc/Makefile
 ===================================================================
---- ffcx.orig/doc/Makefile	2021-03-09 11:07:15.589349230 +0100
-+++ ffcx/doc/Makefile	2021-03-09 11:14:35.917653550 +0100
-@@ -12,6 +12,9 @@
+--- fenics-ffcx.git.orig/doc/Makefile
++++ fenics-ffcx.git/doc/Makefile
+@@ -12,6 +12,9 @@ BUILDDIR      = build
  help:
  	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
  
diff --git a/debian/patches/series b/debian/patches/series
index dca4c1a..2a8f1fd 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -1,2 +1 @@
 make_doc_clean.patch
-fix_constant_expression_PR532.patch
diff --git a/demo/test_demos.py b/demo/test_demos.py
index 79937e7..d28e394 100644
--- a/demo/test_demos.py
+++ b/demo/test_demos.py
@@ -1,5 +1,6 @@
 import os
 import sys
+
 import pytest
 
 demo_dir = os.path.dirname(os.path.realpath(__file__))
diff --git a/doc/source/conf.py b/doc/source/conf.py
index d726fe7..bd87b99 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -14,6 +14,7 @@
 # import sys
 # sys.path.insert(0, os.path.abspath('.'))
 import datetime
+
 import ffcx
 
 # -- Project information -----------------------------------------------------
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 1fd44b2..adb2746 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -24,7 +24,7 @@ API reference
    ffcx.main
    ffcx.naming
    ffcx.codegeneration
-   ffcx.parameters
+   ffcx.options
    ffcx.ir.representation
    ffcx.ir.representationutils
 
diff --git a/ffcx/__init__.py b/ffcx/__init__.py
index 335ddff..918c89c 100644
--- a/ffcx/__init__.py
+++ b/ffcx/__init__.py
@@ -14,8 +14,8 @@ import logging
 
 import pkg_resources
 
-# Import default parameters
-from ffcx.parameters import get_parameters  # noqa: F401
+# Import default options
+from ffcx.options import get_options  # noqa: F401
 
 __version__ = pkg_resources.get_distribution("fenics-ffcx").version
 
diff --git a/ffcx/analysis.py b/ffcx/analysis.py
index e6af8fe..ee4ddf8 100644
--- a/ffcx/analysis.py
+++ b/ffcx/analysis.py
@@ -16,10 +16,11 @@ import typing
 
 import numpy
 import numpy.typing
-import ufl
-import basix.ufl_wrapper
 
-from ffcx.element_interface import convert_element
+import basix.ufl_wrapper
+import ufl
+from ffcx.element_interface import convert_element, QuadratureElement
+from warnings import warn
 
 logger = logging.getLogger("ffcx")
 
@@ -34,14 +35,14 @@ class UFLData(typing.NamedTuple):
     expressions: typing.List[typing.Tuple[ufl.core.expr.Expr, numpy.typing.NDArray[numpy.float64], ufl.core.expr.Expr]]
 
 
-def analyze_ufl_objects(ufl_objects: typing.List, parameters: typing.Dict) -> UFLData:
+def analyze_ufl_objects(ufl_objects: typing.List, options: typing.Dict) -> UFLData:
     """Analyze ufl object(s).
 
-    Parameters
+    Options
     ----------
     ufl_objects
-    parameters
-      FFCx parameters. These parameters take priority over all other set parameters.
+    options
+      FFCx options. These options take priority over all other set options.
 
     Returns a data structure holding
     -------
@@ -82,14 +83,14 @@ def analyze_ufl_objects(ufl_objects: typing.List, parameters: typing.Dict) -> UF
         else:
             raise TypeError("UFL objects not recognised.")
 
-    form_data = tuple(_analyze_form(form, parameters) for form in forms)
+    form_data = tuple(_analyze_form(form, options) for form in forms)
     for data in form_data:
         elements += [convert_element(e) for e in data.unique_sub_elements]
         coordinate_elements += [convert_element(e) for e in data.coordinate_elements]
 
     for original_expression, points in expressions:
         elements += [convert_element(e) for e in ufl.algorithms.extract_elements(original_expression)]
-        processed_expression = _analyze_expression(original_expression, parameters)
+        processed_expression = _analyze_expression(original_expression, options)
         processed_expressions += [(processed_expression, points, original_expression)]
 
     elements += ufl.algorithms.analysis.extract_sub_elements(elements)
@@ -108,7 +109,7 @@ def analyze_ufl_objects(ufl_objects: typing.List, parameters: typing.Dict) -> UF
                    unique_coordinate_elements=unique_coordinate_element_list, expressions=processed_expressions)
 
 
-def _analyze_expression(expression: ufl.core.expr.Expr, parameters: typing.Dict):
+def _analyze_expression(expression: ufl.core.expr.Expr, options: typing.Dict):
     """Analyzes and preprocesses expressions."""
     preserve_geometry_types = (ufl.classes.Jacobian, )
     expression = ufl.algorithms.apply_algebra_lowering.apply_algebra_lowering(expression)
@@ -119,20 +120,20 @@ def _analyze_expression(expression: ufl.core.expr.Expr, parameters: typing.Dict)
     expression = ufl.algorithms.apply_geometry_lowering.apply_geometry_lowering(expression, preserve_geometry_types)
     expression = ufl.algorithms.apply_derivatives.apply_derivatives(expression)
 
-    complex_mode = "_Complex" in parameters["scalar_type"]
+    complex_mode = "_Complex" in options["scalar_type"]
     if not complex_mode:
         expression = ufl.algorithms.remove_complex_nodes.remove_complex_nodes(expression)
 
     return expression
 
 
-def _analyze_form(form: ufl.form.Form, parameters: typing.Dict) -> ufl.algorithms.formdata.FormData:
+def _analyze_form(form: ufl.form.Form, options: typing.Dict) -> ufl.algorithms.formdata.FormData:
     """Analyzes UFL form and attaches metadata.
 
-    Parameters
+    Options
     ----------
     form
-    parameters
+    options
 
     Returns
     -------
@@ -141,7 +142,7 @@ def _analyze_form(form: ufl.form.Form, parameters: typing.Dict) -> ufl.algorithm
     Note
     ----
     The main workload of this function is extraction of unique/default metadata
-    from parameters, integral metadata or inherited from UFL
+    from options, integral metadata or inherited from UFL
     (in case of quadrature degree)
 
     """
@@ -153,16 +154,11 @@ def _analyze_form(form: ufl.form.Form, parameters: typing.Dict) -> ufl.algorithm
     # Set default spacing for coordinate elements to be equispaced
     for n, i in enumerate(form._integrals):
         element = i._ufl_domain._ufl_coordinate_element
-        assert not isinstance(element, basix.ufl_wrapper._BasixElementBase)
-        if element._sub_element._variant is None:
-            sub_element = ufl.FiniteElement(
-                element.family(), element.cell(), element.degree(), element.quadrature_scheme(),
-                variant="equispaced")
-            equi_element = ufl.VectorElement(sub_element)
-            form._integrals[n]._ufl_domain._ufl_coordinate_element = equi_element
+        if not isinstance(element, basix.ufl_wrapper._BasixElementBase) and element.degree() > 2:
+            warn("UFL coordinate elements using elements not created via Basix may not work with DOLFINx")
 
     # Check for complex mode
-    complex_mode = "_Complex" in parameters["scalar_type"]
+    complex_mode = "_Complex" in options["scalar_type"]
 
     # Compute form metadata
     form_data = ufl.algorithms.compute_form_data(
@@ -175,6 +171,17 @@ def _analyze_form(form: ufl.form.Form, parameters: typing.Dict) -> ufl.algorithm
         do_append_everywhere_integrals=False,  # do not add dx integrals to dx(i) in UFL
         complex_mode=complex_mode)
 
+    # If form contains a quadrature element, use the custom quadrature scheme
+    custom_q = None
+    for e in form_data.unique_elements:
+        e = convert_element(e)
+        if isinstance(e, QuadratureElement):
+            if custom_q is None:
+                custom_q = e._points, e._weights
+            else:
+                assert numpy.allclose(e._points, custom_q[0])
+                assert numpy.allclose(e._weights, custom_q[1])
+
     # Determine unique quadrature degree, quadrature scheme and
     # precision per each integral data
     for id, integral_data in enumerate(form_data.integral_data):
@@ -206,25 +213,28 @@ def _analyze_form(form: ufl.form.Form, parameters: typing.Dict) -> ufl.algorithm
         qr_default = "default"
 
         for i, integral in enumerate(integral_data.integrals):
-            # Extract quadrature degree
-            qd_metadata = integral.metadata().get("quadrature_degree", qd_default)
-            pd_estimated = numpy.max(integral.metadata()["estimated_polynomial_degree"])
-            if qd_metadata != qd_default:
-                qd = qd_metadata
-            else:
-                qd = pd_estimated
-
-            # Extract quadrature rule
-            qr = integral.metadata().get("quadrature_rule", qr_default)
-
-            logger.info(f"Integral {i}, integral group {id}:")
-            logger.info(f"--- quadrature rule: {qr}")
-            logger.info(f"--- quadrature degree: {qd}")
-            logger.info(f"--- precision: {p}")
-
-            # Update the old metadata
             metadata = integral.metadata()
-            metadata.update({"quadrature_degree": qd, "quadrature_rule": qr, "precision": p})
+            if custom_q is None:
+                # Extract quadrature degree
+                qd_metadata = integral.metadata().get("quadrature_degree", qd_default)
+                pd_estimated = numpy.max(integral.metadata()["estimated_polynomial_degree"])
+                if qd_metadata != qd_default:
+                    qd = qd_metadata
+                else:
+                    qd = pd_estimated
+
+                # Extract quadrature rule
+                qr = integral.metadata().get("quadrature_rule", qr_default)
+
+                logger.info(f"Integral {i}, integral group {id}:")
+                logger.info(f"--- quadrature rule: {qr}")
+                logger.info(f"--- quadrature degree: {qd}")
+                logger.info(f"--- precision: {p}")
+
+                metadata.update({"quadrature_degree": qd, "quadrature_rule": qr, "precision": p})
+            else:
+                metadata.update({"quadrature_points": custom_q[0], "quadrature_weights": custom_q[1],
+                                 "quadrature_rule": "custom", "precision": p})
 
             integral_data.integrals[i] = integral.reconstruct(metadata=metadata)
 
diff --git a/ffcx/codegeneration/C/cnodes.py b/ffcx/codegeneration/C/cnodes.py
index ee67412..792b0b6 100644
--- a/ffcx/codegeneration/C/cnodes.py
+++ b/ffcx/codegeneration/C/cnodes.py
@@ -8,6 +8,7 @@ import logging
 import numbers
 
 import numpy
+
 from ffcx.codegeneration.C.format_lines import Indented, format_indented_lines
 from ffcx.codegeneration.C.format_value import (format_float, format_int,
                                                 format_value)
@@ -882,7 +883,7 @@ class Call(CExprOperator):
     def __init__(self, function, arguments=None):
         self.function = as_cexpr_or_string_symbol(function)
 
-        # Accept None, single, or multple arguments; literals or CExprs
+        # Accept None, single, or multiple arguments; literals or CExprs
         if arguments is None:
             arguments = ()
         elif not isinstance(arguments, (tuple, list)):
@@ -905,7 +906,7 @@ def Sqrt(x):
     return Call("sqrt", x)
 
 
-# Convertion function to expression nodes
+# Conversion function to expression nodes
 
 
 def _is_zero_valued(values):
diff --git a/ffcx/codegeneration/C/format_value.py b/ffcx/codegeneration/C/format_value.py
index 107f0e6..0f057a3 100644
--- a/ffcx/codegeneration/C/format_value.py
+++ b/ffcx/codegeneration/C/format_value.py
@@ -35,7 +35,7 @@ def format_int(x, precision=None):
 
 
 def format_value(value, precision=None):
-    """Format a literal value as s tring.
+    """Format a literal value as string.
 
     - float: Formatted according to current precision configuration.
 
diff --git a/ffcx/codegeneration/__init__.py b/ffcx/codegeneration/__init__.py
index 75dd8b7..c974111 100644
--- a/ffcx/codegeneration/__init__.py
+++ b/ffcx/codegeneration/__init__.py
@@ -1,5 +1,5 @@
-import os
 import hashlib
+import os
 
 # Version of FFCx header files
 __author__ = "FEniCS Project"
diff --git a/ffcx/codegeneration/access.py b/ffcx/codegeneration/access.py
index b1b4d28..c30f224 100644
--- a/ffcx/codegeneration/access.py
+++ b/ffcx/codegeneration/access.py
@@ -9,8 +9,8 @@ import logging
 import warnings
 
 import ufl
-from ffcx.element_interface import create_element
-from ufl.finiteelement import MixedElement
+from basix.ufl_wrapper import BlockedElement
+from ffcx.element_interface import convert_element, create_element
 
 logger = logging.getLogger("ffcx")
 
@@ -18,14 +18,14 @@ logger = logging.getLogger("ffcx")
 class FFCXBackendAccess(object):
     """FFCx specific cpp formatter class."""
 
-    def __init__(self, ir, language, symbols, parameters):
+    def __init__(self, ir, language, symbols, options):
 
-        # Store ir and parameters
+        # Store ir and options
         self.entitytype = ir.entitytype
         self.integral_type = ir.integral_type
         self.language = language
         self.symbols = symbols
-        self.parameters = parameters
+        self.options = options
 
         # Lookup table for handler to call when the "get" method (below) is
         # called, depending on the first argument type.
@@ -179,7 +179,7 @@ class FFCXBackendAccess(object):
 
     def reference_cell_volume(self, e, mt, tabledata, access):
         L = self.language
-        cellname = mt.terminal.ufl_domain().ufl_cell().cellname()
+        cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname()
         if cellname in ("interval", "triangle", "tetrahedron", "quadrilateral", "hexahedron"):
             return L.Symbol(f"{cellname}_reference_cell_volume")
         else:
@@ -187,7 +187,7 @@ class FFCXBackendAccess(object):
 
     def reference_facet_volume(self, e, mt, tabledata, access):
         L = self.language
-        cellname = mt.terminal.ufl_domain().ufl_cell().cellname()
+        cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname()
         if cellname in ("interval", "triangle", "tetrahedron", "quadrilateral", "hexahedron"):
             return L.Symbol(f"{cellname}_reference_facet_volume")
         else:
@@ -195,7 +195,7 @@ class FFCXBackendAccess(object):
 
     def reference_normal(self, e, mt, tabledata, access):
         L = self.language
-        cellname = mt.terminal.ufl_domain().ufl_cell().cellname()
+        cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname()
         if cellname in ("interval", "triangle", "tetrahedron", "quadrilateral", "hexahedron"):
             table = L.Symbol(f"{cellname}_reference_facet_normals")
             facet = self.symbols.entity("facet", mt.restriction)
@@ -205,7 +205,7 @@ class FFCXBackendAccess(object):
 
     def cell_facet_jacobian(self, e, mt, tabledata, num_points):
         L = self.language
-        cellname = mt.terminal.ufl_domain().ufl_cell().cellname()
+        cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname()
         if cellname in ("triangle", "tetrahedron", "quadrilateral", "hexahedron"):
             table = L.Symbol(f"{cellname}_reference_facet_jacobian")
             facet = self.symbols.entity("facet", mt.restriction)
@@ -217,7 +217,7 @@ class FFCXBackendAccess(object):
 
     def reference_cell_edge_vectors(self, e, mt, tabledata, num_points):
         L = self.language
-        cellname = mt.terminal.ufl_domain().ufl_cell().cellname()
+        cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname()
         if cellname in ("triangle", "tetrahedron", "quadrilateral", "hexahedron"):
             table = L.Symbol(f"{cellname}_reference_edge_vectors")
             return table[mt.component[0]][mt.component[1]]
@@ -228,7 +228,7 @@ class FFCXBackendAccess(object):
 
     def reference_facet_edge_vectors(self, e, mt, tabledata, num_points):
         L = self.language
-        cellname = mt.terminal.ufl_domain().ufl_cell().cellname()
+        cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname()
         if cellname in ("tetrahedron", "hexahedron"):
             table = L.Symbol(f"{cellname}_reference_edge_vectors")
             facet = self.symbols.entity("facet", mt.restriction)
@@ -242,7 +242,7 @@ class FFCXBackendAccess(object):
 
     def facet_orientation(self, e, mt, tabledata, num_points):
         L = self.language
-        cellname = mt.terminal.ufl_domain().ufl_cell().cellname()
+        cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname()
         if cellname not in ("interval", "triangle", "tetrahedron"):
             raise RuntimeError(f"Unhandled cell types {cellname}.")
 
@@ -252,19 +252,19 @@ class FFCXBackendAccess(object):
 
     def cell_vertices(self, e, mt, tabledata, num_points):
         # Get properties of domain
-        domain = mt.terminal.ufl_domain()
+        domain = ufl.domain.extract_unique_domain(mt.terminal)
         gdim = domain.geometric_dimension()
-        coordinate_element = domain.ufl_coordinate_element()
+        coordinate_element = convert_element(domain.ufl_coordinate_element())
 
         # Get dimension and dofmap of scalar element
-        assert isinstance(coordinate_element, MixedElement)
+        assert isinstance(coordinate_element, BlockedElement)
         assert coordinate_element.value_shape() == (gdim, )
         ufl_scalar_element, = set(coordinate_element.sub_elements())
-        assert ufl_scalar_element.family() in ("Lagrange", "Q", "S")
+        scalar_element = create_element(ufl_scalar_element)
+        assert scalar_element.value_size == 1 and scalar_element.block_size == 1
 
-        basix_scalar_element = create_element(ufl_scalar_element)
-        vertex_scalar_dofs = basix_scalar_element.entity_dofs[0]
-        num_scalar_dofs = basix_scalar_element.dim
+        vertex_scalar_dofs = scalar_element.entity_dofs[0]
+        num_scalar_dofs = scalar_element.dim
 
         # Get dof and component
         dof, = vertex_scalar_dofs[mt.component[0]]
@@ -275,10 +275,10 @@ class FFCXBackendAccess(object):
 
     def cell_edge_vectors(self, e, mt, tabledata, num_points):
         # Get properties of domain
-        domain = mt.terminal.ufl_domain()
+        domain = ufl.domain.extract_unique_domain(mt.terminal)
         cellname = domain.ufl_cell().cellname()
         gdim = domain.geometric_dimension()
-        coordinate_element = domain.ufl_coordinate_element()
+        coordinate_element = convert_element(domain.ufl_coordinate_element())
 
         if cellname in ("triangle", "tetrahedron", "quadrilateral", "hexahedron"):
             pass
@@ -288,18 +288,18 @@ class FFCXBackendAccess(object):
             raise RuntimeError(f"Unhandled cell types {cellname}.")
 
         # Get dimension and dofmap of scalar element
-        assert isinstance(coordinate_element, MixedElement)
+        assert isinstance(coordinate_element, BlockedElement)
         assert coordinate_element.value_shape() == (gdim, )
         ufl_scalar_element, = set(coordinate_element.sub_elements())
-        assert ufl_scalar_element.family() in ("Lagrange", "Q", "S")
+        scalar_element = create_element(ufl_scalar_element)
+        assert scalar_element.value_size == 1 and scalar_element.block_size == 1
 
-        basix_scalar_element = create_element(ufl_scalar_element)
-        vertex_scalar_dofs = basix_scalar_element.entity_dofs[0]
-        num_scalar_dofs = basix_scalar_element.dim
+        vertex_scalar_dofs = scalar_element.entity_dofs[0]
+        num_scalar_dofs = scalar_element.dim
 
         # Get edge vertices
         edge = mt.component[0]
-        vertex0, vertex1 = basix_scalar_element.reference_topology[1][edge]
+        vertex0, vertex1 = scalar_element.reference_topology[1][edge]
 
         # Get dofs and component
         dof0, = vertex_scalar_dofs[vertex0]
@@ -316,10 +316,10 @@ class FFCXBackendAccess(object):
         L = self.language
 
         # Get properties of domain
-        domain = mt.terminal.ufl_domain()
+        domain = ufl.domain.extract_unique_domain(mt.terminal)
         cellname = domain.ufl_cell().cellname()
         gdim = domain.geometric_dimension()
-        coordinate_element = domain.ufl_coordinate_element()
+        coordinate_element = convert_element(domain.ufl_coordinate_element())
 
         if cellname in ("tetrahedron", "hexahedron"):
             pass
@@ -330,13 +330,14 @@ class FFCXBackendAccess(object):
             raise RuntimeError(f"Unhandled cell types {cellname}.")
 
         # Get dimension and dofmap of scalar element
-        assert isinstance(coordinate_element, MixedElement)
+        assert isinstance(coordinate_element, BlockedElement)
         assert coordinate_element.value_shape() == (gdim, )
         ufl_scalar_element, = set(coordinate_element.sub_elements())
-        assert ufl_scalar_element.family() in ("Lagrange", "Q", "S")
+        scalar_element = create_element(ufl_scalar_element)
+        assert scalar_element.value_size == 1 and scalar_element.block_size == 1
 
-        basix_scalar_element = create_element(ufl_scalar_element)
-        num_scalar_dofs = basix_scalar_element.dim
+        scalar_element = create_element(ufl_scalar_element)
+        num_scalar_dofs = scalar_element.dim
 
         # Get edge vertices
         facet = self.symbols.entity("facet", mt.restriction)
diff --git a/ffcx/codegeneration/backend.py b/ffcx/codegeneration/backend.py
index 14999e8..0b9c5d8 100644
--- a/ffcx/codegeneration/backend.py
+++ b/ffcx/codegeneration/backend.py
@@ -5,6 +5,8 @@
 # SPDX-License-Identifier:    LGPL-3.0-or-later
 """Collection of FFCx specific pieces for the code generation phase."""
 
+import types
+
 import ffcx.codegeneration.C.cnodes
 from ffcx.codegeneration.access import FFCXBackendAccess
 from ffcx.codegeneration.C.ufl_to_cnodes import UFL2CNodesTranslatorCpp
@@ -15,11 +17,11 @@ from ffcx.codegeneration.symbols import FFCXBackendSymbols
 class FFCXBackend(object):
     """Class collecting all aspects of the FFCx backend."""
 
-    def __init__(self, ir, parameters):
+    def __init__(self, ir, options):
 
         # This is the seam where cnodes/C is chosen for the FFCx backend
-        self.language = ffcx.codegeneration.C.cnodes
-        scalar_type = parameters["scalar_type"]
+        self.language: types.ModuleType = ffcx.codegeneration.C.cnodes
+        scalar_type = options["scalar_type"]
         self.ufl_to_language = UFL2CNodesTranslatorCpp(self.language, scalar_type)
 
         coefficient_numbering = ir.coefficient_numbering
@@ -30,6 +32,6 @@ class FFCXBackend(object):
         self.symbols = FFCXBackendSymbols(self.language, coefficient_numbering,
                                           coefficient_offsets, original_constant_offsets)
         self.definitions = FFCXBackendDefinitions(ir, self.language,
-                                                  self.symbols, parameters)
+                                                  self.symbols, options)
         self.access = FFCXBackendAccess(ir, self.language, self.symbols,
-                                        parameters)
+                                        options)
diff --git a/ffcx/codegeneration/basix_custom_element_template.py b/ffcx/codegeneration/basix_custom_element_template.py
index 2470715..f5fb55c 100644
--- a/ffcx/codegeneration/basix_custom_element_template.py
+++ b/ffcx/codegeneration/basix_custom_element_template.py
@@ -26,6 +26,7 @@ ufcx_basix_custom_finite_element {factory_name} =
   .x = {x},
   .M = {M},
   .map_type = {map_type},
+  .sobolev_space = {sobolev_space},
   .discontinuous = {discontinuous},
   .highest_complete_degree = {highest_complete_degree},
   .interpolation_nderivs = {interpolation_nderivs},
diff --git a/ffcx/codegeneration/codegeneration.py b/ffcx/codegeneration/codegeneration.py
index c74927f..fb5bbd2 100644
--- a/ffcx/codegeneration/codegeneration.py
+++ b/ffcx/codegeneration/codegeneration.py
@@ -38,17 +38,17 @@ class CodeBlocks(typing.NamedTuple):
     expressions: typing.List[typing.Tuple[str, str]]
 
 
-def generate_code(ir, parameters) -> CodeBlocks:
+def generate_code(ir, options) -> CodeBlocks:
     """Generate code blocks from intermediate representation."""
     logger.info(79 * "*")
     logger.info("Compiler stage 3: Generating code")
     logger.info(79 * "*")
 
     # Generate code for finite_elements
-    code_finite_elements = [finite_element_generator(element_ir, parameters) for element_ir in ir.elements]
-    code_dofmaps = [dofmap_generator(dofmap_ir, parameters) for dofmap_ir in ir.dofmaps]
-    code_integrals = [integral_generator(integral_ir, parameters) for integral_ir in ir.integrals]
-    code_forms = [form_generator(form_ir, parameters) for form_ir in ir.forms]
-    code_expressions = [expression_generator(expression_ir, parameters) for expression_ir in ir.expressions]
+    code_finite_elements = [finite_element_generator(element_ir, options) for element_ir in ir.elements]
+    code_dofmaps = [dofmap_generator(dofmap_ir, options) for dofmap_ir in ir.dofmaps]
+    code_integrals = [integral_generator(integral_ir, options) for integral_ir in ir.integrals]
+    code_forms = [form_generator(form_ir, options) for form_ir in ir.forms]
+    code_expressions = [expression_generator(expression_ir, options) for expression_ir in ir.expressions]
     return CodeBlocks(elements=code_finite_elements, dofmaps=code_dofmaps,
                       integrals=code_integrals, forms=code_forms, expressions=code_expressions)
diff --git a/ffcx/codegeneration/definitions.py b/ffcx/codegeneration/definitions.py
index b5a804e..1ea67f5 100644
--- a/ffcx/codegeneration/definitions.py
+++ b/ffcx/codegeneration/definitions.py
@@ -17,13 +17,13 @@ logger = logging.getLogger("ffcx")
 class FFCXBackendDefinitions(object):
     """FFCx specific code definitions."""
 
-    def __init__(self, ir, language, symbols, parameters):
-        # Store ir and parameters
+    def __init__(self, ir, language, symbols, options):
+        # Store ir and options
         self.integral_type = ir.integral_type
         self.entitytype = ir.entitytype
         self.language = language
         self.symbols = symbols
-        self.parameters = parameters
+        self.options = options
 
         self.ir = ir
 
@@ -99,14 +99,14 @@ class FFCXBackendDefinitions(object):
 
             # If a map is necessary from stride 1 to bs, the code must be added before the quadrature loop.
             if dof_access_map:
-                pre_code += [L.ArrayDecl(self.parameters["scalar_type"], dof_access.array, num_dofs)]
+                pre_code += [L.ArrayDecl(self.options["scalar_type"], dof_access.array, num_dofs)]
                 pre_body = L.Assign(dof_access, dof_access_map)
                 pre_code += [L.ForRange(ic, 0, num_dofs, pre_body)]
         else:
             dof_access = self.symbols.coefficient_dof_access(mt.terminal, ic * bs + begin)
 
         body = [L.AssignAdd(access, dof_access * FE[ic])]
-        code += [L.VariableDecl(self.parameters["scalar_type"], access, 0.0)]
+        code += [L.VariableDecl(self.options["scalar_type"], access, 0.0)]
         code += [L.ForRange(ic, 0, num_dofs, body)]
 
         return pre_code, code
@@ -122,7 +122,7 @@ class FFCXBackendDefinitions(object):
         L = self.language
 
         # Get properties of domain
-        domain = mt.terminal.ufl_domain()
+        domain = ufl.domain.extract_unique_domain(mt.terminal)
         coordinate_element = domain.ufl_coordinate_element()
         num_scalar_dofs = create_element(coordinate_element).sub_element.dim
 
@@ -148,7 +148,7 @@ class FFCXBackendDefinitions(object):
         if mt.restriction == "-":
             offset = num_scalar_dofs * dim
 
-        value_type = scalar_to_value_type(self.parameters["scalar_type"])
+        value_type = scalar_to_value_type(self.options["scalar_type"])
 
         code = []
         body = [L.AssignAdd(access, dof_access[ic * dim + begin + offset] * FE[ic])]
diff --git a/ffcx/codegeneration/dofmap.py b/ffcx/codegeneration/dofmap.py
index 0f17512..d8bfc9d 100644
--- a/ffcx/codegeneration/dofmap.py
+++ b/ffcx/codegeneration/dofmap.py
@@ -12,7 +12,6 @@ import typing
 
 import ffcx.codegeneration.dofmap_template as ufcx_dofmap
 
-
 logger = logging.getLogger("ffcx")
 
 
@@ -55,7 +54,7 @@ def tabulate_entity_dofs(L, entity_dofs: typing.List[typing.List[typing.List[int
         return L.NoOp()
 
 
-def generator(ir, parameters):
+def generator(ir, options):
     """Generate UFC code for a dofmap."""
     logger.info("Generating code for dofmap:")
     logger.info(f"--- num element support dofs: {ir.num_element_support_dofs}")
@@ -105,7 +104,7 @@ def generator(ir, parameters):
     # Remove square brackets from any field names
     fields = [f.split("[")[0] for f in fields]
     assert set(fields) == set(
-        d.keys()), "Mismatch between keys in template and in formattting dict."
+        d.keys()), "Mismatch between keys in template and in formatting dict."
 
     # Format implementation code
     implementation = ufcx_dofmap.factory.format_map(d)
diff --git a/ffcx/codegeneration/expressions.py b/ffcx/codegeneration/expressions.py
index 63e80da..330b65d 100644
--- a/ffcx/codegeneration/expressions.py
+++ b/ffcx/codegeneration/expressions.py
@@ -20,7 +20,7 @@ from ffcx.naming import cdtype_to_numpy, scalar_to_value_type
 logger = logging.getLogger("ffcx")
 
 
-def generator(ir, parameters):
+def generator(ir, options):
     """Generate UFC code for an expression."""
     logger.info("Generating code for expression:")
     logger.info(f"--- points: {ir.points}")
@@ -32,7 +32,7 @@ def generator(ir, parameters):
     declaration = expressions_template.declaration.format(
         factory_name=factory_name, name_from_uflfile=ir.name_from_uflfile)
 
-    backend = FFCXBackend(ir, parameters)
+    backend = FFCXBackend(ir, options)
     L = backend.language
     eg = ExpressionGenerator(ir, backend)
 
@@ -71,9 +71,9 @@ def generator(ir, parameters):
     d["num_constants"] = len(ir.constant_names)
     d["num_points"] = ir.points.shape[0]
     d["topological_dimension"] = ir.points.shape[1]
-    d["scalar_type"] = parameters["scalar_type"]
-    d["geom_type"] = scalar_to_value_type(parameters["scalar_type"])
-    d["np_scalar_type"] = cdtype_to_numpy(parameters["scalar_type"])
+    d["scalar_type"] = options["scalar_type"]
+    d["geom_type"] = scalar_to_value_type(options["scalar_type"])
+    d["np_scalar_type"] = cdtype_to_numpy(options["scalar_type"])
 
     d["rank"] = len(ir.tensor_shape)
 
@@ -124,7 +124,7 @@ def generator(ir, parameters):
     # Check that no keys are redundant or have been missed
     from string import Formatter
     fields = [fname for _, fname, _, _ in Formatter().parse(expressions_template.factory) if fname]
-    assert set(fields) == set(d.keys()), "Mismatch between keys in template and in formattting dict"
+    assert set(fields) == set(d.keys()), "Mismatch between keys in template and in formatting dict"
 
     # Format implementation code
     implementation = expressions_template.factory.format_map(d)
@@ -150,10 +150,12 @@ class ExpressionGenerator:
         L = self.backend.language
 
         parts = []
+        scalar_type = self.backend.access.options["scalar_type"]
+        value_type = scalar_to_value_type(scalar_type)
 
-        parts += self.generate_element_tables()
+        parts += self.generate_element_tables(value_type)
         # Generate the tables of geometry data that are needed
-        parts += self.generate_geometry_tables()
+        parts += self.generate_geometry_tables(value_type)
         parts += self.generate_piecewise_partition()
 
         all_preparts = []
@@ -169,15 +171,15 @@ class ExpressionGenerator:
 
         return L.StatementList(parts)
 
-    def generate_geometry_tables(self):
+    def generate_geometry_tables(self, float_type: str):
         """Generate static tables of geometry data."""
         L = self.backend.language
 
         # Currently we only support circumradius
         ufl_geometry = {
-            ufl.geometry.ReferenceCellVolume: "reference_cell_volume"
+            ufl.geometry.ReferenceCellVolume: "reference_cell_volume",
         }
-        cells = {t: set() for t in ufl_geometry.keys()}
+        cells: Dict[Any, Set[Any]] = {t: set() for t in ufl_geometry.keys()}
 
         for integrand in self.ir.integrand.values():
             for attr in integrand["factorization"].nodes.values():
@@ -185,31 +187,29 @@ class ExpressionGenerator:
                 if mt is not None:
                     t = type(mt.terminal)
                     if t in ufl_geometry:
-                        cells[t].add(mt.terminal.ufl_domain().ufl_cell().cellname())
+                        cells[t].add(ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname())
 
         parts = []
         for i, cell_list in cells.items():
             for c in cell_list:
-                parts.append(geometry.write_table(L, ufl_geometry[i], c))
+                parts.append(geometry.write_table(L, ufl_geometry[i], c, float_type))
 
         return parts
 
-    def generate_element_tables(self):
+    def generate_element_tables(self, float_type: str):
         """Generate tables of FE basis evaluated at specified points."""
         L = self.backend.language
         parts = []
 
         tables = self.ir.unique_tables
 
-        padlen = self.ir.params["padlen"]
+        padlen = self.ir.options["padlen"]
         table_names = sorted(tables)
 
-        scalar_type = self.backend.access.parameters["scalar_type"]
-
         for name in table_names:
             table = tables[name]
             decl = L.ArrayDecl(
-                f"static const {scalar_type}", name, table.shape, table, padlen=padlen)
+                f"static const {float_type}", name, table.shape, table, padlen=padlen)
             parts += [decl]
 
         # Add leading comment if there are any tables
@@ -263,7 +263,7 @@ class ExpressionGenerator:
         return parts
 
     def generate_piecewise_partition(self):
-        """Generate factors of blocks which are constant (i.e. do not depent on quadrature points)."""
+        """Generate factors of blocks which are constant (i.e. do not depend on quadrature points)."""
         L = self.backend.language
 
         # Get annotated graph of factorisation
@@ -393,7 +393,7 @@ class ExpressionGenerator:
     def get_arg_factors(self, blockdata, block_rank, indices):
         """Get argument factors (i.e. blocks).
 
-        Parameters
+        Options
         ----------
         blockdata
         block_rank
@@ -504,7 +504,7 @@ class ExpressionGenerator:
                         vaccess = symbol[j]
                         intermediates.append(L.Assign(vaccess, vexpr))
                     else:
-                        scalar_type = self.backend.access.parameters["scalar_type"]
+                        scalar_type = self.backend.access.options["scalar_type"]
                         vaccess = L.Symbol("%s_%d" % (symbol.name, j))
                         intermediates.append(L.VariableDecl(f"const {scalar_type}", vaccess, vexpr))
 
@@ -523,7 +523,7 @@ class ExpressionGenerator:
 
         if intermediates:
             if use_symbol_array:
-                scalar_type = self.backend.access.parameters["scalar_type"]
+                scalar_type = self.backend.access.options["scalar_type"]
                 parts += [L.ArrayDecl(scalar_type, symbol, len(intermediates))]
             parts += intermediates
         return parts
diff --git a/ffcx/codegeneration/finite_element.py b/ffcx/codegeneration/finite_element.py
index 9b1faa6..1a2fb71 100644
--- a/ffcx/codegeneration/finite_element.py
+++ b/ffcx/codegeneration/finite_element.py
@@ -10,15 +10,15 @@
 
 import logging
 
-import ffcx.codegeneration.finite_element_template as ufcx_finite_element
 import ffcx.codegeneration.basix_custom_element_template as ufcx_basix_custom_finite_element
+import ffcx.codegeneration.finite_element_template as ufcx_finite_element
 import ufl
 
 logger = logging.getLogger("ffcx")
 index_type = "int"
 
 
-def generator(ir, parameters):
+def generator(ir, options):
     """Generate UFC code for a finite element."""
     logger.info("Generating code for finite element:")
     logger.info(f"--- family: {ir.family}")
@@ -104,7 +104,7 @@ def generator(ir, parameters):
         fname for _, fname, _, _ in Formatter().parse(ufcx_finite_element.factory) if fname
     ]
     assert set(fieldnames) == set(
-        d.keys()), "Mismatch between keys in template and in formattting dict"
+        d.keys()), "Mismatch between keys in template and in formatting dict"
 
     # Format implementation code
     implementation = ufcx_finite_element.factory.format_map(d)
@@ -120,6 +120,7 @@ def generate_custom_element(name, ir):
     d["factory_name"] = name
     d["cell_type"] = int(ir.cell_type)
     d["map_type"] = int(ir.map_type)
+    d["sobolev_space"] = int(ir.sobolev_space)
     d["highest_complete_degree"] = ir.highest_complete_degree
     d["highest_degree"] = ir.highest_degree
     d["discontinuous"] = "true" if ir.discontinuous else "false"
@@ -180,7 +181,7 @@ def generate_custom_element(name, ir):
         fname for _, fname, _, _ in Formatter().parse(ufcx_basix_custom_finite_element.factory) if fname
     ]
     assert set(fieldnames) == set(
-        d.keys()), "Mismatch between keys in template and in formattting dict"
+        d.keys()), "Mismatch between keys in template and in formatting dict"
 
     # Format implementation code
     implementation = ufcx_basix_custom_finite_element.factory.format_map(d)
diff --git a/ffcx/codegeneration/flop_count.py b/ffcx/codegeneration/flop_count.py
index 6e160d7..dc2a140 100644
--- a/ffcx/codegeneration/flop_count.py
+++ b/ffcx/codegeneration/flop_count.py
@@ -5,7 +5,7 @@
 
 from typing import Optional
 
-import ffcx.parameters
+import ffcx.options
 import ufl
 from ffcx.analysis import analyze_ufl_objects
 from ffcx.codegeneration.backend import FFCXBackend
@@ -13,18 +13,18 @@ from ffcx.codegeneration.integrals import IntegralGenerator
 from ffcx.ir.representation import compute_ir
 
 
-def count_flops(form: ufl.Form, parameters: Optional[dict] = {}):
+def count_flops(form: ufl.Form, options: Optional[dict] = {}):
     """Return a list with the number of flops for each kernel in the Form."""
-    parameters = ffcx.parameters.get_parameters(parameters)
+    options = ffcx.options.get_options(options)
     assert isinstance(form, ufl.Form)
-    analysis = analyze_ufl_objects([form], parameters)
-    ir = compute_ir(analysis, {}, "flops", parameters, False)
+    analysis = analyze_ufl_objects([form], options)
+    ir = compute_ir(analysis, {}, "flops", options, False)
 
     flops = []
 
     for integral_ir in ir.integrals:
         # Create FFCx C backend
-        backend = FFCXBackend(integral_ir, parameters)
+        backend = FFCXBackend(integral_ir, options)
         # Configure kernel generator
         ig = IntegralGenerator(integral_ir, backend)
         # Generate code ast for the tabulate_tensor body
diff --git a/ffcx/codegeneration/form.py b/ffcx/codegeneration/form.py
index 80ec501..a2bac06 100644
--- a/ffcx/codegeneration/form.py
+++ b/ffcx/codegeneration/form.py
@@ -14,7 +14,7 @@ from ffcx.codegeneration import form_template
 logger = logging.getLogger("ffcx")
 
 
-def generator(ir, parameters):
+def generator(ir, options):
     """Generate UFC code for a form."""
     logger.info("Generating code for form:")
     logger.info(f"--- rank: {ir.rank}")
@@ -134,7 +134,7 @@ def generator(ir, parameters):
     # Check that no keys are redundant or have been missed
     from string import Formatter
     fields = [fname for _, fname, _, _ in Formatter().parse(form_template.factory) if fname]
-    assert set(fields) == set(d.keys()), "Mismatch between keys in template and in formattting dict"
+    assert set(fields) == set(d.keys()), "Mismatch between keys in template and in formatting dict"
 
     # Format implementation code
     implementation = form_template.factory.format_map(d)
diff --git a/ffcx/codegeneration/geometry.py b/ffcx/codegeneration/geometry.py
index 113e00c..090f0bb 100644
--- a/ffcx/codegeneration/geometry.py
+++ b/ffcx/codegeneration/geometry.py
@@ -4,9 +4,10 @@
 #
 # SPDX-License-Identifier:    LGPL-3.0-or-later
 
-import basix
 import numpy
 
+import basix
+
 
 def write_table(L, tablename, cellname, type: str):
     if tablename == "facet_edge_vertices":
diff --git a/ffcx/codegeneration/integrals.py b/ffcx/codegeneration/integrals.py
index d9e75c3..cd2c105 100644
--- a/ffcx/codegeneration/integrals.py
+++ b/ffcx/codegeneration/integrals.py
@@ -6,7 +6,7 @@
 
 import collections
 import logging
-from typing import List, Tuple
+from typing import Any, Dict, List, Set, Tuple
 
 import ufl
 from ffcx.codegeneration import geometry
@@ -22,7 +22,7 @@ from ffcx.naming import cdtype_to_numpy, scalar_to_value_type
 logger = logging.getLogger("ffcx")
 
 
-def generator(ir, parameters):
+def generator(ir, options):
     logger.info("Generating code for integral:")
     logger.info(f"--- type: {ir.integral_type}")
     logger.info(f"--- name: {ir.name}")
@@ -34,7 +34,7 @@ def generator(ir, parameters):
     declaration = ufcx_integrals.declaration.format(factory_name=factory_name)
 
     # Create FFCx C backend
-    backend = FFCXBackend(ir, parameters)
+    backend = FFCXBackend(ir, options)
 
     # Configure kernel generator
     ig = IntegralGenerator(ir, backend)
@@ -68,7 +68,7 @@ def generator(ir, parameters):
     code["additional_includes_set"] = set()  # FIXME: Get this out of code[]
     code["tabulate_tensor"] = body
 
-    if parameters["tabulate_tensor_void"]:
+    if options["tabulate_tensor_void"]:
         code["tabulate_tensor"] = ""
 
     implementation = ufcx_integrals.factory.format(
@@ -77,9 +77,9 @@ def generator(ir, parameters):
         enabled_coefficients_init=code["enabled_coefficients_init"],
         tabulate_tensor=code["tabulate_tensor"],
         needs_facet_permutations="true" if ir.needs_facet_permutations else "false",
-        scalar_type=parameters["scalar_type"],
-        geom_type=scalar_to_value_type(parameters["scalar_type"]),
-        np_scalar_type=cdtype_to_numpy(parameters["scalar_type"]),
+        scalar_type=options["scalar_type"],
+        geom_type=scalar_to_value_type(options["scalar_type"]),
+        np_scalar_type=cdtype_to_numpy(options["scalar_type"]),
         coordinate_element=L.AddressOf(L.Symbol(ir.coordinate_element)))
 
     return declaration, implementation
@@ -177,11 +177,11 @@ class IntegralGenerator(object):
         assert not any(d for d in self.scopes.values())
 
         parts = []
-        scalar_type = self.backend.access.parameters["scalar_type"]
+        scalar_type = self.backend.access.options["scalar_type"]
         value_type = scalar_to_value_type(scalar_type)
-        alignment = self.ir.params['assume_aligned']
+        alignment = self.ir.options['assume_aligned']
         if alignment != -1:
-            scalar_type = self.backend.access.parameters["scalar_type"]
+            scalar_type = self.backend.access.options["scalar_type"]
             parts += [L.VerbatimStatement(f"A = ({scalar_type}*)__builtin_assume_aligned(A, {alignment});"),
                       L.VerbatimStatement(f"w = (const {scalar_type}*)__builtin_assume_aligned(w, {alignment});"),
                       L.VerbatimStatement(f"c = (const {scalar_type}*)__builtin_assume_aligned(c, {alignment});"),
@@ -238,7 +238,7 @@ class IntegralGenerator(object):
         if self.ir.integral_type in skip:
             return parts
 
-        padlen = self.ir.params["padlen"]
+        padlen = self.ir.options["padlen"]
 
         # Loop over quadrature rules
         for quadrature_rule, integrand in self.ir.integrand.items():
@@ -253,7 +253,7 @@ class IntegralGenerator(object):
         parts = L.commented_code_list(parts, "Quadrature rules")
         return parts
 
-    def generate_geometry_tables(self, float_type):
+    def generate_geometry_tables(self, float_type: str):
         """Generate static tables of geometry data."""
         L = self.backend.language
 
@@ -267,7 +267,7 @@ class IntegralGenerator(object):
             ufl.geometry.ReferenceNormal: "reference_facet_normals",
             ufl.geometry.FacetOrientation: "facet_orientation"
         }
-        cells = {t: set() for t in ufl_geometry.keys()}
+        cells: Dict[Any, Set[Any]] = {t: set() for t in ufl_geometry.keys()}
 
         for integrand in self.ir.integrand.values():
             for attr in integrand["factorization"].nodes.values():
@@ -275,7 +275,7 @@ class IntegralGenerator(object):
                 if mt is not None:
                     t = type(mt.terminal)
                     if t in ufl_geometry:
-                        cells[t].add(mt.terminal.ufl_domain().ufl_cell().cellname())
+                        cells[t].add(ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname())
 
         parts = []
         for i, cell_list in cells.items():
@@ -290,7 +290,7 @@ class IntegralGenerator(object):
         parts = []
         tables = self.ir.unique_tables
         table_types = self.ir.unique_table_types
-        padlen = self.ir.params["padlen"]
+        padlen = self.ir.options["padlen"]
         if self.ir.integral_type in ufl.custom_integral_types:
             # Define only piecewise tables
             table_names = [name for name in sorted(tables) if table_types[name] in piecewise_ttypes]
@@ -445,7 +445,7 @@ class IntegralGenerator(object):
                             vaccess = symbol[j]
                             intermediates.append(L.Assign(vaccess, vexpr))
                         else:
-                            scalar_type = self.backend.access.parameters["scalar_type"]
+                            scalar_type = self.backend.access.options["scalar_type"]
                             vaccess = L.Symbol("%s_%d" % (symbol.name, j))
                             intermediates.append(L.VariableDecl(f"const {scalar_type}", vaccess, vexpr))
 
@@ -459,8 +459,8 @@ class IntegralGenerator(object):
 
         if intermediates:
             if use_symbol_array:
-                padlen = self.ir.params["padlen"]
-                parts += [L.ArrayDecl(self.backend.access.parameters["scalar_type"],
+                padlen = self.ir.options["padlen"]
+                parts += [L.ArrayDecl(self.backend.access.options["scalar_type"],
                                       symbol, len(intermediates), padlen=padlen)]
             parts += intermediates
         return pre_definitions, parts
@@ -527,7 +527,7 @@ class IntegralGenerator(object):
     def generate_block_parts(self, quadrature_rule: QuadratureRule, blockmap: Tuple, blocklist: List[BlockDataT]):
         """Generate and return code parts for a given block.
 
-        Returns parts occuring before, inside, and after the quadrature
+        Returns parts occurring before, inside, and after the quadrature
         loop identified by the quadrature rule.
 
         Should be called with quadrature_rule=None for
@@ -539,7 +539,7 @@ class IntegralGenerator(object):
         preparts: List[CNode] = []
         quadparts: List[CNode] = []
 
-        # RHS expressiong grouped by LHS "dofmap"
+        # RHS expressions grouped by LHS "dofmap"
         rhs_expressions = collections.defaultdict(list)
 
         block_rank = len(blockmap)
@@ -589,7 +589,7 @@ class IntegralGenerator(object):
                 key = (quadrature_rule, factor_index, blockdata.all_factors_piecewise)
                 fw, defined = self.get_temp_symbol("fw", key)
                 if not defined:
-                    scalar_type = self.backend.access.parameters["scalar_type"]
+                    scalar_type = self.backend.access.options["scalar_type"]
                     quadparts.append(L.VariableDecl(f"const {scalar_type}", fw, fw_rhs))
 
             assert not blockdata.transposed, "Not handled yet"
@@ -657,7 +657,7 @@ class IntegralGenerator(object):
                         keep[indices].append(L.float_product([statement, lhs]))
                     else:
                         t = self.new_temp_symbol("t")
-                        scalar_type = self.backend.access.parameters["scalar_type"]
+                        scalar_type = self.backend.access.options["scalar_type"]
                         pre_loop.append(L.ArrayDecl(scalar_type, t, blockdims[0]))
                         keep[indices].append(L.float_product([statement, t[B_indices[0]]]))
                         hoist.append(L.Assign(t[B_indices[i - 1]], sum))
diff --git a/ffcx/codegeneration/jit.py b/ffcx/codegeneration/jit.py
index d6ec649..e82cc16 100644
--- a/ffcx/codegeneration/jit.py
+++ b/ffcx/codegeneration/jit.py
@@ -9,12 +9,14 @@ import io
 import logging
 import os
 import re
+import sysconfig
 import tempfile
 import time
 from contextlib import redirect_stdout
 from pathlib import Path
 
 import cffi
+
 import ffcx
 import ffcx.naming
 
@@ -46,9 +48,9 @@ UFC_INTEGRAL_DECL += '\n'.join(re.findall('typedef struct ufcx_integral.*?ufcx_i
 UFC_EXPRESSION_DECL = '\n'.join(re.findall('typedef struct ufcx_expression.*?ufcx_expression;', ufcx_h, re.DOTALL))
 
 
-def _compute_parameter_signature(parameters):
-    """Return parameters signature (some parameters should not affect signature)."""
-    return str(sorted(parameters.items()))
+def _compute_option_signature(options):
+    """Return options signature (some options should not affect signature)."""
+    return str(sorted(options.items()))
 
 
 def get_cached_module(module_name, object_names, cache_dir, timeout):
@@ -85,18 +87,34 @@ def get_cached_module(module_name, object_names, cache_dir, timeout):
             logger.info(f"Waiting for {ready_name} to appear.")
             time.sleep(1)
         raise TimeoutError(f"""JIT compilation timed out, probably due to a failed previous compile.
-        Try cleaning cache (e.g. remove {c_filename}) or increase timeout parameter.""")
+        Try cleaning cache (e.g. remove {c_filename}) or increase timeout option.""")
+
+
+def _compilation_signature(cffi_extra_compile_args=None, cffi_debug=None):
+    """Compute the compilation-inputs part of the signature.
+
+    Used to avoid cache conflicts across Python versions, architectures, installs.
+
+    - SOABI includes platform, Python version, debug flags
+    - CFLAGS includes prefixes, arch targets
+    """
+    return (
+        str(cffi_extra_compile_args)
+        + str(cffi_debug)
+        + sysconfig.get_config_var("CFLAGS")
+        + sysconfig.get_config_var("SOABI")
+    )
 
 
-def compile_elements(elements, parameters=None, cache_dir=None, timeout=10, cffi_extra_compile_args=None,
+def compile_elements(elements, options=None, cache_dir=None, timeout=10, cffi_extra_compile_args=None,
                      cffi_verbose=False, cffi_debug=None, cffi_libraries=None):
     """Compile a list of UFL elements and dofmaps into Python objects."""
-    p = ffcx.parameters.get_parameters(parameters)
+    p = ffcx.options.get_options(options)
 
     # Get a signature for these elements
     module_name = 'libffcx_elements_' + \
-        ffcx.naming.compute_signature(elements, _compute_parameter_signature(p)
-                                      + str(cffi_extra_compile_args) + str(cffi_debug))
+        ffcx.naming.compute_signature(elements, _compute_option_signature(p)
+                                      + _compilation_signature(cffi_extra_compile_args, cffi_debug))
 
     names = []
     for e in elements:
@@ -137,15 +155,15 @@ def compile_elements(elements, parameters=None, cache_dir=None, timeout=10, cffi
     return objects, module, (decl, impl)
 
 
-def compile_forms(forms, parameters=None, cache_dir=None, timeout=10, cffi_extra_compile_args=None,
+def compile_forms(forms, options=None, cache_dir=None, timeout=10, cffi_extra_compile_args=None,
                   cffi_verbose=False, cffi_debug=None, cffi_libraries=None):
     """Compile a list of UFL forms into UFC Python objects."""
-    p = ffcx.parameters.get_parameters(parameters)
+    p = ffcx.options.get_options(options)
 
     # Get a signature for these forms
     module_name = 'libffcx_forms_' + \
-        ffcx.naming.compute_signature(forms, _compute_parameter_signature(p)
-                                      + str(cffi_extra_compile_args) + str(cffi_debug))
+        ffcx.naming.compute_signature(forms, _compute_option_signature(p)
+                                      + _compilation_signature(cffi_extra_compile_args, cffi_debug))
 
     form_names = [ffcx.naming.form_name(form, i, module_name) for i, form in enumerate(forms)]
 
@@ -177,21 +195,21 @@ def compile_forms(forms, parameters=None, cache_dir=None, timeout=10, cffi_extra
     return obj, module, (decl, impl)
 
 
-def compile_expressions(expressions, parameters=None, cache_dir=None, timeout=10, cffi_extra_compile_args=None,
+def compile_expressions(expressions, options=None, cache_dir=None, timeout=10, cffi_extra_compile_args=None,
                         cffi_verbose=False, cffi_debug=None, cffi_libraries=None):
     """Compile a list of UFL expressions into UFC Python objects.
 
-    Parameters
+    Options
     ----------
     expressions
         List of (UFL expression, evaluation points).
 
     """
-    p = ffcx.parameters.get_parameters(parameters)
+    p = ffcx.options.get_options(options)
 
     module_name = 'libffcx_expressions_' + \
-        ffcx.naming.compute_signature(expressions, _compute_parameter_signature(p)
-                                      + str(cffi_extra_compile_args) + str(cffi_debug))
+        ffcx.naming.compute_signature(expressions, _compute_option_signature(p)
+                                      + _compilation_signature(cffi_extra_compile_args, cffi_debug))
     expr_names = [ffcx.naming.expression_name(expression, module_name) for expression in expressions]
 
     if cache_dir is not None:
@@ -222,14 +240,14 @@ def compile_expressions(expressions, parameters=None, cache_dir=None, timeout=10
     return obj, module, (decl, impl)
 
 
-def _compile_objects(decl, ufl_objects, object_names, module_name, parameters, cache_dir,
+def _compile_objects(decl, ufl_objects, object_names, module_name, options, cache_dir,
                      cffi_extra_compile_args, cffi_verbose, cffi_debug, cffi_libraries):
 
     import ffcx.compiler
 
     # JIT uses module_name as prefix, which is needed to make names of all struct/function
     # unique across modules
-    _, code_body = ffcx.compiler.compile_ufl_objects(ufl_objects, prefix=module_name, parameters=parameters)
+    _, code_body = ffcx.compiler.compile_ufl_objects(ufl_objects, prefix=module_name, options=options)
 
     ffibuilder = cffi.FFI()
     ffibuilder.set_source(module_name, code_body, include_dirs=[ffcx.codegeneration.get_include_path()],
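
The JIT entry points above now take an `options` dict instead of `parameters`, and the module cache signature additionally folds in the compiler flags and SOABI. A minimal usage sketch (the Laplace form below is illustrative, mirroring the test suite, and is not part of this diff):

    import ufl
    import ffcx.codegeneration.jit

    # Illustrative form set-up using UFL's legacy element constructor.
    element = ufl.FiniteElement("Lagrange", ufl.triangle, 1)
    u, v = ufl.TrialFunction(element), ufl.TestFunction(element)
    a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx

    # The keyword is now `options`; the old `parameters` name is gone.
    compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
        [a], options={"scalar_type": "double"})
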
diff --git a/ffcx/codegeneration/symbols.py b/ffcx/codegeneration/symbols.py
index 4125648..5140d7c 100644
--- a/ffcx/codegeneration/symbols.py
+++ b/ffcx/codegeneration/symbols.py
@@ -45,7 +45,7 @@ def format_mt_name(basename, mt):
     # Format local derivatives
     if mt.local_derivatives:
         # Convert "listing" derivative multindex into "counting" representation
-        gdim = mt.terminal.ufl_domain().geometric_dimension()
+        gdim = ufl.domain.extract_unique_domain(mt.terminal).geometric_dimension()
         ld_counting = ufl.utils.derivativetuples.derivative_listing_to_counts(mt.local_derivatives, gdim)
         der = f"_d{''.join(map(str, ld_counting))}"
         access += der
diff --git a/ffcx/codegeneration/ufcx.h b/ffcx/codegeneration/ufcx.h
index 0eb8bea..64baa2b 100644
--- a/ffcx/codegeneration/ufcx.h
+++ b/ffcx/codegeneration/ufcx.h
@@ -10,7 +10,7 @@
 #pragma once
 
 #define UFCX_VERSION_MAJOR 0
-#define UFCX_VERSION_MINOR 5
+#define UFCX_VERSION_MINOR 6
 #define UFCX_VERSION_MAINTENANCE 0
 #define UFCX_VERSION_RELEASE 1
 
@@ -64,7 +64,8 @@ extern "C"
     ufcx_basix_element = 0,
     ufcx_mixed_element = 1,
     ufcx_quadrature_element = 2,
-    ufcx_basix_custom_element = 3
+    ufcx_basix_custom_element = 3,
+    ufcx_real_element = 4,
   } ufcx_element_type;
 
   /// Forward declarations
@@ -163,10 +164,10 @@ extern "C"
     /// The number of rows in the wcoeffs matrix
     int wcoeffs_rows;
 
-    /// The number of columnss in the wcoeffs matrix
+    /// The number of columns in the wcoeffs matrix
     int wcoeffs_cols;
 
-    /// The coefficents that define the polynomial set of the element in terms
+    /// The coefficients that define the polynomial set of the element in terms
     /// of the orthonormal polynomials on the cell
     double* wcoeffs;
 
@@ -185,6 +186,9 @@ extern "C"
     /// The map type for the element
     int map_type;
 
+    /// The Sobolev space for the element
+    int sobolev_space;
+
     /// Indicates whether or not this is the discontinuous version of the element
     bool discontinuous;
 
diff --git a/ffcx/compiler.py b/ffcx/compiler.py
index 1366ebd..a584fe8 100644
--- a/ffcx/compiler.py
+++ b/ffcx/compiler.py
@@ -81,12 +81,12 @@ def _print_timing(stage: int, timing: float):
 
 def compile_ufl_objects(ufl_objects: typing.List[typing.Any],
                         object_names: typing.Dict = {},
-                        prefix: str = None,
-                        parameters: typing.Dict = {},
+                        prefix: typing.Optional[str] = None,
+                        options: typing.Dict = {},
                         visualise: bool = False):
     """Generate UFC code for a given UFL objects.
 
-    Parameters
+    Options
     ----------
     @param ufl_objects:
         Objects to be compiled. Accepts elements, forms, integrals or coordinate mappings.
@@ -94,22 +94,22 @@ def compile_ufl_objects(ufl_objects: typing.List[typing.Any],
     """
     # Stage 1: analysis
     cpu_time = time()
-    analysis = analyze_ufl_objects(ufl_objects, parameters)
+    analysis = analyze_ufl_objects(ufl_objects, options)
     _print_timing(1, time() - cpu_time)
 
     # Stage 2: intermediate representation
     cpu_time = time()
-    ir = compute_ir(analysis, object_names, prefix, parameters, visualise)
+    ir = compute_ir(analysis, object_names, prefix, options, visualise)
     _print_timing(2, time() - cpu_time)
 
     # Stage 3: code generation
     cpu_time = time()
-    code = generate_code(ir, parameters)
+    code = generate_code(ir, options)
     _print_timing(3, time() - cpu_time)
 
     # Stage 4: format code
     cpu_time = time()
-    code_h, code_c = format_code(code, parameters)
+    code_h, code_c = format_code(code, options)
     _print_timing(4, time() - cpu_time)
 
     return code_h, code_c
diff --git a/ffcx/element_interface.py b/ffcx/element_interface.py
index c658716..ac1bd91 100644
--- a/ffcx/element_interface.py
+++ b/ffcx/element_interface.py
@@ -8,14 +8,14 @@
 from __future__ import annotations
 
 import typing
-
 import warnings
+from functools import lru_cache
 
-import basix
 import numpy
-import ufl
+
+import basix
 import basix.ufl_wrapper
-from functools import lru_cache
+import ufl
 
 
 def convert_element(element: ufl.finiteelement.FiniteElementBase) -> basix.ufl_wrapper._BasixElementBase:
@@ -37,7 +37,6 @@ def create_element(element: ufl.finiteelement.FiniteElementBase) -> basix.ufl_wr
     """
     if isinstance(element, basix.ufl_wrapper._BasixElementBase):
         return element
-
     elif isinstance(element, ufl.VectorElement):
         return basix.ufl_wrapper.VectorElement(create_element(element.sub_elements()[0]), element.num_sub_elements())
     elif isinstance(element, ufl.TensorElement):
@@ -51,10 +50,12 @@ def create_element(element: ufl.finiteelement.FiniteElementBase) -> basix.ufl_wr
         return basix.ufl_wrapper.MixedElement([create_element(e) for e in element.sub_elements()])
     elif isinstance(element, ufl.EnrichedElement):
         return basix.ufl_wrapper._create_enriched_element([create_element(e) for e in element._elements])
-
     elif element.family() == "Quadrature":
-        return QuadratureElement(element)
+        return QuadratureElement(element.cell().cellname(), element.value_shape(), scheme=element.quadrature_scheme(),
+                                 degree=element.degree())
 
+    elif element.family() == "Real":
+        return RealElement(element)
     else:
         return basix.ufl_wrapper.convert_ufl_element(element)
 
@@ -102,18 +103,187 @@ class QuadratureElement(basix.ufl_wrapper._BasixElementBase):
     """A quadrature element."""
 
     _points: basix.ufl_wrapper._nda_f64
+    _weights: basix.ufl_wrapper._nda_f64
     _entity_counts: typing.List[int]
+    _cellname: str
+
+    def __init__(
+        self, cellname: str, value_shape: typing.Tuple[int, ...], scheme: typing.Optional[str] = None,
+        degree: typing.Optional[int] = None, points: typing.Optional[basix.ufl_wrapper._nda_f64] = None,
+        weights: typing.Optional[basix.ufl_wrapper._nda_f64] = None, mapname: str = "identity"
+    ):
+        """Initialise the element."""
+        if scheme is not None:
+            assert degree is not None
+            assert points is None
+            assert weights is None
+            repr = f"QuadratureElement({cellname}, {scheme}, {degree})"
+            self._points, self._weights = create_quadrature(cellname, degree, scheme)
+        else:
+            assert degree is None
+            assert points is not None
+            assert weights is not None
+            self._points = points
+            self._weights = weights
+            repr = f"QuadratureElement({cellname}, {points}, {weights})"
+            degree = len(points)
+
+        self._cellname = cellname
+        basix_cell = basix.cell.string_to_type(cellname)
+        self._entity_counts = [len(i) for i in basix.topology(basix_cell)]
+
+        super().__init__(repr, "quadrature element", cellname, value_shape, degree, mapname=mapname)
+
+    def basix_sobolev_space(self):
+        """Return the underlying Sobolev space."""
+        return basix.sobolev_spaces.L2
+
+    def __eq__(self, other) -> bool:
+        """Check if two elements are equal."""
+        return isinstance(other, QuadratureElement) and numpy.allclose(self._points, other._points)
+
+    def __hash__(self) -> int:
+        """Return a hash."""
+        return super().__hash__()
+
+    def tabulate(
+        self, nderivs: int, points: basix.ufl_wrapper._nda_f64
+    ) -> basix.ufl_wrapper._nda_f64:
+        """Tabulate the basis functions of the element.
+
+        Args:
+            nderivs: Number of derivatives to tabulate.
+            points: Points to tabulate at
+
+        Returns:
+            Tabulated basis functions
+        """
+        if nderivs > 0:
+            raise ValueError("Cannot take derivatives of Quadrature element.")
+
+        if points.shape != self._points.shape:
+            raise ValueError("Mismatch of tabulation points and element points.")
+        tables = numpy.asarray([numpy.eye(points.shape[0], points.shape[0])])
+        return tables
+
+    def get_component_element(self, flat_component: int) -> typing.Tuple[basix.ufl_wrapper._BasixElementBase, int, int]:
+        """Get element that represents a component of the element, and the offset and stride of the component.
+
+        Args:
+            flat_component: The component
+
+        Returns:
+            component element, offset of the component, stride of the component
+        """
+        return self, 0, 1
+
+    @property
+    def ufcx_element_type(self) -> str:
+        """Element type."""
+        return "ufcx_quadrature_element"
+
+    @property
+    def dim(self) -> int:
+        """Number of DOFs the element has."""
+        return self._points.shape[0]
+
+    @property
+    def num_entity_dofs(self) -> typing.List[typing.List[int]]:
+        """Number of DOFs associated with each entity."""
+        dofs = []
+        for d in self._entity_counts[:-1]:
+            dofs += [[0] * d]
+
+        dofs += [[self.dim]]
+        return dofs
+
+    @property
+    def entity_dofs(self) -> typing.List[typing.List[typing.List[int]]]:
+        """DOF numbers associated with each entity."""
+        start_dof = 0
+        entity_dofs = []
+        for i in self.num_entity_dofs:
+            dofs_list = []
+            for j in i:
+                dofs_list.append([start_dof + k for k in range(j)])
+                start_dof += j
+            entity_dofs.append(dofs_list)
+        return entity_dofs
+
+    @property
+    def num_entity_closure_dofs(self) -> typing.List[typing.List[int]]:
+        """Number of DOFs associated with the closure of each entity."""
+        return self.num_entity_dofs
+
+    @property
+    def entity_closure_dofs(self) -> typing.List[typing.List[typing.List[int]]]:
+        """DOF numbers associated with the closure of each entity."""
+        return self.entity_dofs
+
+    @property
+    def num_global_support_dofs(self) -> int:
+        """Get the number of global support DOFs."""
+        return 0
+
+    @property
+    def reference_topology(self) -> typing.List[typing.List[typing.List[int]]]:
+        """Topology of the reference element."""
+        raise NotImplementedError()
+
+    @property
+    def reference_geometry(self) -> basix.ufl_wrapper._nda_f64:
+        """Geometry of the reference element."""
+        raise NotImplementedError()
+
+    @property
+    def family_name(self) -> str:
+        """Family name of the element."""
+        return "quadrature"
+
+    @property
+    def lagrange_variant(self) -> basix.LagrangeVariant:
+        """Basix Lagrange variant used to initialise the element."""
+        return None
+
+    @property
+    def dpc_variant(self) -> basix.DPCVariant:
+        """Basix DPC variant used to initialise the element."""
+        return None
+
+    @property
+    def element_family(self) -> basix.ElementFamily:
+        """Basix element family used to initialise the element."""
+        return None
+
+    @property
+    def cell_type(self) -> basix.CellType:
+        """Basix cell type used to initialise the element."""
+        return basix.cell.string_to_type(self._cellname)
+
+    @property
+    def discontinuous(self) -> bool:
+        """True if the discontinuous version of the element is used."""
+        return False
+
+    @property
+    def map_type(self) -> basix.MapType:
+        """The Basix map type."""
+        return basix.MapType.identity
+
+
+class RealElement(basix.ufl_wrapper._BasixElementBase):
+    """A real element."""
+
     _family_name: str
     _cellname: str
+    _entity_counts: typing.List[int]
 
     def __init__(self, element: ufl.finiteelement.FiniteElementBase):
         """Initialise the element."""
-        self._points, _ = create_quadrature(element.cell().cellname(),
-                                            element.degree(), element.quadrature_scheme())
-
         self._cellname = element.cell().cellname()
         self._family_name = element.family()
         tdim = element.cell().topological_dimension()
+
         self._entity_counts = []
         if tdim >= 1:
             self._entity_counts.append(element.cell().num_vertices())
@@ -124,12 +294,12 @@ class QuadratureElement(basix.ufl_wrapper._BasixElementBase):
         self._entity_counts.append(1)
 
         super().__init__(
-            f"QuadratureElement({element})", "quadrature element", element.cell().cellname(), element.value_shape(),
+            f"RealElement({element})", "real element", element.cell().cellname(), element.value_shape(),
             element.degree())
 
     def __eq__(self, other) -> bool:
         """Check if two elements are equal."""
-        return isinstance(other, QuadratureElement) and numpy.allclose(self._points, other._points)
+        return isinstance(other, RealElement)
 
     def __hash__(self) -> int:
         """Return a hash."""
@@ -147,13 +317,9 @@ class QuadratureElement(basix.ufl_wrapper._BasixElementBase):
         Returns:
             Tabulated basis functions
         """
-        if nderivs > 0:
-            raise ValueError("Cannot take derivatives of Quadrature element.")
-
-        if points.shape != self._points.shape:
-            raise ValueError("Mismatch of tabulation points and element points.")
-        tables = numpy.asarray([numpy.eye(points.shape[0], points.shape[0])])
-        return tables
+        out = numpy.zeros((nderivs + 1, len(points), 1))
+        out[0, :] = 1.
+        return out
 
     def get_component_element(self, flat_component: int) -> typing.Tuple[basix.ufl_wrapper._BasixElementBase, int, int]:
         """Get element that represents a component of the element, and the offset and stride of the component.
@@ -164,17 +330,18 @@ class QuadratureElement(basix.ufl_wrapper._BasixElementBase):
         Returns:
             component element, offset of the component, stride of the component
         """
+        assert flat_component < self.value_size
         return self, 0, 1
 
     @property
     def ufcx_element_type(self) -> str:
         """Element type."""
-        return "ufcx_quadrature_element"
+        return "ufcx_real_element"
 
     @property
     def dim(self) -> int:
         """Number of DOFs the element has."""
-        return self._points.shape[0]
+        return 0
 
     @property
     def num_entity_dofs(self) -> typing.List[typing.List[int]]:
@@ -212,7 +379,7 @@ class QuadratureElement(basix.ufl_wrapper._BasixElementBase):
     @property
     def num_global_support_dofs(self) -> int:
         """Get the number of global support DOFs."""
-        return 0
+        return 1
 
     @property
     def reference_topology(self) -> typing.List[typing.List[typing.List[int]]]:
@@ -254,7 +421,11 @@ class QuadratureElement(basix.ufl_wrapper._BasixElementBase):
         """True if the discontinuous version of the element is used."""
         return False
 
+    def basix_sobolev_space(self):
+        """Return the underlying Sobolev space."""
+        return basix.sobolev_spaces.Hinf
+
     @property
-    def interpolation_nderivs(self) -> int:
-        """The number of derivatives needed when interpolating."""
-        return 0
+    def map_type(self) -> basix.MapType:
+        """The Basix map type."""
+        return basix.MapType.identity
diff --git a/ffcx/formatting.py b/ffcx/formatting.py
index 8f4e55b..f9932aa 100644
--- a/ffcx/formatting.py
+++ b/ffcx/formatting.py
@@ -58,22 +58,22 @@ c_extern_post = """
 """
 
 
-def format_code(code, parameters: dict):
+def format_code(code, options: dict):
     """Format given code in UFC format. Returns two strings with header and source file contents."""
     logger.info(79 * "*")
     logger.info("Compiler stage 5: Formatting code")
     logger.info(79 * "*")
 
     # Generate code for comment at top of file
-    code_h_pre = _generate_comment(parameters) + "\n"
-    code_c_pre = _generate_comment(parameters) + "\n"
+    code_h_pre = _generate_comment(options) + "\n"
+    code_c_pre = _generate_comment(options) + "\n"
 
     # Generate code for header
     code_h_pre += FORMAT_TEMPLATE["header_h"]
     code_c_pre += FORMAT_TEMPLATE["header_c"]
 
     # Generate includes and add to preamble
-    includes_h, includes_c = _generate_includes(parameters)
+    includes_h, includes_c = _generate_includes(options)
     code_h_pre += includes_h
     code_c_pre += includes_c
 
@@ -107,22 +107,22 @@ def _write_file(output, prefix, postfix, output_dir):
         hfile.write(output)
 
 
-def _generate_comment(parameters):
+def _generate_comment(options):
     """Generate code for comment on top of file."""
     # Generate top level comment
     comment = FORMAT_TEMPLATE["ufc comment"].format(ffcx_version=FFCX_VERSION, ufcx_version=UFC_VERSION)
 
-    # Add parameter information
+    # Add option information
     comment += "//\n"
-    comment += "// This code was generated with the following parameters:\n"
+    comment += "// This code was generated with the following options:\n"
     comment += "//\n"
-    comment += textwrap.indent(pprint.pformat(parameters), "//  ")
+    comment += textwrap.indent(pprint.pformat(options), "//  ")
     comment += "\n"
 
     return comment
 
 
-def _generate_includes(parameters: dict):
+def _generate_includes(options: dict):
 
     default_h_includes = [
         "#include <ufcx.h>",
@@ -136,7 +136,7 @@ def _generate_includes(parameters: dict):
         "#include <ufcx.h>"
     ]
 
-    if "_Complex" in parameters["scalar_type"]:
+    if "_Complex" in options["scalar_type"]:
         default_c_includes += ["#include <complex.h>"]
 
     s_h = set(default_h_includes)
diff --git a/ffcx/ir/analysis/graph.py b/ffcx/ir/analysis/graph.py
index fc14157..7f7dcda 100644
--- a/ffcx/ir/analysis/graph.py
+++ b/ffcx/ir/analysis/graph.py
@@ -8,6 +8,7 @@
 import logging
 
 import numpy
+
 import ufl
 from ffcx.ir.analysis.modified_terminals import is_modified_terminal
 from ffcx.ir.analysis.reconstruct import reconstruct
diff --git a/ffcx/ir/analysis/modified_terminals.py b/ffcx/ir/analysis/modified_terminals.py
index 3983d76..d3e967c 100644
--- a/ffcx/ir/analysis/modified_terminals.py
+++ b/ffcx/ir/analysis/modified_terminals.py
@@ -10,6 +10,7 @@ from ufl.classes import (Argument, CellAvg, FacetAvg, FixedIndex, FormArgument,
                          Grad, Indexed, Jacobian, ReferenceGrad,
                          ReferenceValue, Restricted, SpatialCoordinate)
 from ufl.permutation import build_component_numbering
+
 from ...element_interface import convert_element
 
 logger = logging.getLogger("ffcx")
diff --git a/ffcx/ir/analysis/valuenumbering.py b/ffcx/ir/analysis/valuenumbering.py
index d675295..d95da84 100644
--- a/ffcx/ir/analysis/valuenumbering.py
+++ b/ffcx/ir/analysis/valuenumbering.py
@@ -146,11 +146,11 @@ class ValueNumberer(object):
         num_gd = len(mt.global_derivatives)
         assert not (num_ld and num_gd)
         if num_ld:
-            domain = mt.terminal.ufl_domain()
+            domain = ufl.domain.extract_unique_domain(mt.terminal)
             tdim = domain.topological_dimension()
             d_components = ufl.permutation.compute_indices((tdim, ) * num_ld)
         elif num_gd:
-            domain = mt.terminal.ufl_domain()
+            domain = ufl.domain.extract_unique_domain(mt.terminal)

             gdim = domain.geometric_dimension()
             d_components = ufl.permutation.compute_indices((gdim, ) * num_gd)
         else:
diff --git a/ffcx/ir/elementtables.py b/ffcx/ir/elementtables.py
index 7dc030e..8f7d901 100644
--- a/ffcx/ir/elementtables.py
+++ b/ffcx/ir/elementtables.py
@@ -9,9 +9,10 @@ import logging
 import typing
 
 import numpy
+
 import ufl
 import ufl.utils.derivativetuples
-from ffcx.element_interface import basix_index, convert_element
+from ffcx.element_interface import basix_index, convert_element, QuadratureElement
 from ffcx.ir.representationutils import (create_quadrature_points_and_weights,
                                          integral_type_to_entity_dim,
                                          map_integral_points)
@@ -101,9 +102,13 @@ def get_ffcx_table_values(points, cell, integral_type, element, avg, entitytype,
         elif avg == "facet":
             integral_type = "exterior_facet"
 
-        # Make quadrature rule and get points and weights
-        points, weights = create_quadrature_points_and_weights(integral_type, cell,
-                                                               element.degree(), "default")
+        if isinstance(element, QuadratureElement):
+            points = element._points
+            weights = element._weights
+        else:
+            # Make quadrature rule and get points and weights
+            points, weights = create_quadrature_points_and_weights(
+                integral_type, cell, element.highest_degree(), "default")
 
     # Tabulate table of basis functions and derivatives in points for each entity
     tdim = cell.topological_dimension()
@@ -182,7 +187,7 @@ def generate_psi_table_name(quadrature_rule, element_counter, averaged: str, ent
 def get_modified_terminal_element(mt) -> typing.Optional[ModifiedTerminalElement]:
     gd = mt.global_derivatives
     ld = mt.local_derivatives
-
+    domain = ufl.domain.extract_unique_domain(mt.terminal)
     # Extract element from FormArguments and relevant GeometricQuantities
     if isinstance(mt.terminal, ufl.classes.FormArgument):
         if gd and mt.reference_value:
@@ -198,7 +203,7 @@ def get_modified_terminal_element(mt) -> typing.Optional[ModifiedTerminalElement
             raise RuntimeError("Not expecting reference value of x.")
         if gd:
             raise RuntimeError("Not expecting global derivatives of x.")
-        element = convert_element(mt.terminal.ufl_domain().ufl_coordinate_element())
+        element = convert_element(domain.ufl_coordinate_element())
         if not ld:
             fc = mt.flat_component
         else:
@@ -211,7 +216,7 @@ def get_modified_terminal_element(mt) -> typing.Optional[ModifiedTerminalElement
             raise RuntimeError("Not expecting reference value of J.")
         if gd:
             raise RuntimeError("Not expecting global derivatives of J.")
-        element = convert_element(mt.terminal.ufl_domain().ufl_coordinate_element())
+        element = convert_element(domain.ufl_coordinate_element())
         assert len(mt.component) == 2
         # Translate component J[i,d] to x element context rgrad(x[i])[d]
         fc, d = mt.component  # x-component, derivative
@@ -221,7 +226,7 @@ def get_modified_terminal_element(mt) -> typing.Optional[ModifiedTerminalElement
 
     assert (mt.averaged is None) or not (ld or gd)
     # Change derivatives format for table lookup
-    gdim = mt.terminal.ufl_domain().geometric_dimension()
+    gdim = domain.geometric_dimension()
     local_derivatives = ufl.utils.derivativetuples.derivative_listing_to_counts(
         ld, gdim)
 
diff --git a/ffcx/ir/integral.py b/ffcx/ir/integral.py
index 243ebfd..7db59c4 100644
--- a/ffcx/ir/integral.py
+++ b/ffcx/ir/integral.py
@@ -11,6 +11,7 @@ import logging
 import typing
 
 import numpy
+
 import ufl
 from ffcx.ir.analysis.factorization import compute_argument_factorization
 from ffcx.ir.analysis.graph import build_scalar_graph
@@ -48,8 +49,8 @@ def compute_integral_ir(cell, integral_type, entitytype, integrands, argument_sh
     # here
     ir = {}
 
-    # Pass on parameters for consumption in code generation
-    ir["params"] = p
+    # Pass on options for consumption in code generation
+    ir["options"] = p
 
     # Shared unique tables for all quadrature loops
     ir["unique_tables"] = {}
diff --git a/ffcx/ir/representation.py b/ffcx/ir/representation.py
index 5abee07..cfcfa3f 100644
--- a/ffcx/ir/representation.py
+++ b/ffcx/ir/representation.py
@@ -22,8 +22,9 @@ import numbers
 import typing
 import warnings
 
-import basix
 import numpy
+
+import basix
 import ufl
 from ffcx import naming
 from ffcx.analysis import UFLData
@@ -62,6 +63,7 @@ class CustomElementIR(typing.NamedTuple):
     x: typing.List[typing.List[numpy.typing.NDArray[numpy.float64]]]
     M: typing.List[typing.List[numpy.typing.NDArray[numpy.float64]]]
     map_type: basix.MapType
+    sobolev_space: basix.SobolevSpace
     interpolation_nderivs: int
     discontinuous: bool
     highest_complete_degree: int
@@ -124,7 +126,7 @@ class IntegralIR(typing.NamedTuple):
     coefficient_numbering: typing.Dict[ufl.Coefficient, int]
     coefficient_offsets: typing.Dict[ufl.Coefficient, int]
     original_constant_offsets: typing.Dict[ufl.Constant, int]
-    params: dict
+    options: dict
     cell_shape: str
     unique_tables: typing.Dict[str, numpy.typing.NDArray[numpy.float64]]
     unique_table_types: typing.Dict[str, str]
@@ -138,7 +140,7 @@ class IntegralIR(typing.NamedTuple):
 class ExpressionIR(typing.NamedTuple):
     name: str
     element_dimensions: typing.Dict[ufl.FiniteElementBase, int]
-    params: dict
+    options: dict
     unique_tables: typing.Dict[str, numpy.typing.NDArray[numpy.float64]]
     unique_table_types: typing.Dict[str, str]
     integrand: typing.Dict[QuadratureRule, dict]
@@ -166,7 +168,7 @@ class DataIR(typing.NamedTuple):
     expressions: typing.List[ExpressionIR]
 
 
-def compute_ir(analysis: UFLData, object_names, prefix, parameters, visualise):
+def compute_ir(analysis: UFLData, object_names, prefix, options, visualise):
     """Compute intermediate representation."""
     logger.info(79 * "*")
     logger.info("Compiler stage 2: Computing intermediate representation of objects")
@@ -197,7 +199,7 @@ def compute_ir(analysis: UFLData, object_names, prefix, parameters, visualise):
 
     irs = [
         _compute_integral_ir(fd, i, analysis.element_numbers, integral_names, finite_element_names,
-                             parameters, visualise)
+                             options, visualise)
         for (i, fd) in enumerate(analysis.form_data)
     ]
     ir_integrals = list(itertools.chain(*irs))
@@ -208,7 +210,7 @@ def compute_ir(analysis: UFLData, object_names, prefix, parameters, visualise):
         for (i, fd) in enumerate(analysis.form_data)
     ]
 
-    ir_expressions = [_compute_expression_ir(expr, i, prefix, analysis, parameters, visualise, object_names,
+    ir_expressions = [_compute_expression_ir(expr, i, prefix, analysis, options, visualise, object_names,
                                              finite_element_names, dofmap_names)
                       for i, expr in enumerate(analysis.expressions)]
 
@@ -235,7 +237,7 @@ def _compute_element_ir(element, element_numbers, finite_element_names):
     ir["cell_shape"] = element.cell_type.name
     ir["topological_dimension"] = cell.topological_dimension()
     ir["geometric_dimension"] = cell.geometric_dimension()
-    ir["space_dimension"] = element.dim
+    ir["space_dimension"] = element.dim + element.num_global_support_dofs
     ir["element_type"] = element.ufcx_element_type
     ir["lagrange_variant"] = element.lagrange_variant
     ir["dpc_variant"] = element.dpc_variant
@@ -273,6 +275,7 @@ def _compute_custom_element_ir(basix_element: basix.finite_element.FiniteElement
     ir["x"] = basix_element.x
     ir["M"] = basix_element.M
     ir["map_type"] = basix_element.map_type
+    ir["sobolev_space"] = basix_element.sobolev_space
     ir["discontinuous"] = basix_element.discontinuous
     ir["interpolation_nderivs"] = basix_element.interpolation_nderivs
     ir["highest_complete_degree"] = basix_element.highest_complete_degree
@@ -319,14 +322,14 @@ def _compute_dofmap_ir(element, element_numbers, dofmap_names):
     ir["entity_closure_dofs"] = element.entity_closure_dofs
 
     ir["num_global_support_dofs"] = element.num_global_support_dofs
-    ir["num_element_support_dofs"] = element.dim - ir["num_global_support_dofs"]
+    ir["num_element_support_dofs"] = element.dim
 
     return DofMapIR(**ir)
 
 
 def _compute_integral_ir(form_data, form_index, element_numbers, integral_names,
-                         finite_element_names, parameters, visualise):
-    """Compute intermediate represention for form integrals."""
+                         finite_element_names, options, visualise):
+    """Compute intermediate representation for form integrals."""
     _entity_types = {
         "cell": "cell",
         "exterior_facet": "facet",
@@ -364,7 +367,8 @@ def _compute_integral_ir(form_data, form_index, element_numbers, integral_names,
 
         # Get element space dimensions
         unique_elements = element_numbers.keys()
-        ir["element_dimensions"] = {element: element.dim for element in unique_elements}
+        ir["element_dimensions"] = {element: element.dim + element.num_global_support_dofs
+                                    for element in unique_elements}
 
         ir["element_ids"] = {
             element: i
@@ -391,7 +395,6 @@ def _compute_integral_ir(form_data, form_index, element_numbers, integral_names,
         for integral in itg_data.integrals:
             md = integral.metadata() or {}
             scheme = md["quadrature_rule"]
-            degree = md["quadrature_degree"]
 
             if scheme == "custom":
                 points = md["quadrature_points"]
@@ -406,6 +409,7 @@ def _compute_integral_ir(form_data, form_index, element_numbers, integral_names,
                 # scheme have some properties that other schemes lack, e.g., the
                 # mass matrix is a simple diagonal matrix. This may be
                 # prescribed in certain cases.
+                degree = md["quadrature_degree"]
                 if degree > 1:
                     warnings.warn(
                         "Explicitly selected vertex quadrature (degree 1), but requested degree is {}.".
@@ -421,6 +425,7 @@ def _compute_integral_ir(form_data, form_index, element_numbers, integral_names,
                     # Trapezoidal rule
                     return (numpy.array([[0.0], [1.0]]), numpy.array([1.0 / 2.0, 1.0 / 2.0]))
             else:
+                degree = md["quadrature_degree"]
                 points, weights = create_quadrature_points_and_weights(
                     integral_type, cell, degree, scheme)
 
@@ -479,7 +484,7 @@ def _compute_integral_ir(form_data, form_index, element_numbers, integral_names,
         # Build more specific intermediate representation
         integral_ir = compute_integral_ir(itg_data.domain.ufl_cell(), itg_data.integral_type,
                                           ir["entitytype"], integrands, ir["tensor_shape"],
-                                          parameters, visualise)
+                                          options, visualise)
 
         ir.update(integral_ir)
 
@@ -531,7 +536,7 @@ def _compute_form_ir(form_data, form_id, prefix, form_names, integral_names, ele
         el = convert_element(convert_element(function.ufl_function_space().ufl_element()))
         cmap = function.ufl_function_space().ufl_domain().ufl_coordinate_element()
         # Default point spacing for CoordinateElement is equispaced
-        if cmap.variant() is None:
+        if not isinstance(cmap, basix.ufl_wrapper._BasixElementBase) and cmap.variant() is None:
             cmap._sub_element._variant = "equispaced"
         cmap = convert_element(cmap)
         family = cmap.family()
@@ -577,7 +582,7 @@ def _compute_form_ir(form_data, form_id, prefix, form_names, integral_names, ele
     return FormIR(**ir)
 
 
-def _compute_expression_ir(expression, index, prefix, analysis, parameters, visualise, object_names,
+def _compute_expression_ir(expression, index, prefix, analysis, options, visualise, object_names,
                            finite_element_names, dofmap_names):
     """Compute intermediate representation of expression."""
     logger.info(f"Computing IR for expression {index}")
@@ -594,7 +599,7 @@ def _compute_expression_ir(expression, index, prefix, analysis, parameters, visu
     expression = expression[0]
 
     try:
-        cell = expression.ufl_domain().ufl_cell()
+        cell = ufl.domain.extract_unique_domain(expression).ufl_cell()
     except AttributeError:
         # This case corresponds to a spatially constant expression
         # without any dependencies
@@ -602,7 +607,8 @@ def _compute_expression_ir(expression, index, prefix, analysis, parameters, visu
 
     # Prepare dimensions of all unique element in expression, including
     # elements for arguments, coefficients and coordinate mappings
-    ir["element_dimensions"] = {element: element.dim for element in analysis.unique_elements}
+    ir["element_dimensions"] = {element: element.dim + element.num_global_support_dofs
+                                for element in analysis.unique_elements}
 
     # Extract dimensions for elements of arguments only
     arguments = ufl.algorithms.extract_arguments(expression)
@@ -685,7 +691,7 @@ def _compute_expression_ir(expression, index, prefix, analysis, parameters, visu
         assert len(ir["original_coefficient_positions"]) == 0 and len(ir["original_constant_offsets"]) == 0
 
     expression_ir = compute_integral_ir(cell, ir["integral_type"], ir["entitytype"], integrands, tensor_shape,
-                                        parameters, visualise)
+                                        options, visualise)
 
     ir.update(expression_ir)
 
diff --git a/ffcx/ir/representationutils.py b/ffcx/ir/representationutils.py
index 6ce5120..3eda83a 100644
--- a/ffcx/ir/representationutils.py
+++ b/ffcx/ir/representationutils.py
@@ -9,6 +9,7 @@ import hashlib
 import logging
 
 import numpy
+
 import ufl
 from ffcx.element_interface import (create_quadrature, map_facet_points,
                                     reference_cell_vertices)
diff --git a/ffcx/main.py b/ffcx/main.py
index fb6d9cc..6d9a156 100644
--- a/ffcx/main.py
+++ b/ffcx/main.py
@@ -16,10 +16,9 @@ import re
 import string
 
 import ufl
-
 from ffcx import __version__ as FFCX_VERSION
 from ffcx import compiler, formatting
-from ffcx.parameters import FFCX_DEFAULT_PARAMETERS, get_parameters
+from ffcx.options import FFCX_DEFAULT_OPTIONS, get_options
 
 logger = logging.getLogger("ffcx")
 
@@ -31,10 +30,10 @@ parser.add_argument("-o", "--output-directory", type=str, default=".", help="out
 parser.add_argument("--visualise", action="store_true", help="visualise the IR graph")
 parser.add_argument("-p", "--profile", action='store_true', help="enable profiling")
 
-# Add all parameters from FFCx parameter system
-for param_name, (param_val, param_desc) in FFCX_DEFAULT_PARAMETERS.items():
-    parser.add_argument(f"--{param_name}",
-                        type=type(param_val), help=f"{param_desc} (default={param_val})")
+# Add all options from FFCx option system
+for opt_name, (opt_val, opt_desc) in FFCX_DEFAULT_OPTIONS.items():
+    parser.add_argument(f"--{opt_name}",
+                        type=type(opt_val), help=f"{opt_desc} (default={opt_val})")
 
 parser.add_argument("ufl_file", nargs='+', help="UFL file(s) to be compiled")
 
@@ -42,9 +41,9 @@ parser.add_argument("ufl_file", nargs='+', help="UFL file(s) to be compiled")
 def main(args=None):
     xargs = parser.parse_args(args)
 
-    # Parse all other parameters
-    priority_parameters = {k: v for k, v in xargs.__dict__.items() if v is not None}
-    parameters = get_parameters(priority_parameters)
+    # Parse all other options
+    priority_options = {k: v for k, v in xargs.__dict__.items() if v is not None}
+    options = get_options(priority_options)
 
     # Call parser and compiler for each file
     for filename in xargs.ufl_file:
@@ -67,7 +66,7 @@ def main(args=None):
         # Generate code
         code_h, code_c = compiler.compile_ufl_objects(
             ufd.forms + ufd.expressions + ufd.elements, ufd.object_names,
-            prefix=prefix, parameters=parameters, visualise=xargs.visualise)
+            prefix=prefix, options=options, visualise=xargs.visualise)
 
         # Write to file
         formatting.write_code(code_h, code_c, prefix, xargs.output_directory)
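
The command-line interface now generates one long flag per entry of FFCX_DEFAULT_OPTIONS. A sketch of driving it programmatically, assuming a UFL input file `poisson.ufl` exists in the working directory (the file name is an assumption for illustration):

    from ffcx.main import main

    # Each default option becomes a flag of the same name; argparse converts
    # the string to the option's default type (float here for --epsilon).
    main(["--epsilon", "1e-12", "poisson.ufl"])
    # Writes poisson.h and poisson.c to the output directory (default ".").
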
diff --git a/ffcx/naming.py b/ffcx/naming.py
index dc05b51..5d802f8 100644
--- a/ffcx/naming.py
+++ b/ffcx/naming.py
@@ -9,9 +9,10 @@ import typing
 
 import numpy
 import numpy.typing
-import ufl
 
 import ffcx
+import ufl
+
 from .element_interface import convert_element
 
 
@@ -54,7 +55,8 @@ def compute_signature(ufl_objects: typing.List[
                 domains.append(*arg.ufl_function_space().ufl_domains())
             for gc in ufl.algorithms.analysis.extract_type(expr, ufl.classes.GeometricQuantity):
                 domains.append(*gc.ufl_domains())
-
+            for const in consts:
+                domains.append(const.ufl_domain())
             domains = ufl.algorithms.analysis.unique_tuple(domains)
             rn.update(dict((d, i) for i, d in enumerate(domains)))
 
diff --git a/ffcx/parameters.py b/ffcx/options.py
similarity index 53%
rename from ffcx/parameters.py
rename to ffcx/options.py
index aabe06d..3be3cad 100644
--- a/ffcx/parameters.py
+++ b/ffcx/options.py
@@ -10,12 +10,12 @@ import logging
 import os
 import os.path
 import pprint
-from typing import Optional, Dict, Any
 from pathlib import Path
+from typing import Any, Dict, Optional
 
 logger = logging.getLogger("ffcx")
 
-FFCX_DEFAULT_PARAMETERS = {
+FFCX_DEFAULT_OPTIONS = {
     "epsilon":
         (1e-14, "Machine precision, used for dropping zero terms in tables"),
     "scalar_type":
@@ -39,76 +39,76 @@ FFCX_DEFAULT_PARAMETERS = {
 
 
 @functools.lru_cache(maxsize=None)
-def _load_parameters():
-    """Load parameters from JSON files."""
+def _load_options():
+    """Load options from JSON files."""
     user_config_file = os.getenv("XDG_CONFIG_HOME", default=Path.home().joinpath(".config")) \
-        / Path("ffcx", "ffcx_parameters.json")
+        / Path("ffcx", "ffcx_options.json")
     try:
         with open(user_config_file) as f:
-            user_parameters = json.load(f)
+            user_options = json.load(f)
     except FileNotFoundError:
-        user_parameters = {}
+        user_options = {}
 
-    pwd_config_file = Path.cwd().joinpath("ffcx_parameters.json")
+    pwd_config_file = Path.cwd().joinpath("ffcx_options.json")
     try:
         with open(pwd_config_file) as f:
-            pwd_parameters = json.load(f)
+            pwd_options = json.load(f)
     except FileNotFoundError:
-        pwd_parameters = {}
+        pwd_options = {}
 
-    return (user_parameters, pwd_parameters)
+    return (user_options, pwd_options)
 
 
-def get_parameters(priority_parameters: Optional[dict] = None) -> dict:
-    """Return (a copy of) the merged parameter values for FFCX.
+def get_options(priority_options: Optional[dict] = None) -> dict:
+    """Return (a copy of) the merged option values for FFCX.
 
-    Parameters
+    Options
     ----------
-      priority_parameters:
-        take priority over all other parameter values (see notes)
+      priority_options:
+        take priority over all other option values (see notes)
 
     Returns
     -------
-      dict: merged parameter values
+      dict: merged option values
 
     Notes
     -----
-    This function sets the log level from the merged parameter values prior to
+    This function sets the log level from the merged option values prior to
     returning.
 
-    The `ffcx_parameters.json` files are cached on the first call. Subsequent
+    The `ffcx_options.json` files are cached on the first call. Subsequent
     calls to this function use this cache.
 
-    Priority ordering of parameters from highest to lowest is:
+    Priority ordering of options from highest to lowest is:
 
-    -  **priority_parameters** (API and command line parameters)
-    -  **$PWD/ffcx_parameters.json** (local parameters)
-    -  **$XDG_CONFIG_HOME/ffcx/ffcx_parameters.json** (user parameters)
-    -  **FFCX_DEFAULT_PARAMETERS** in `ffcx.parameters`
+    -  **priority_options** (API and command line options)
+    -  **$PWD/ffcx_options.json** (local options)
+    -  **$XDG_CONFIG_HOME/ffcx/ffcx_options.json** (user options)
+    -  **FFCX_DEFAULT_OPTIONS** in `ffcx.options`
 
     `XDG_CONFIG_HOME` is `~/.config/` if the environment variable is not set.
 
-    Example `ffcx_parameters.json` file:
+    Example `ffcx_options.json` file:
 
       { "assume_aligned": 32, "epsilon": 1e-7 }
 
     """
-    parameters: Dict[str, Any] = {}
+    options: Dict[str, Any] = {}
 
-    for param, (value, _) in FFCX_DEFAULT_PARAMETERS.items():
-        parameters[param] = value
+    for opt, (value, _) in FFCX_DEFAULT_OPTIONS.items():
+        options[opt] = value
 
-    # NOTE: _load_parameters uses functools.lru_cache
-    user_parameters, pwd_parameters = _load_parameters()
+    # NOTE: _load_options uses functools.lru_cache
+    user_options, pwd_options = _load_options()
 
-    parameters.update(user_parameters)
-    parameters.update(pwd_parameters)
-    if priority_parameters is not None:
-        parameters.update(priority_parameters)
+    options.update(user_options)
+    options.update(pwd_options)
+    if priority_options is not None:
+        options.update(priority_options)
 
-    logger.setLevel(parameters["verbosity"])
+    logger.setLevel(options["verbosity"])
 
-    logger.info("Final parameter values")
-    logger.info(pprint.pformat(parameters))
+    logger.info("Final option values")
+    logger.info(pprint.pformat(options))
 
-    return parameters
+    return options
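
The renamed option system reads `ffcx_options.json` instead of `ffcx_parameters.json`. A minimal sketch of the merging order described in the docstring above (the override value is illustrative):

    from ffcx.options import get_options

    # Priority: argument here > ./ffcx_options.json
    #           > $XDG_CONFIG_HOME/ffcx/ffcx_options.json > FFCX_DEFAULT_OPTIONS
    opts = get_options({"epsilon": 1e-12})
    assert opts["epsilon"] == 1e-12
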
diff --git a/mypy.ini b/mypy.ini
index 3222cc0..c6c56c2 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -1,4 +1,14 @@
 [mypy]
+# Suggested at https://blog.wolt.com/engineering/2021/09/30/professional-grade-mypy-configuration/
+# Goal would be to make all of the below True long-term
+disallow_untyped_defs = False
+disallow_any_unimported = False
+no_implicit_optional = False
+check_untyped_defs = False
+warn_return_any = False
+warn_unused_ignores = False
+show_error_codes = True
+
 [mypy-pygraphviz.*]
 ignore_missing_imports = True
 [mypy-ufl.*]
diff --git a/pyproject.toml b/pyproject.toml
index 2824775..d713325 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -7,7 +7,7 @@ build-backend = "setuptools.build_meta"
 minversion = "6.0"
 addopts = "-ra"
 testpaths = [
-    "tests"
+    "test"
 ]
 norecursedirs = [
     "libs",
diff --git a/setup.cfg b/setup.cfg
index 5e88752..24871c0 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -2,7 +2,7 @@
 # future
 [metadata]
 name = fenics-ffcx
-version = 0.5.0
+version = 0.6.0
 author = FEniCS Project Contributors
 email = fenics-dev@googlegroups.com
 maintainer = FEniCS Project Steering Council
@@ -13,8 +13,8 @@ project_urls =
     Documentation = https://docs.fenicsproject.org
     Issues = https://github.com/FEniCS/ffcx/issues
     Funding = https://numfocus.org/donate
-long_description = file: README.rst
-long_description_content_type = text/x-rst
+long_description = file: README.md
+long_description_content_type = text/markdown
 license=LGPL-3.0-or-later
 classifiers =
     Development Status :: 5 - Production/Stable
@@ -46,17 +46,18 @@ install_requires =
     numpy
     cffi
     setuptools
-    fenics-basix >= 0.5.0, <0.6.0
-    fenics-ufl >= 2022.2.0, <2022.3.0
+    fenics-basix >= 0.6.0, <0.7.0
+    fenics-ufl >= 2023.1.0, <2023.2.0
 
 [options.extras_require]
 docs = sphinx; sphinx_rtd_theme
 lint = flake8; pydocstyle[toml]
-optional = pygraphviz
+optional = pygraphviz == 1.7
 test = pytest >= 6.0; sympy
 ci =
     coverage
     coveralls
+    isort
     pytest-cov
     pytest-xdist
     mypy
@@ -73,5 +74,8 @@ console_scripts =
 [flake8]
 max-line-length = 120
 exclude = .git,__pycache__,docs/source/conf.py,build,dist,libs
-ignore = W503,  # Line length
-         E741   # Variable names l, O, I, ...
+ignore =
+    # Line length
+    W503,
+    # Variable names l, O, I, ...
+    E741,
diff --git a/setup.py b/setup.py
index daa285d..2b7a7f2 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,6 @@ import setuptools
 
 try:
     import pip
-
     from packaging import version
     if version.parse(pip.__version__) < version.parse("21.3"):
         # Issue with older version of pip https://github.com/pypa/pip/issues/7953
diff --git a/test/test_add_mode.py b/test/test_add_mode.py
index 2a085b5..c4fd5fd 100644
--- a/test/test_add_mode.py
+++ b/test/test_add_mode.py
@@ -4,9 +4,10 @@
 #
 # SPDX-License-Identifier:    LGPL-3.0-or-later
 
-import ffcx.codegeneration.jit
 import numpy as np
 import pytest
+
+import ffcx.codegeneration.jit
 import ufl
 from ffcx.naming import cdtype_to_numpy, scalar_to_value_type
 
@@ -26,7 +27,7 @@ def test_additive_facet_integral(mode, compile_args):
     a = ufl.inner(u, v) * ufl.ds
     forms = [a]
     compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
-        forms, parameters={'scalar_type': mode}, cffi_extra_compile_args=compile_args)
+        forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args)
 
     for f, compiled_f in zip(forms, compiled_forms):
         assert compiled_f.rank == len(f.arguments())
@@ -75,7 +76,7 @@ def test_additive_cell_integral(mode, compile_args):
     a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx
     forms = [a]
     compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
-        forms, parameters={'scalar_type': mode}, cffi_extra_compile_args=compile_args)
+        forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args)
 
     for f, compiled_f in zip(forms, compiled_forms):
         assert compiled_f.rank == len(f.arguments())
diff --git a/test/test_flops.py b/test/test_flops.py
index d81edd5..833e765 100644
--- a/test/test_flops.py
+++ b/test/test_flops.py
@@ -5,8 +5,8 @@
 # SPDX-License-Identifier:    LGPL-3.0-or-later
 
 
-from ffcx.codegeneration.flop_count import count_flops
 import ufl
+from ffcx.codegeneration.flop_count import count_flops
 
 
 def create_form(degree):
diff --git a/test/test_jit_expression.py b/test/test_jit_expression.py
index 8457234..99d406e 100644
--- a/test/test_jit_expression.py
+++ b/test/test_jit_expression.py
@@ -5,10 +5,11 @@
 #
 # SPDX-License-Identifier:    LGPL-3.0-or-later
 
-import basix
 import cffi
-import ffcx.codegeneration.jit
 import numpy as np
+
+import basix
+import ffcx.codegeneration.jit
 import ufl
 from ffcx.naming import cdtype_to_numpy, scalar_to_value_type
 
@@ -172,7 +173,7 @@ def test_elimiate_zero_tables_tensor(compile_args):
                                   [u.dx(1), u.dx(1), 0],
                                   [0, 0, 0]]))
 
-    # Get vectices of cell
+    # Get vertices of cell
     # Coords storage XYZXYZXYZ
     basix_c_e = basix.create_element(basix.ElementFamily.P, basix.cell.string_to_type(cell), 1, False)
     coords = basix_c_e.points
diff --git a/test/test_jit_forms.py b/test/test_jit_forms.py
index 62b7c85..3d7b6fd 100644
--- a/test/test_jit_forms.py
+++ b/test/test_jit_forms.py
@@ -4,13 +4,14 @@
 #
 # SPDX-License-Identifier:    LGPL-3.0-or-later
 
-import ffcx.codegeneration.jit
 import numpy as np
 import pytest
 import sympy
+from sympy.abc import x, y, z
+
+import ffcx.codegeneration.jit
 import ufl
 from ffcx.naming import cdtype_to_numpy, scalar_to_value_type
-from sympy.abc import x, y, z
 
 
 @pytest.mark.parametrize("mode,expected_result", [
@@ -30,7 +31,7 @@ def test_laplace_bilinear_form_2d(mode, expected_result, compile_args):
     a = ufl.tr(kappa) * ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx
     forms = [a]
     compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
-        forms, parameters={'scalar_type': mode}, cffi_extra_compile_args=compile_args)
+        forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args)
 
     for f, compiled_f in zip(forms, compiled_forms):
         assert compiled_f.rank == len(f.arguments())
@@ -102,7 +103,7 @@ def test_mass_bilinear_form_2d(mode, expected_result, compile_args):
     L = ufl.conj(v) * ufl.dx
     forms = [a, L]
     compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
-        forms, parameters={'scalar_type': mode}, cffi_extra_compile_args=compile_args)
+        forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args)
 
     for f, compiled_f in zip(forms, compiled_forms):
         assert compiled_f.rank == len(f.arguments())
@@ -161,7 +162,7 @@ def test_helmholtz_form_2d(mode, expected_result, compile_args):
     a = (ufl.inner(ufl.grad(u), ufl.grad(v)) - ufl.inner(k * u, v)) * ufl.dx
     forms = [a]
     compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
-        forms, parameters={'scalar_type': mode}, cffi_extra_compile_args=compile_args)
+        forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args)
 
     for f, compiled_f in zip(forms, compiled_forms):
         assert compiled_f.rank == len(f.arguments())
@@ -210,7 +211,7 @@ def test_laplace_bilinear_form_3d(mode, expected_result, compile_args):
     a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx
     forms = [a]
     compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
-        forms, parameters={'scalar_type': mode}, cffi_extra_compile_args=compile_args)
+        forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args)
 
     for f, compiled_f in zip(forms, compiled_forms):
         assert compiled_f.rank == len(f.arguments())
@@ -286,7 +287,7 @@ def test_subdomains(compile_args):
     a3 = ufl.inner(u, v) * ufl.ds(210) + ufl.inner(u, v) * ufl.ds(0)
     forms = [a0, a1, a2, a3]
     compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
-        forms, parameters={'scalar_type': 'double'}, cffi_extra_compile_args=compile_args)
+        forms, options={'scalar_type': 'double'}, cffi_extra_compile_args=compile_args)
 
     for f, compiled_f in zip(forms, compiled_forms):
         assert compiled_f.rank == len(f.arguments())
@@ -318,7 +319,7 @@ def test_interior_facet_integral(mode, compile_args):
     a0 = ufl.inner(ufl.jump(ufl.grad(u)), ufl.jump(ufl.grad(v))) * ufl.dS
     forms = [a0]
     compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
-        forms, parameters={'scalar_type': mode}, cffi_extra_compile_args=compile_args)
+        forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args)
 
     for f, compiled_f in zip(forms, compiled_forms):
         assert compiled_f.rank == len(f.arguments())
@@ -374,7 +375,7 @@ def test_conditional(mode, compile_args):
     forms = [a, b]
 
     compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
-        forms, parameters={'scalar_type': mode}, cffi_extra_compile_args=compile_args)
+        forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args)
 
     form0 = compiled_forms[0].integrals(module.lib.cell)[0]
     form1 = compiled_forms[1].integrals(module.lib.cell)[0]
@@ -514,7 +515,7 @@ def test_lagrange_triangle(compile_args, order, mode, sym_fun, ufl_fun):
     a = ufl_fun(v) * ufl.dx
     forms = [a]
     compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
-        forms, parameters={'scalar_type': mode}, cffi_extra_compile_args=compile_args)
+        forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args)
 
     ffi = module.ffi
     form0 = compiled_forms[0]
@@ -606,7 +607,7 @@ def test_lagrange_tetrahedron(compile_args, order, mode, sym_fun, ufl_fun):
     a = ufl_fun(v) * ufl.dx
     forms = [a]
     compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
-        forms, parameters={'scalar_type': mode}, cffi_extra_compile_args=compile_args)
+        forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args)
 
     ffi = module.ffi
     form0 = compiled_forms[0]
@@ -645,7 +646,7 @@ def test_prism(compile_args):
     L = v * ufl.dx
     forms = [L]
     compiled_forms, module, _ = ffcx.codegeneration.jit.compile_forms(
-        forms, parameters={'scalar_type': 'double'}, cffi_extra_compile_args=compile_args)
+        forms, options={'scalar_type': 'double'}, cffi_extra_compile_args=compile_args)
 
     ffi = module.ffi
     form0 = compiled_forms[0]
@@ -682,7 +683,7 @@ def test_complex_operations(compile_args):
     forms = [J1, J2]
 
     compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
-        forms, parameters={'scalar_type': mode}, cffi_extra_compile_args=compile_args)
+        forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args)
 
     form0 = compiled_forms[0].integrals(module.lib.cell)[0]
     form1 = compiled_forms[1].integrals(module.lib.cell)[0]
