New Upstream Release - sqlfluff

Ready changes

Summary

Merged new upstream version: 2.0.5 (was: 1.4.5).

Resulting package

Built on 2023-04-15T03:55 (took 1h24m)

The resulting binary packages can be installed (if you have the apt repository enabled) by running one of:

apt install -t fresh-releases sqlfluff-doc
apt install -t fresh-releases sqlfluff
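If the repository is not yet enabled, a sources.list entry along the following lines is needed first. This is a hypothetical sketch: the report does not give the actual archive URL or keyring path, so treat both as placeholders.

# Hypothetical entry -- substitute the real Janitor archive URL and keyring.
deb [signed-by=/usr/share/keyrings/debian-janitor-archive-keyring.gpg] https://janitor.debian.net/ fresh-releases main

The -t fresh-releases option tells apt to select packages from that suite in preference to the default release.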

Lintian Result

Diff

diff --git a/.github/workflows/add-issue-labels.yaml b/.github/workflows/add-issue-labels.yaml
index 1b87e74..f20bbb5 100644
--- a/.github/workflows/add-issue-labels.yaml
+++ b/.github/workflows/add-issue-labels.yaml
@@ -10,5 +10,5 @@ jobs:
       - uses: Naturalclar/issue-action@v2.0.2
         with:
           title-or-body: "title"
-          parameters: '[{"keywords": ["ansi"], "labels": ["ansi"]}, {"keywords": ["athena"], "labels": ["athena"]}, {"keywords": ["bigquery"], "labels": ["bigquery"]}, {"keywords": ["clickhouse"], "labels": ["clickhouse"]}, {"keywords": ["db2"], "labels": ["db2"]}, {"keywords": ["exasol"], "labels": ["exasol"]}, {"keywords": ["hive"], "labels": ["hive"]}, {"keywords": ["mysql"], "labels": ["mysql"]}, {"keywords": ["mysql"], "labels": ["mysql"]}, {"keywords": ["oracle"], "labels": ["oracle"]}, {"keywords": ["postgres"], "labels": ["postgres"]}, {"keywords": ["redshift"], "labels": ["redshift"]}, {"keywords": ["snowflake"], "labels": ["snowflake"]}, {"keywords": ["soql"], "labels": ["soql"]}, {"keywords": ["sparksql"], "labels": ["sparksql"]}, {"keywords": ["sqlite"], "labels": ["sqlite"]}, {"keywords": ["t-sql", "tsql"], "labels": ["t-sql"]}, {"keywords": ["teradata"], "labels": ["teradata"]}]'
+          parameters: '[{"keywords": ["ansi"], "labels": ["ansi"]}, {"keywords": ["athena"], "labels": ["athena"]}, {"keywords": ["bigquery"], "labels": ["bigquery"]}, {"keywords": ["clickhouse"], "labels": ["clickhouse"]}, {"keywords": ["databricks"], "labels": ["databricks"]}, {"keywords": ["db2"], "labels": ["db2"]}, {"keywords": ["duckdb"], "labels": ["duckdb"]}, {"keywords": ["exasol"], "labels": ["exasol"]}, {"keywords": ["greenplum"], "labels": ["greenplum"]}, {"keywords": ["hive"], "labels": ["hive"]}, {"keywords": ["mysql"], "labels": ["mysql"]}, {"keywords": ["mysql"], "labels": ["mysql"]}, {"keywords": ["oracle"], "labels": ["oracle"]}, {"keywords": ["postgres"], "labels": ["postgres"]}, {"keywords": ["redshift"], "labels": ["redshift"]}, {"keywords": ["snowflake"], "labels": ["snowflake"]}, {"keywords": ["soql"], "labels": ["soql"]}, {"keywords": ["sparksql"], "labels": ["sparksql"]}, {"keywords": ["sqlite"], "labels": ["sqlite"]}, {"keywords": ["t-sql", "tsql"], "labels": ["t-sql"]}, {"keywords": ["teradata"], "labels": ["teradata"]}]'
           github-token: "${{ secrets.GITHUB_TOKEN }}"
diff --git a/.github/workflows/check-yaml-files-generation.yml b/.github/workflows/check-yaml-files-generation.yml
deleted file mode 100644
index b9e87c3..0000000
--- a/.github/workflows/check-yaml-files-generation.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-name: "Check generated YAML files"
-on: pull_request
-jobs:
-  ymlchecks:
-    runs-on: ubuntu-latest
-    steps:
-    - uses: actions/checkout@v3
-    - name: Set up Python 3.11
-      uses: actions/setup-python@v3
-      with:
-        python-version: '3.11'
-    - name: Install dependencies
-      run: |
-        pip install -r requirements.txt -r requirements_dev.txt
-        python setup.py develop
-    - name: Generate the YAML files
-      run: |
-        python test/generate_parse_fixture_yml.py
-    - name: Test the generated YAML files
-      run: |
-        if [ -n "$(git status --porcelain)" ]; then
-          git diff
-          echo "Generated YAML files do not match branch."
-          echo "Please run the following command to generate these:"
-          echo "  python test/generate_parse_fixture_yml.py"
-          exit 1
-        fi
diff --git a/.github/workflows/ci-test-dbt.yml b/.github/workflows/ci-test-dbt.yml
new file mode 100644
index 0000000..9eb704f
--- /dev/null
+++ b/.github/workflows/ci-test-dbt.yml
@@ -0,0 +1,88 @@
+#############################
+## GitHub Actions CI Tests ##
+#############################
+#
+# This is a reusable workflow to make CI tests more modular.
+# See: https://docs.github.com/en/actions/using-workflows/reusing-workflows
+#
+# Called by ci-tests.yml
+# This one does the dbt tests
+#
+
+name: Modular SQLFluff dbt test workflow
+
+on:
+  workflow_call:
+    inputs:
+      python-version:
+        required: true
+        type: string
+      dbt-version:
+        required: true
+        type: string
+      coverage:
+        required: false
+        type: boolean
+        default: false
+    secrets:
+      gh_token:
+        required: true
+
+jobs:
+  modular-python-test:
+    name: py${{ inputs.python-version }}-${{ inputs.dbt-version }}
+    runs-on: ubuntu-latest
+
+    services:
+      # Label used to access the service container
+      postgres:
+        # Docker Hub image
+        image: postgres
+        # Provide the password for postgres
+        env:
+          POSTGRES_PASSWORD: password
+        # Set health checks to wait until postgres has started
+        options: >-
+          --health-cmd pg_isready
+          --health-interval 10s
+          --health-timeout 5s
+          --health-retries 5
+        ports:
+          # Maps tcp port 5432 on service container to the host
+          - 5432:5432
+
+    steps:
+    - uses: actions/checkout@v3
+
+    - name: Set up Python
+      uses: actions/setup-python@v4
+      with:
+        python-version: ${{ inputs.python-version }}
+
+    - name: Install dependencies
+      run: pip install tox
+
+    - name: Run the tests (with coverage)
+      if: ${{ inputs.coverage }}
+      run: tox -e ${{ inputs.dbt-version }} -- --cov=sqlfluff_templater_dbt plugins/sqlfluff-templater-dbt
+
+    - name: Run the tests (without coverage)
+      if: ${{ !inputs.coverage }}
+      run: tox -e ${{ inputs.dbt-version }} -- plugins/sqlfluff-templater-dbt
+
+    - name: Coveralls Parallel (coveralls)
+      uses: coverallsapp/github-action@master
+      if: ${{ inputs.coverage }}
+      with:
+        path-to-lcov: coverage.lcov
+        github-token: ${{ secrets.gh_token }}
+        flag-name: run-${{ inputs.dbt-version }}
+        parallel: true
+
+    - name: Upload coverage data (github)
+      uses: actions/upload-artifact@v3
+      if: ${{ inputs.coverage }}
+      with:
+        name: coverage-data
+        path: ".coverage.*"
+        if-no-files-found: ignore
diff --git a/.github/workflows/ci-test-python.yml b/.github/workflows/ci-test-python.yml
new file mode 100644
index 0000000..9afede8
--- /dev/null
+++ b/.github/workflows/ci-test-python.yml
@@ -0,0 +1,94 @@
+#############################
+## GitHub Actions CI Tests ##
+#############################
+#
+# This is a reusable workflow to make CI tests more modular.
+# See: https://docs.github.com/en/actions/using-workflows/reusing-workflows
+#
+# Called by ci-tests.yml
+# This one does the python tests
+#
+
+name: Modular SQLFluff python test workflow
+
+on:
+  workflow_call:
+    inputs:
+      python-version:
+        required: true
+        type: string
+      marks:
+        required: false
+        type: string
+        default: "not integration"
+      coverage:
+        required: false
+        type: boolean
+        default: false
+    secrets:
+      gh_token:
+        required: true
+
+jobs:
+  modular-python-test:
+    runs-on: ubuntu-latest
+    name: py${{ inputs.python-version }}
+    steps:
+    - uses: actions/checkout@v3
+
+    - name: Set up Python
+      uses: actions/setup-python@v4
+      with:
+        python-version: ${{ inputs.python-version }}
+
+    - name: Install dependencies
+      run: pip install tox
+
+    - name: Parse Python Version
+      id: py_version
+      run: |
+        PYVERSION=$(echo "${{ inputs.python-version }}" | sed -e 's/\.//g')
+        echo "PYVERSION=$PYVERSION" >> $GITHUB_OUTPUT
+
+    # Run test process (with or without coverage).
+    # Arguments after the "--" are passed through to pytest:
+    #   --cov=...       The library to include in coverage reporting.
+    #   -n 2            Runs with two parallel processes.
+    #   test            The path to detect tests within.
+    #   -m ...          The pytest marks to filter tests.
+    #   --durations=16  Displays the 16 slowest runs to help with performance debugging.
+    - name: Run the tests (with coverage)
+      # NOTE: We have a separate job for coverage reporting because
+      # it impacts performance and slows the test suite significantly.
+      if: ${{ inputs.coverage }}
+      run: tox -e py${{ steps.py_version.outputs.PYVERSION }} -- --cov=sqlfluff -n 2 test -m "${{ inputs.marks }}" --durations=16
+    - name: Run the tests (without coverage)
+      if: ${{ !inputs.coverage }}
+      run: tox -e py${{ steps.py_version.outputs.PYVERSION }} -- -n 2 test -m "${{ inputs.marks }}" --durations=16
+
+    - name: Rename coverage files with suffix
+      # NOTE: We do this because we're using the same tox environment for multiple
+      # test jobs and we need to make sure that their coverage files don't collide.
+      id: cov_suffix
+      if: ${{ inputs.coverage }}
+      run: |
+        COVSUFFIX=$(echo "${{ inputs.marks }}" | sed -e 's/ /-/g')
+        echo "COVSUFFIX=$COVSUFFIX" >> $GITHUB_OUTPUT
+        for file in .coverage.*; do mv "$file" "$file.$COVSUFFIX"; done;
+
+    - name: Coveralls Parallel (coveralls)
+      uses: coverallsapp/github-action@master
+      if: ${{ inputs.coverage }}
+      with:
+        path-to-lcov: coverage.lcov
+        github-token: ${{ secrets.gh_token }}
+        flag-name: run-${{ inputs.python-version }}-${{ steps.cov_suffix.outputs.COVSUFFIX }}
+        parallel: true
+
+    - name: Upload coverage data (github)
+      uses: actions/upload-artifact@v3
+      if: ${{ inputs.coverage }}
+      with:
+        name: coverage-data
+        path: ".coverage.*"
+        if-no-files-found: ignore
diff --git a/.github/workflows/ci-tests.yml b/.github/workflows/ci-tests.yml
index a65c654..640c158 100644
--- a/.github/workflows/ci-tests.yml
+++ b/.github/workflows/ci-tests.yml
@@ -7,7 +7,7 @@
 # It will run on any pull request, except non-code changes
 # (images, markdown files, )
 #
-name: CI Tests
+name: CI
 on:
   workflow_dispatch:
   schedule:
@@ -17,8 +17,14 @@ on:
   push:
     branches:
       - main
+  merge_group:
+    # Merge Queue checks requested. This feature is still in beta
+    # from Github and so may need updating later.
+    # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#merge_group
+    types: [checks_requested]
 
 jobs:
+
   linting:
     runs-on: ubuntu-latest
     strategy:
@@ -28,57 +34,132 @@ jobs:
     steps:
     - uses: actions/checkout@v3
     - name: Set up Python
-      uses: actions/setup-python@v3
+      uses: actions/setup-python@v4
       with:
         python-version: '3.11'
     - name: Install dependencies
-      run: |
-        pip install tox
+      run: pip install tox
     - name: Run the tests
-      run: |
-        tox -e ${{ matrix.jobs }}
-  python-version-tests:
-    runs-on: ubuntu-latest
+      run: tox -e ${{ matrix.jobs }}
+
+  # Test with coverage tracking on most recent python (py11).
+  python-version-tests-cov:
+    name: Python 3.11 Tests
+    permissions:
+      contents: read
+      pull-requests: write
+    uses: ./.github/workflows/ci-test-python.yml
+    with:
+      python-version: "3.11"
+      coverage: true
+    secrets:
+      gh_token: ${{ secrets.github_token }}
+
+  # Test without coverage tracking on older python versions.
+  # This saves time, as testing without coverage tracking is faster.
+  python-version-tests-nocov:
+    name: Python ${{ matrix.python-version }} Tests
     strategy:
       matrix:
-        # We don't enforce coverage on older Python versions.
         python-version: [ '3.7', '3.8', '3.9', '3.10' ]
-    name: Python ${{ matrix.python-version }} tests
-    steps:
-    - uses: actions/checkout@v3
-    - name: Set up Python
-      uses: actions/setup-python@v3
-      with:
-        python-version: ${{ matrix.python-version }}
-    - name: Install dependencies
-      run: |
-        pip install tox
-    - name: Run the tests
-      run: |
-        tox -e py -- -n 2 test
-  python-version-tests-cov:
-    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      pull-requests: write
+    uses: ./.github/workflows/ci-test-python.yml
+    with:
+      python-version: ${{ matrix.python-version }}
+    secrets:
+      gh_token: ${{ secrets.github_token }}
+
+  dbt-tests-cov:
+    name: dbt 1.4 Plugin Tests
+    permissions:
+      contents: read
+      pull-requests: write
+    uses: ./.github/workflows/ci-test-dbt.yml
+    with:
+      python-version: "3.10"
+      dbt-version: "dbt140"
+      coverage: true
+    secrets:
+      gh_token: ${{ secrets.github_token }}
+
+  dbt-tests-nocov:
+    name: dbt 1.1 Plugin Tests
+    permissions:
+      contents: read
+      pull-requests: write
+    uses: ./.github/workflows/ci-test-dbt.yml
+    with:
+      python-version: "3.9"
+      dbt-version: "dbt110"
+    secrets:
+      gh_token: ${{ secrets.github_token }}
+
+  # This runs the bulk of the dialect _parsing_ tests.
+  #
+  # It's run as a separate job as it takes longer than the CI jobs and allows
+  # them to be rerun separately if GitHub Actions or Coverage is experiencing
+  # issues.
+  dialects_parse:
+    name: Dialect parsing
     strategy:
       matrix:
-        # We enforce coverage on this, the latest Python version.
         python-version: [ '3.11' ]
-    name: Python ${{ matrix.python-version }} tests
-    steps:
-    - uses: actions/checkout@v3
-    - name: Set up Python
-      uses: actions/setup-python@v3
-      with:
-        python-version: ${{ matrix.python-version }}
-    - name: Install dependencies
-      run: |
-        pip install tox
-    - name: Run the tests
-      run: |
-        tox -e py -- -n 2 test
-    - name: Upload Coverage Report
-      uses: codecov/codecov-action@v1
-      with:
-        files: ./coverage.xml
+    permissions:
+      contents: read
+      pull-requests: write
+    uses: ./.github/workflows/ci-test-python.yml
+    with:
+      python-version: "3.11"
+      marks: "parse_suite"
+      # We test coverage here for some parsing routines.
+      coverage: true
+    secrets:
+      gh_token: ${{ secrets.github_token }}
+
+  # This lints all our dialect fixtures to check that rules can handle a variety
+  # of SQL and don't error out badly.
+  #
+  # It's run as a separate job as it takes longer than the CI jobs and allows
+  # them to be rerun separately if GitHub Actions or Coverage is experiencing
+  # issues.
+  dialects_fix:
+    name: Dialect fixing
+    strategy:
+      matrix:
+        python-version: [ '3.11' ]
+    permissions:
+      contents: read
+      pull-requests: write
+    uses: ./.github/workflows/ci-test-python.yml
+    with:
+      python-version: "3.11"
+      marks: "fix_suite"
+    secrets:
+      gh_token: ${{ secrets.github_token }}
+
+  # This lints all our rules fixtures to check rules.
+  #
+  # It's run as a separate job as it takes longer than the CI jobs and allows
+  # them to be rerun separately if GitHub Actions or Coverage is experiencing
+  # issues.
+  rules:
+    name: Rule yaml test cases
+    strategy:
+      matrix:
+        python-version: [ '3.11' ]
+    permissions:
+      contents: read
+      pull-requests: write
+    uses: ./.github/workflows/ci-test-python.yml
+    with:
+      python-version: "3.11"
+      marks: "rules_suite"
+      coverage: true
+    secrets:
+      gh_token: ${{ secrets.github_token }}
+
   other-tests:
     runs-on: ubuntu-latest
     strategy:
@@ -88,24 +169,49 @@ jobs:
     steps:
     - uses: actions/checkout@v3
     - name: Set up Python
-      uses: actions/setup-python@v3
+      uses: actions/setup-python@v4
       with:
         python-version: '3.11'
     - name: Install dependencies
-      run: |
-        pip install tox
+      run: pip install tox
     - name: Run the tests
       env:
         SQLFLUFF_BENCHMARK_API_KEY: ${{ secrets.SQLFLUFF_BENCHMARK_API_KEY }}
       run: |
         tox -e ${{ matrix.jobs }}
+
+  ymlchecks:
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v3
+    - name: Set up Python 3.11
+      uses: actions/setup-python@v4
+      with:
+        python-version: '3.11'
+    - name: Install dependencies
+      run: |
+        pip install -r requirements.txt -r requirements_dev.txt
+        python setup.py develop
+    - name: Generate the YAML files
+      run: |
+        python test/generate_parse_fixture_yml.py
+    - name: Test the generated YAML files
+      run: |
+        if [ -n "$(git status --porcelain)" ]; then
+          git diff
+          echo "Generated YAML files do not match branch."
+          echo "Please run the following command to generate these:"
+          echo "  python test/generate_parse_fixture_yml.py"
+          exit 1
+        fi
+
   examples:
     runs-on: ubuntu-latest
     name: example tests
     steps:
     - uses: actions/checkout@v3
     - name: Set up Python
-      uses: actions/setup-python@v3
+      uses: actions/setup-python@v4
       with:
         python-version: '3.11'
     - name: Install dependencies
@@ -119,85 +225,10 @@ jobs:
           echo "Running $file"
           python "$file"
         done
-  dbt-tests:
-    runs-on: ubuntu-latest
-    services:
-      # Label used to access the service container
-      postgres:
-        # Docker Hub image
-        image: postgres
-        # Provide the password for postgres
-        env:
-          POSTGRES_PASSWORD: password
-        # Set health checks to wait until postgres has started
-        options: >-
-          --health-cmd pg_isready
-          --health-interval 10s
-          --health-timeout 5s
-          --health-retries 5
-        ports:
-          # Maps tcp port 5432 on service container to the host
-          - 5432:5432
-    strategy:
-      matrix:
-        # We don't enforce coverage on older dbt versions.
-        dbt-version: [ 'dbt020', 'dbt021', 'dbt100' ]
-    name: DBT Plugin ${{ matrix.dbt-version }} tests
-    steps:
-    - uses: actions/checkout@v3
-    - name: Set up Python
-      uses: actions/setup-python@v3
-      with:
-        python-version: '3.8'
-    - name: Install dependencies
-      run: |
-        pip install tox
-    - name: Run the tests
-      run: |
-        tox -e ${{ matrix.dbt-version }} -- plugins/sqlfluff-templater-dbt
-  dbt-tests-cov:
-    runs-on: ubuntu-latest
-    services:
-      # Label used to access the service container
-      postgres:
-        # Docker Hub image
-        image: postgres
-        # Provide the password for postgres
-        env:
-          POSTGRES_PASSWORD: password
-        # Set health checks to wait until postgres has started
-        options: >-
-          --health-cmd pg_isready
-          --health-interval 10s
-          --health-timeout 5s
-          --health-retries 5
-        ports:
-          # Maps tcp port 5432 on service container to the host
-          - 5432:5432
-    strategy:
-      matrix:
-        # We enforce coverage on this, the latest dbt version.
-        dbt-version: [ 'dbt130' ]
-    name: DBT Plugin ${{ matrix.dbt-version }} tests
-    steps:
-    - uses: actions/checkout@v3
-    - name: Set up Python
-      uses: actions/setup-python@v3
-      with:
-        python-version: '3.8'
-    - name: Install dependencies
-      run: |
-        pip install tox
-    - name: Run the tests
-      run: |
-        tox -e ${{ matrix.dbt-version }} -- plugins/sqlfluff-templater-dbt
-    - name: Upload Coverage Report
-      uses: codecov/codecov-action@v1
-      with:
-        files: ./coverage.xml
+
   python-windows-tests:
     runs-on: windows-latest
-    name: Python 3.8 Windows tests
+    name: Python 3.10 Windows tests
     steps:
     - name: Set git to use LF
       run: |
@@ -205,17 +236,16 @@ jobs:
         git config --global core.eol lf
     - uses: actions/checkout@v3
     - name: Set up Python
-      uses: actions/setup-python@v3
+      uses: actions/setup-python@v4
       with:
-        python-version: '3.8'
+        python-version: '3.10'
     - name: List Env
       shell: bash
       run: |
         env | sort
     - name: Install dependencies
       shell: bash
-      run: |
-        pip install tox
+      run: pip install tox
     - name: Run the tests
       shell: bash
       # Set python temp dir in working dir as on GitHub Actions Windows
@@ -223,14 +253,17 @@ jobs:
       # working dir on D drive which causes problems.
       run: |
         mkdir temp_pytest
-        python -m tox -e winpy -- -n 2 test
-    - name: Upload Coverage Report
-      uses: codecov/codecov-action@v1
+        python -m tox -e winpy -- --cov=sqlfluff -n 2 test -m "not integration"
+    - name: Upload coverage data (github)
+      uses: actions/upload-artifact@v3
       with:
-        files: ./coverage.xml
+        name: coverage-data
+        path: ".coverage.*"
+        if-no-files-found: ignore
+
   python-windows-dbt-tests:
     runs-on: windows-latest
-    name: DBT Plugin Python 3.8 Windows tests
+    name: DBT Plugin Python 3.10 Windows tests
     steps:
     - name: Start PostgreSQL on Windows
       run: |
@@ -246,19 +279,19 @@ jobs:
         git config --global core.eol lf
     - uses: actions/checkout@v3
     - name: Set up Python
-      uses: actions/setup-python@v3
+      uses: actions/setup-python@v4
       with:
-        python-version: '3.8'
+        python-version: '3.10'
     - name: Install dependencies
       shell: bash
-      run: |
-        pip install tox
+      run: pip install tox
     - name: Run the tests
       shell: bash
       # Do not set explicitly temp dir for dbt as causes problems
       # None of these test need temp dir set
       run: |
-        python -m tox -e dbt100-winpy -- plugins/sqlfluff-templater-dbt
+        python -m tox -e dbt140-winpy -- plugins/sqlfluff-templater-dbt
+
   pip-test-pull-request:
     # Test that using pip install works as we've missed
     # some dependencies in the past - see #1842
@@ -267,7 +300,7 @@ jobs:
     name: pip install tests
     steps:
     - name: Set up Python
-      uses: actions/setup-python@v3
+      uses: actions/setup-python@v4
       with:
         python-version: '3.11'
     - uses: actions/checkout@v3
@@ -289,3 +322,44 @@ jobs:
     - name: Run a simple select lint test via file
       run: |
         sqlfluff lint --dialect=ansi <(echo "select 1")
+
+  coveralls_finish:
+    name: Finalise coveralls.
+    needs: [python-version-tests-cov, dbt-tests-cov, python-windows-tests, dialects_parse, rules]
+    runs-on: ubuntu-latest
+    steps:
+    - name: Coveralls Finished
+      uses: coverallsapp/github-action@master
+      with:
+        github-token: ${{ secrets.github_token }}
+        parallel-finished: true
+
+  coverage_check:
+    name: Combine & check 100% coverage.
+    runs-on: ubuntu-latest
+    needs: [python-version-tests-cov, dbt-tests-cov, python-windows-tests, dialects_parse, rules]
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
+        with:
+          python-version: "3.10"
+
+      - run: python -m pip install --upgrade coverage[toml]
+
+      - name: Download coverage data.
+        uses: actions/download-artifact@v3
+        with:
+          name: coverage-data
+
+      - name: Combine coverage & fail if it's <100%.
+        run: |
+          python -m coverage combine
+          python -m coverage html --skip-covered --skip-empty
+          python -m coverage report --fail-under=100
+
+      - name: Upload HTML report if check failed.
+        uses: actions/upload-artifact@v3
+        with:
+          name: html-report
+          path: htmlcov
+        if: ${{ failure() }}
diff --git a/.github/workflows/create-release-pull-request.yaml b/.github/workflows/create-release-pull-request.yaml
index ba8540c..8128c70 100644
--- a/.github/workflows/create-release-pull-request.yaml
+++ b/.github/workflows/create-release-pull-request.yaml
@@ -23,7 +23,7 @@ jobs:
             fi
 
       - name: Set up Python
-        uses: actions/setup-python@v3
+        uses: actions/setup-python@v4
         with:
           python-version: '3.11'
 
@@ -33,7 +33,7 @@ jobs:
 
       - name: Prepare release
         run: |
-          python util.py prepare-release --new_version_num=${{ github.event.inputs.newVersionNumber }}
+          python util.py release ${{ github.event.inputs.newVersionNumber }}
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           GITHUB_REPOSITORY_OWNER: ${{ secrets.GITHUB_REPOSITORY_OWNER }}
diff --git a/.github/workflows/critical-rules-test.yml b/.github/workflows/critical-rules-test.yml
deleted file mode 100644
index 439c297..0000000
--- a/.github/workflows/critical-rules-test.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-#############################
-## GitHub Actions CI Tests ##
-#############################
-#
-# This lints all our dialect fixtures to check rules can handle a variety
-# of SQL and don't error out badly.
-#
-# It's run as a separate job as takes longer than the CI jobs and allows
-# them to be rerun separately if GitHub Actions or Coverage is experiencing
-# issues.
-#
-name: Critical CI Tests
-on:
-  workflow_dispatch:
-  schedule:
-    # 2am each night
-    - cron: '00 2 * * *'
-  pull_request:
-  push:
-    branches:
-      - main
-
-jobs:
-  ruleslinting:
-    runs-on: ubuntu-latest
-    name: Rules critical errors tests
-    steps:
-    - uses: actions/checkout@v3
-    - name: Set up Python
-      uses: actions/setup-python@v3
-      with:
-        python-version: '3.11'
-    - name: Install dependencies
-      run: |
-        pip install tox
-        pip install .
-    - name: Run the tests
-      run: |
-        tox -e ruleslinting -- -n 2 test
diff --git a/.github/workflows/publish-dbt-templater-release-to-pypi.yaml b/.github/workflows/publish-dbt-templater-release-to-pypi.yaml
index 4004b2e..98aa476 100644
--- a/.github/workflows/publish-dbt-templater-release-to-pypi.yaml
+++ b/.github/workflows/publish-dbt-templater-release-to-pypi.yaml
@@ -11,7 +11,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-      - uses: actions/setup-python@v3
+      - uses: actions/setup-python@v4
         with:
           python-version: "3.7"
 
diff --git a/.github/workflows/publish-sqlfluff-release-to-pypi.yaml b/.github/workflows/publish-sqlfluff-release-to-pypi.yaml
index 2351371..991cb77 100644
--- a/.github/workflows/publish-sqlfluff-release-to-pypi.yaml
+++ b/.github/workflows/publish-sqlfluff-release-to-pypi.yaml
@@ -11,7 +11,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-      - uses: actions/setup-python@v3
+      - uses: actions/setup-python@v4
         with:
           python-version: "3.7"
 
diff --git a/.gitignore b/.gitignore
index 9bc74ff..eb1c042 100644
--- a/.gitignore
+++ b/.gitignore
@@ -34,6 +34,7 @@ test-reports
 # Ignore root testing sql & python files
 /test*.sql
 /test*.py
+/test*.txt
 /.hypothesis/
 
 # Ignore dbt outputs from testing
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 5a34fd3..0c092bc 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -5,25 +5,25 @@ repos:
       # If adding any exceptions here, make sure to add them to .editorconfig as well
       - id: end-of-file-fixer
         exclude: |
-            (?x)^(
-              test/fixtures/linter/sqlfluffignore/|
-              test/fixtures/config/inheritance_b/example.sql|
-              test/fixtures/config/inheritance_b/nested/example.sql|
-              plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/trailing_newlines.sql|
-              plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/trailing_newlines.sql
-            )$
+          (?x)^(
+            test/fixtures/linter/sqlfluffignore/|
+            test/fixtures/config/inheritance_b/example.sql|
+            test/fixtures/config/inheritance_b/nested/example.sql|
+            plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/trailing_newlines.sql|
+            plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/trailing_newlines.sql
+          )$
       - id: trailing-whitespace
         exclude: |
-            (?x)^(
-              test/fixtures/linter/indentation_errors.sql|
-              test/fixtures/templater/jinja_d_roundtrip/test.sql|
-              test/fixtures/config/inheritance_b/example.sql|
-              test/fixtures/config/inheritance_b/nested/example.sql|
-              plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/macro_in_macro.sq|
-              plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/last_day.sql|
-              plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/dbt_utils_0.8.0/last_day.sql|
-              test/fixtures/linter/sqlfluffignore/
-            )$
+          (?x)^(
+            test/fixtures/linter/indentation_errors.sql|
+            test/fixtures/templater/jinja_d_roundtrip/test.sql|
+            test/fixtures/config/inheritance_b/example.sql|
+            test/fixtures/config/inheritance_b/nested/example.sql|
+            plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/macro_in_macro.sq|
+            plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/last_day.sql|
+            plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/dbt_utils_0.8.0/last_day.sql|
+            test/fixtures/linter/sqlfluffignore/
+          )$
   - repo: https://github.com/psf/black
     rev: 22.3.0
     hooks:
@@ -33,35 +33,35 @@ repos:
     hooks:
       - id: mypy
         args: [--ignore-missing-imports]
-        additional_dependencies: [
-          types-toml,
-          types-pkg_resources,
-          types-chardet,
-          types-appdirs,
-          types-colorama,
-          types-pyyaml,
-          types-regex,
-        ]
+        additional_dependencies:
+          [
+            types-toml,
+            types-pkg_resources,
+            types-chardet,
+            types-appdirs,
+            types-colorama,
+            types-pyyaml,
+            types-regex,
+          ]
         files: ^src/sqlfluff/.*
   - repo: https://github.com/pycqa/flake8
     rev: 4.0.1
     hooks:
       - id: flake8
-        additional_dependencies: [
-          flake8-black>=0.2.4,
-          flake8-docstrings,
-        ]
+        additional_dependencies: [flake8-black>=0.2.4, flake8-docstrings]
   - repo: https://github.com/pycqa/doc8
     rev: 0.10.1
     hooks:
       - id: doc8
-        args: [
-          --file-encoding,
-          utf8,
-        ]
+        args: [--file-encoding, utf8]
         files: docs/source/.*\.rst$
   - repo: https://github.com/adrienverge/yamllint.git
     rev: v1.26.3
     hooks:
       - id: yamllint
         args: [-c=.yamllint]
+  - repo: https://github.com/charliermarsh/ruff-pre-commit
+    # Ruff version.
+    rev: "v0.0.243"
+    hooks:
+      - id: ruff
diff --git a/.pre-commit-hooks.yaml b/.pre-commit-hooks.yaml
index c7e43b2..a587119 100644
--- a/.pre-commit-hooks.yaml
+++ b/.pre-commit-hooks.yaml
@@ -1,19 +1,23 @@
 - id: sqlfluff-lint
   name: sqlfluff-lint
-  entry: sqlfluff lint
+  # Set `--processes 0` to use maximum parallelism
+  entry: sqlfluff lint --processes 0
   language: python
-  description: 'Lints sql files with `SQLFluff`'
+  description: "Lints sql files with `SQLFluff`"
   types: [sql]
   require_serial: true
   additional_dependencies: []
 
 - id: sqlfluff-fix
   name: sqlfluff-fix
-  # Needs to use "--force" to disable confirmation
-  # By default all the rules are applied
-  entry: sqlfluff fix --force
+  # Set a couple of default flags:
+  #  - `--force` to disable confirmation
+  #  - `--show-lint-violations` to show lint issues, avoiding a separate `sqlfluff lint` run
+  #  - `--processes 0` to use maximum parallelism
+  # By default, this hook applies all rules.
+  entry: sqlfluff fix --force --show-lint-violations --processes 0
   language: python
-  description: 'Fixes sql lint errors with `SQLFluff`'
+  description: "Fixes sql lint errors with `SQLFluff`"
   types: [sql]
   require_serial: true
   additional_dependencies: []
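The hook changes above bake default flags (--processes 0 for both hooks, plus --force and --show-lint-violations for the fix hook) into the hook entries. For context, a minimal sketch of how a downstream project would consume these hooks from its own .pre-commit-config.yaml; the rev and dialect shown are illustrative, not prescribed by this diff:

repos:
  - repo: https://github.com/sqlfluff/sqlfluff
    rev: 2.0.5
    hooks:
      - id: sqlfluff-lint
        # pre-commit appends these args (and then filenames) to the hook's entry.
        args: [--dialect, ansi]
      - id: sqlfluff-fix
        args: [--dialect, ansi]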
diff --git a/.readthedocs.yml b/.readthedocs.yml
index d846693..801ab3f 100644
--- a/.readthedocs.yml
+++ b/.readthedocs.yml
@@ -15,9 +15,17 @@ formats: []
 # Optionally set the version of Python and requirements required to build your docs.
 # In our case we need both the docs requirements and the package itself.
 python:
-  version: 3.7
   install:
     - requirements: requirements.txt
     - requirements: docs/requirements.txt
     - method: setuptools
       path: .
+
+build:
+  os: ubuntu-22.04
+  tools:
+    python: "3.11"
+  jobs:
+    # Before building, generate the rule docs
+    pre_build:
+      - python docs/generate-rule-docs.py
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e56b351..c41845e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,788 @@ Note: Changes are now automatically tracked in [GitHub](https://github.com/sqlfl
 -->
 <!--Start Of Releases (DO NOT DELETE THIS LINE)-->
 
+## [2.0.5] - 2023-04-14
+
+## Highlights
+
+This is a relatively swift bugfix release to refine some of the changes made to
+window function indentation in `2.0.4`. In addition, two dialect
+refinements have also been made since that release.
+
+## What’s Changed
+
+* Refactor PG segments to reuse new common segments [#4726](https://github.com/sqlfluff/sqlfluff/pull/4726) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+* Recognize quoted data types [#4747](https://github.com/sqlfluff/sqlfluff/pull/4747) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+
+
+## [2.0.4] - 2023-04-14
+
+## Highlights
+
+This is primarily a _bugfix_ and _dialect_ release:
+* Several bugfixes related to templating and indentation, in particular some
+  improvements to the indentation of aliases and window functions.
+* Performance improvements to the parser.
+* The `--persist-timing` option is now also available on `sqlfluff fix`.
+* A refresh to getting started and rule documentation.
+* Dialect improvements to PostgreSQL, Athena, SparkSQL, MySQL & Snowflake.
+
+Thanks also to [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+and [@Thashin](https://github.com/Thashin) who made their first contributions
+in this release. In particular, [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+made **twenty-one** contributions in their first month! 🎉🎉🎉
+
+## What’s Changed
+
+* SparkSQL: Improvements to lateral view, hints, sort by [#4731](https://github.com/sqlfluff/sqlfluff/pull/4731) [@bmorck](https://github.com/bmorck)
+* Add ExpressionSegment to CREATE TABLE ... DEFAULT / Fix multiple parse issues in Expression_A_Grammar [#4717](https://github.com/sqlfluff/sqlfluff/pull/4717) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+* Add support for the PG VACUUM statement [#4742](https://github.com/sqlfluff/sqlfluff/pull/4742) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+* Simplify and fix PG array accessor segment & support expressions [#4748](https://github.com/sqlfluff/sqlfluff/pull/4748) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+* SparkSQL: Allow for any ordering of create table clauses [#4721](https://github.com/sqlfluff/sqlfluff/pull/4721) [@bmorck](https://github.com/bmorck)
+* Suggested started config file [#4702](https://github.com/sqlfluff/sqlfluff/pull/4702) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Indents on window functions [#4560](https://github.com/sqlfluff/sqlfluff/pull/4560) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* SparkSQL: Fix Group By Clause [#4732](https://github.com/sqlfluff/sqlfluff/pull/4732) [@bmorck](https://github.com/bmorck)
+* Improve support for EXCLUDE table constraints in PG [#4725](https://github.com/sqlfluff/sqlfluff/pull/4725) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+* Add support for dropping multiple indexes in PG [#4737](https://github.com/sqlfluff/sqlfluff/pull/4737) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+* Recognize "on" value and integers for PG SET statement [#4740](https://github.com/sqlfluff/sqlfluff/pull/4740) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+* Improve interval expressions on MySQL [#4746](https://github.com/sqlfluff/sqlfluff/pull/4746) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+* Keep out zero length keywords [#4723](https://github.com/sqlfluff/sqlfluff/pull/4723) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+* Add PG support for CREATE SCHEMA AUTHORIZATION [#4735](https://github.com/sqlfluff/sqlfluff/pull/4735) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+* Add support for dropping multiple views with PostgreSQL [#4736](https://github.com/sqlfluff/sqlfluff/pull/4736) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+* Add CHAR VARYING data type for PG [#4738](https://github.com/sqlfluff/sqlfluff/pull/4738) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+* fix(athena): map type matching failed, array type only contains a datatype [#4739](https://github.com/sqlfluff/sqlfluff/pull/4739) [@timcosta](https://github.com/timcosta)
+* Allow DML queries to be selectable in CTEs on PG [#4741](https://github.com/sqlfluff/sqlfluff/pull/4741) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+* Add the CREATE/DROP CAST statements to ANSI and PG [#4744](https://github.com/sqlfluff/sqlfluff/pull/4744) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+* Add support for PG SET ROLE / RESET ROLE [#4734](https://github.com/sqlfluff/sqlfluff/pull/4734) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+* Support Spark Iceberg DDL [#4690](https://github.com/sqlfluff/sqlfluff/pull/4690) [@bmorck](https://github.com/bmorck)
+* Fix #4680 [#4707](https://github.com/sqlfluff/sqlfluff/pull/4707) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Indent Aliases [#4706](https://github.com/sqlfluff/sqlfluff/pull/4706) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* SparkSQL: Improve window frame bounds [#4722](https://github.com/sqlfluff/sqlfluff/pull/4722) [@bmorck](https://github.com/bmorck)
+* Add support for PG CREATE/ALTER/DROP PUBLICATION stmts [#4716](https://github.com/sqlfluff/sqlfluff/pull/4716) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+* SparkSQL: Create external table support [#4692](https://github.com/sqlfluff/sqlfluff/pull/4692) [@bmorck](https://github.com/bmorck)
+* SparkSQL: Fix file literal lexing [#4718](https://github.com/sqlfluff/sqlfluff/pull/4718) [@bmorck](https://github.com/bmorck)
+* Add PG DROP/REASSIGN OWNED statements [#4720](https://github.com/sqlfluff/sqlfluff/pull/4720) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+* SparkSQL: Add distinct to comparison operator [#4719](https://github.com/sqlfluff/sqlfluff/pull/4719) [@bmorck](https://github.com/bmorck)
+* Rethink Rule Docs [#4695](https://github.com/sqlfluff/sqlfluff/pull/4695) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Performance: Reduce calls to _prune_options [#4705](https://github.com/sqlfluff/sqlfluff/pull/4705) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Snowflake: Add ReferencedVariableNameSegment to sample function [#4712](https://github.com/sqlfluff/sqlfluff/pull/4712) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Mark AM02 as fix compatible [#4714](https://github.com/sqlfluff/sqlfluff/pull/4714) [@yoichi](https://github.com/yoichi)
+* Fix LT01 spacing check in templated areas [#4698](https://github.com/sqlfluff/sqlfluff/pull/4698) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+* Don't do newline conversion on write [#4703](https://github.com/sqlfluff/sqlfluff/pull/4703) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* MySQL: CREATE/ALTER VIEW may take UNION [#4713](https://github.com/sqlfluff/sqlfluff/pull/4713) [@yoichi](https://github.com/yoichi)
+* Preserve zero-length template segments [#4708](https://github.com/sqlfluff/sqlfluff/pull/4708) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+* CV06: don't flag files that don't have code [#4709](https://github.com/sqlfluff/sqlfluff/pull/4709) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+* Add a no-output option [#4704](https://github.com/sqlfluff/sqlfluff/pull/4704) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Jinja templater: treat "import" and "from" as templated [#4696](https://github.com/sqlfluff/sqlfluff/pull/4696) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+* Capitalization rules ignore templated code only if configured to [#4697](https://github.com/sqlfluff/sqlfluff/pull/4697) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
+* Update getting started docs [#4700](https://github.com/sqlfluff/sqlfluff/pull/4700) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Add a default for config_keywords and remove noisy error. [#4701](https://github.com/sqlfluff/sqlfluff/pull/4701) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Snowflake Select System Functions [#4687](https://github.com/sqlfluff/sqlfluff/pull/4687) [@Thashin](https://github.com/Thashin)
+* SparkSQL: Add using and options clause to create view statement [#4691](https://github.com/sqlfluff/sqlfluff/pull/4691) [@bmorck](https://github.com/bmorck)
+* MySQL: Add RETURN Statement [#4693](https://github.com/sqlfluff/sqlfluff/pull/4693) [@yoichi](https://github.com/yoichi)
+* Safety valve for fixes in CV03 [#4685](https://github.com/sqlfluff/sqlfluff/pull/4685) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Allow persist timing on `fix` too. [#4679](https://github.com/sqlfluff/sqlfluff/pull/4679) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* fix{dialect-snowflake}:Alter Table Column Set/Unset Tag [#4682](https://github.com/sqlfluff/sqlfluff/pull/4682) [@Thashin](https://github.com/Thashin)
+* fix{dialect-snowflake}:Execute Task [#4683](https://github.com/sqlfluff/sqlfluff/pull/4683) [@Thashin](https://github.com/Thashin)
+* Make version number an argument not an option in release script. [#4677](https://github.com/sqlfluff/sqlfluff/pull/4677) [@alanmcruickshank](https://github.com/alanmcruickshank)
+
+
+## New Contributors
+* [@Thashin](https://github.com/Thashin) made their first contribution in [#4683](https://github.com/sqlfluff/sqlfluff/pull/4683)
+* [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack) made their first contribution in [#4697](https://github.com/sqlfluff/sqlfluff/pull/4697)
+
+## [2.0.3] - 2023-04-05
+
+## Highlights
+
+This is primarily a _bugfix_ and _dialect_ release:
+* Several bugfixes related to templating and indentation.
+* Configurable indentation before `THEN` in `CASE` statements
+  (see [#4598](https://github.com/sqlfluff/sqlfluff/pull/4598)).
+* Performance improvements to `TypedParser`, `LT03` & `LT04`.
+* Rule timings now appear in the `--persist-timing` option for deeper
+  performance understanding.
+* The introduction of a Greenplum dialect.
+* Dialect improvements to TSQL, Athena, Snowflake, MySQL, SparkSQL,
+  BigQuery, Databricks, Clickhouse & Postgres.
+
+We also saw a _huge number of first-time contributors_ with **9** contributing
+in this release 🎉🏆🎉.
+
+## What’s Changed
+
+* Better error message for missing keywords [#4676](https://github.com/sqlfluff/sqlfluff/pull/4676) [@tunetheweb](https://github.com/tunetheweb)
+* Add performance shortcuts to LT03 & LT04 [#4672](https://github.com/sqlfluff/sqlfluff/pull/4672) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Clickhouse: Add support for [LEFT] ARRAY JOIN [#4618](https://github.com/sqlfluff/sqlfluff/pull/4618) [@simpl1g](https://github.com/simpl1g)
+* Postgres - allow untyped OVERLAPS clauses [#4674](https://github.com/sqlfluff/sqlfluff/pull/4674) [@tunetheweb](https://github.com/tunetheweb)
+* Mark `is_alias_required` as a private class so it doesn't appear in docs [#4673](https://github.com/sqlfluff/sqlfluff/pull/4673) [@tunetheweb](https://github.com/tunetheweb)
+* Fix bug in templated with clauses LT07 [#4671](https://github.com/sqlfluff/sqlfluff/pull/4671) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* TSQL: `OPENJSON()` [#4652](https://github.com/sqlfluff/sqlfluff/pull/4652) [@keen85](https://github.com/keen85)
+* fix(RF06/L059): allows configuring prefer_quoted_keywords to deconflict with L029 [#4396](https://github.com/sqlfluff/sqlfluff/pull/4396) [@timcosta](https://github.com/timcosta)
+* TSQL: `Create External Table` [#4642](https://github.com/sqlfluff/sqlfluff/pull/4642) [@aly76](https://github.com/aly76)
+* Consistent indentation in `MERGE` `INSERT` clause [#4666](https://github.com/sqlfluff/sqlfluff/pull/4666) [@dmohns](https://github.com/dmohns)
+* BigQuery: Fix null assignment in options segment [#4669](https://github.com/sqlfluff/sqlfluff/pull/4669) [@greg-finley](https://github.com/greg-finley)
+* BigQuery: Delete table reference [#4668](https://github.com/sqlfluff/sqlfluff/pull/4668) [@greg-finley](https://github.com/greg-finley)
+* TSQL: `CREATE EXTERNAL FILE FORMAT` [#4647](https://github.com/sqlfluff/sqlfluff/pull/4647) [@keen85](https://github.com/keen85)
+* Remove TIME as reserved keyword in SparkSQL [#4662](https://github.com/sqlfluff/sqlfluff/pull/4662) [@bmorck](https://github.com/bmorck)
+* Start of the Greenplum dialect implementation  [#4661](https://github.com/sqlfluff/sqlfluff/pull/4661) [@JackWolverson](https://github.com/JackWolverson)
+* Enable configuring whether to require indent before THEN [#4598](https://github.com/sqlfluff/sqlfluff/pull/4598) [@fredriv](https://github.com/fredriv)
+* Sequence Meta Handling [#4622](https://github.com/sqlfluff/sqlfluff/pull/4622) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Add support for non-quoted file paths in SparkSQL [#4650](https://github.com/sqlfluff/sqlfluff/pull/4650) [@bmorck](https://github.com/bmorck)
+* Remove three RegexParsers [#4658](https://github.com/sqlfluff/sqlfluff/pull/4658) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Make parse test readout more helpful [#4657](https://github.com/sqlfluff/sqlfluff/pull/4657) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* TSQL: support for `sqlcmd` commands `:r` and `:setvar` [#4653](https://github.com/sqlfluff/sqlfluff/pull/4653) [@keen85](https://github.com/keen85)
+* Update README with Databricks note [#4632](https://github.com/sqlfluff/sqlfluff/pull/4632) [@liamperritt](https://github.com/liamperritt)
+* Athena: Fix parsing error with aliases starting with underscore [#4636](https://github.com/sqlfluff/sqlfluff/pull/4636) [@maiarareinaldo](https://github.com/maiarareinaldo)
+* Snowflake: Stop ever-increasing indent in CREATE USER [#4638](https://github.com/sqlfluff/sqlfluff/pull/4638) [@roman-ef](https://github.com/roman-ef)
+* TSQL: `PERIOD FOR SYSTEM_TIME` (temporal tables) [#4654](https://github.com/sqlfluff/sqlfluff/pull/4654) [@keen85](https://github.com/keen85)
+* MySQL: SelectStatementSegment in CREATE/ALTER VIEW may be bracketed [#4655](https://github.com/sqlfluff/sqlfluff/pull/4655) [@yoichi](https://github.com/yoichi)
+* TSQL: `CREATE EXTERNAL DATA SOURCE` [#4634](https://github.com/sqlfluff/sqlfluff/pull/4634) [@keen85](https://github.com/keen85)
+* Safety valve on source fixes [#4640](https://github.com/sqlfluff/sqlfluff/pull/4640) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Add SparkSQL support for LONG primitive type [#4639](https://github.com/sqlfluff/sqlfluff/pull/4639) [@bmorck](https://github.com/bmorck)
+* Fix PIVOT clauses for BigQuery and SparkSQL [#4630](https://github.com/sqlfluff/sqlfluff/pull/4630) [@tunetheweb](https://github.com/tunetheweb)
+* Correct BigQuery WINDOW parsing [#4629](https://github.com/sqlfluff/sqlfluff/pull/4629) [@tunetheweb](https://github.com/tunetheweb)
+* Add Databricks dialect support for Unity Catalog [#4568](https://github.com/sqlfluff/sqlfluff/pull/4568) [@liamperritt](https://github.com/liamperritt)
+* .simple() matching for TypedMatcher [#4612](https://github.com/sqlfluff/sqlfluff/pull/4612) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* --bench output with rule timings [#4601](https://github.com/sqlfluff/sqlfluff/pull/4601) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* MySQL: Unnamed constraints [#4616](https://github.com/sqlfluff/sqlfluff/pull/4616) [@greg-finley](https://github.com/greg-finley)
+* TSQL: Create database scoped credential [#4615](https://github.com/sqlfluff/sqlfluff/pull/4615) [@greg-finley](https://github.com/greg-finley)
+* fix(dialect-clickhouse): Add materialized view statement [#4605](https://github.com/sqlfluff/sqlfluff/pull/4605) [@germainlefebvre4](https://github.com/germainlefebvre4)
+* Nicer formatted dbt errors [#4606](https://github.com/sqlfluff/sqlfluff/pull/4606) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* add parse lambda function Clickhouse [#4611](https://github.com/sqlfluff/sqlfluff/pull/4611) [@konnectr](https://github.com/konnectr)
+* Support `WITH ORDINALITY` clauses in Postgres [#4599](https://github.com/sqlfluff/sqlfluff/pull/4599) [@tunetheweb](https://github.com/tunetheweb)
+
+
+## New Contributors
+* [@germainlefebvre4](https://github.com/germainlefebvre4) made their first contribution in [#4605](https://github.com/sqlfluff/sqlfluff/pull/4605)
+* [@liamperritt](https://github.com/liamperritt) made their first contribution in [#4568](https://github.com/sqlfluff/sqlfluff/pull/4568)
+* [@bmorck](https://github.com/bmorck) made their first contribution in [#4639](https://github.com/sqlfluff/sqlfluff/pull/4639)
+* [@keen85](https://github.com/keen85) made their first contribution in [#4634](https://github.com/sqlfluff/sqlfluff/pull/4634)
+* [@roman-ef](https://github.com/roman-ef) made their first contribution in [#4638](https://github.com/sqlfluff/sqlfluff/pull/4638)
+* [@maiarareinaldo](https://github.com/maiarareinaldo) made their first contribution in [#4636](https://github.com/sqlfluff/sqlfluff/pull/4636)
+* [@fredriv](https://github.com/fredriv) made their first contribution in [#4598](https://github.com/sqlfluff/sqlfluff/pull/4598)
+* [@aly76](https://github.com/aly76) made their first contribution in [#4642](https://github.com/sqlfluff/sqlfluff/pull/4642)
+* [@simpl1g](https://github.com/simpl1g) made their first contribution in [#4618](https://github.com/sqlfluff/sqlfluff/pull/4618)
+
+## [2.0.2] - 2023-03-23
+
+## Highlights
+
+This is primarily a _bugfix_ release. Most notably this solves some of the
+issues introduced in 2.0.1 around spacing within datatypes. Expressions
+like `1.0::double precision` should now be spaced correctly.
+
+Beyond that, this contains a selection of smaller bugfixes and dialect
+improvements. Even for a relatively small release we saw three new
+contributors (thanks [@aurany](https://github.com/aurany), [@JackWolverson](https://github.com/JackWolverson)
+& [@mikaeltw](https://github.com/mikaeltw) 🎉).
+
+The one new _feature_ (as such) is that `LT05` (aka `layout.long_lines`)
+can now be configured to optionally move trailing comments _after_
+the line they are found on, rather than the default behaviour of moving
+them up and _before_. Users can enable this with the `trailing_comments`
+configuration setting in the `indentation` section.
+
+This release _also_ contains some performance optimisations in the parser,
+especially on queries with heavily nested expressions. There will be more
+to come in this space, but we hope this leads to a better experience for
+many users. 🚀
+
+## What’s Changed
+
+* Parse Caching [#4576](https://github.com/sqlfluff/sqlfluff/pull/4576) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Data type spacing [#4592](https://github.com/sqlfluff/sqlfluff/pull/4592) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* MySQL: allow quoted literal in alias name [#4591](https://github.com/sqlfluff/sqlfluff/pull/4591) [@yoichi](https://github.com/yoichi)
+* Make implicit indents visible in the parse tree [#4584](https://github.com/sqlfluff/sqlfluff/pull/4584) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Fix #4559: TSQL implicit indents on WHERE [#4583](https://github.com/sqlfluff/sqlfluff/pull/4583) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Added keywords to DB2 dialect from IBM docs [#4575](https://github.com/sqlfluff/sqlfluff/pull/4575) [@aurany](https://github.com/aurany)
+* Remove matches_target_tuples (#3873) [#4561](https://github.com/sqlfluff/sqlfluff/pull/4561) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Use terminators in BaseExpression [#4577](https://github.com/sqlfluff/sqlfluff/pull/4577) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Address #1630: Optionally move comments after long line [#4558](https://github.com/sqlfluff/sqlfluff/pull/4558) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Added schema to set statement [#4580](https://github.com/sqlfluff/sqlfluff/pull/4580) [@JackWolverson](https://github.com/JackWolverson)
+* Refactor lint_line_length and fix comma bug [#4564](https://github.com/sqlfluff/sqlfluff/pull/4564) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Fix untaken indent bug [#4562](https://github.com/sqlfluff/sqlfluff/pull/4562) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* SQLite: Fix SELECT LIMIT [#4566](https://github.com/sqlfluff/sqlfluff/pull/4566) [@greg-finley](https://github.com/greg-finley)
+* Fix #4453: Snowflake semi-stuctured casts in CV11 [#4571](https://github.com/sqlfluff/sqlfluff/pull/4571) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Name of LT07 [#4557](https://github.com/sqlfluff/sqlfluff/pull/4557) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Patch fetch and over [#4555](https://github.com/sqlfluff/sqlfluff/pull/4555) [@mikaeltw](https://github.com/mikaeltw)
+
+## New Contributors
+
+* [@mikaeltw](https://github.com/mikaeltw) made their first contribution in [#4555](https://github.com/sqlfluff/sqlfluff/pull/4555)
+* [@JackWolverson](https://github.com/JackWolverson) made their first contribution in [#4580](https://github.com/sqlfluff/sqlfluff/pull/4580)
+* [@aurany](https://github.com/aurany) made their first contribution in [#4575](https://github.com/sqlfluff/sqlfluff/pull/4575)
+
+## [2.0.1] - 2023-03-17
+
+## Highlights
+
+This is mostly a bugfix release addressing some of the issues from the recent
+2.0 release. Notable fixes are:
+- Spacing (as applied by `LT01`) for datatypes, hyphenated identifiers and
+  casting operators.
+- Several bugs in the indentation routines (`LT02`), in particular with implicit
+  indents.
+- Fixing a conflict between `LT09` and `LT02`, by limiting `LT09` to bringing
+  targets onto a single line only if there is a single select target **and** it
+  contains no newlines.
+- Supporting arrays, and the new rules configuration more effectively in `pyproject.toml`.
+- Configuring dialects on a file-by-file basis using inline comments now works.
+
+This release also brings one small new feature in allowing additional flags to
+be passed to SQLFluff when called as a `pre-commit` hook.
+
+Thanks especially to [@JavierMonton](https://github.com/JavierMonton) and
+[@LauraRichter](https://github.com/LauraRichter) who made their first contributions
+to the project as part of this release! 🎉🏆
+
+## What’s Changed
+
+* Add support for arrays in TOML configuration [#4387](https://github.com/sqlfluff/sqlfluff/pull/4387) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* Rethink test segregation in CI [#4547](https://github.com/sqlfluff/sqlfluff/pull/4547) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Fix #4515 and add more test cases [#4525](https://github.com/sqlfluff/sqlfluff/pull/4525) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Add additional flags to `sqlfluff` invocations in pre-commit hooks [#4546](https://github.com/sqlfluff/sqlfluff/pull/4546) [@borchero](https://github.com/borchero)
+* Resolve #4484 (issues with indented_joins indents) [#4544](https://github.com/sqlfluff/sqlfluff/pull/4544) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Per file dialect selection fix [#4518](https://github.com/sqlfluff/sqlfluff/pull/4518) [@LauraRichter](https://github.com/LauraRichter)
+* MySQL: Add CREATE INDEX [#4538](https://github.com/sqlfluff/sqlfluff/pull/4538) [@yoichi](https://github.com/yoichi)
+* Resolve implicit indent issues when catching negative indents [#4543](https://github.com/sqlfluff/sqlfluff/pull/4543) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Github Action Deprecations [#4545](https://github.com/sqlfluff/sqlfluff/pull/4545) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* LT09 and multiline select targets [#4529](https://github.com/sqlfluff/sqlfluff/pull/4529) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Remove Codecov from CI [#4535](https://github.com/sqlfluff/sqlfluff/pull/4535) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Bigquery hyphentated identifiers [#4530](https://github.com/sqlfluff/sqlfluff/pull/4530) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Attempt in-house coverage [#4532](https://github.com/sqlfluff/sqlfluff/pull/4532) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Postgres datatype spacing issues [#4528](https://github.com/sqlfluff/sqlfluff/pull/4528) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Support new rules config in toml files. [#4526](https://github.com/sqlfluff/sqlfluff/pull/4526) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Resolve #1146 (log propagation) [#4513](https://github.com/sqlfluff/sqlfluff/pull/4513) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Snowflake: Optional quotes for `create user` statement [#4514](https://github.com/sqlfluff/sqlfluff/pull/4514) [@JavierMonton](https://github.com/JavierMonton)
+
+## New Contributors
+
+* [@JavierMonton](https://github.com/JavierMonton) made their first contribution in [#4514](https://github.com/sqlfluff/sqlfluff/pull/4514)
+* [@LauraRichter](https://github.com/LauraRichter) made their first contribution in [#4518](https://github.com/sqlfluff/sqlfluff/pull/4518)
+
+## [2.0.0] - 2023-03-13
+
+## Highlights
+
+Upgrading to 2.0 brings several important **breaking changes**:
+
+* All bundled rules have been recoded, both from generic `L00X` codes
+  into groups of similar codes (e.g. an *aliasing* group with codes
+  of the format `AL0X`), and also given *names* to allow much clearer
+  referencing (e.g. `aliasing.column`); see the sketch after this list.
+* [Configuring rules](https://docs.sqlfluff.com/en/latest/configuration.html#rule-configuration)
+  now uses the rule *name* rather than the rule *code* to
+  specify the section. Any unrecognised references in config files (whether
+  they *do* match existing rules by code or alias, or
+  match no rules at all) will raise warnings at runtime.
+* A complete re-write of layout and whitespace handling rules (see
+  [layout](https://docs.sqlfluff.com/en/latest/layout.html)), and with
+  that a change in how layout is configured (see
+  [configuring layout](https://docs.sqlfluff.com/en/latest/layout.html#configuring-layout))
+  and the combination of some rules that were previously separate. One
+  example of this is that the legacy rules `L001`, `L005`, `L006`, `L008`,
+  `L023`, `L024`, `L039`, `L048` & `L071` have been combined simply into
+  [LT01](https://docs.sqlfluff.com/en/latest/rules.html#sqlfluff.rules.sphinx.Rule_LT01).
+* Dropping support for dbt versions before `1.1`.
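+
+As a sketch of the new naming in a `.sqlfluff` config file (the rule names
+and values here are illustrative; see the rule docs for the full list):
+
+```ini
+[sqlfluff]
+# Rules can now be selected by name, code, alias or group.
+rules = aliasing, capitalisation.keywords
+
+# Configuration sections use the rule *name*, not the code.
+[sqlfluff:rules:capitalisation.keywords]
+capitalisation_policy = upper
+```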
+
+To help users upgrade to 2.0, we've put together a recommended process
+as part of our [release notes](https://docs.sqlfluff.com/en/latest/releasenotes.html#upgrading-from-1-x-to-2-0).
+
+Beyond the breaking changes, this release brings *a load* of additional
+changes:
+
+* Introduces the `sqlfluff format` CLI command (à la `sqlfmt` or `black`)
+  to auto-format SQL files using a known set of _fairly safe_ rules (see
+  the example after this list).
+* Databricks as a distinct new dialect (rather than as previously an alias
+  for `sparksql`).
+* Performance improvements in our parsing engine.
+* Dialect improvements to _almost all of them_.
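+
+For example (a minimal sketch; the filename is illustrative):
+
+```shell
+# Auto-format a file in place using the "fairly safe" subset of rules.
+sqlfluff format my_model.sql --dialect ansi
+```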
+
+As a new major release, especially with significant rewrites of large
+portions of the codebase, we recommend using [compatible release](https://peps.python.org/pep-0440/#compatible-release)
+specifiers in your dependencies (i.e. `sqlfluff~=2.0.0`) so that you
+can automatically take advantage of any bugfix releases in the coming
+weeks. The alpha releases of 2.0.0 have been tested on a range of large
+projects, but we know that the range of use cases _"in the wild"_ is
+very diverse. If you do experience issues, please post them
+[on GitHub](https://github.com/sqlfluff/sqlfluff/issues/new/choose)
+in the usual manner.
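+
+For example, pinned in a `requirements.txt` (a minimal sketch):
+
+```
+sqlfluff~=2.0.0
+```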
+
+Finally, thanks to everyone who has worked on this release, especially
+[@konnectr](https://github.com/konnectr),
+[@ValentinCrr](https://github.com/ValentinCrr),
+[@FabianScheidt](https://github.com/FabianScheidt),
+[@dflem97](https://github.com/dflem97),
+[@timcosta](https://github.com/timcosta),
+[@AidanHarveyNelson](https://github.com/AidanHarveyNelson),
+[@joar](https://github.com/joar),
+[@jmpfar](https://github.com/jmpfar),
+[@jared-rimmer](https://github.com/jared-rimmer),
+[@vesatoivonen](https://github.com/vesatoivonen),
+[@briankravec](https://github.com/briankravec),
+[@saintamh](https://github.com/saintamh),
+[@tdurieux](https://github.com/tdurieux),
+[@baa-ableton](https://github.com/baa-ableton),
+& [@WillAyd](https://github.com/WillAyd) who made their first contributions
+during the development of 2.0.0. Thanks for your contributions, and
+especially your patience in the slightly slower release of your efforts
+into the wild. 🙏🎉
+
+## What’s Changed
+
+* Revise templating and lexing of calls. [#4506](https://github.com/sqlfluff/sqlfluff/pull/4506) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Struct Access Spacing [#4512](https://github.com/sqlfluff/sqlfluff/pull/4512) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Array and Struct Spacing [#4511](https://github.com/sqlfluff/sqlfluff/pull/4511) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Add a deprecation warning for removed config option. [#4509](https://github.com/sqlfluff/sqlfluff/pull/4509) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Bigquery spacing [#4508](https://github.com/sqlfluff/sqlfluff/pull/4508) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Fix #4433 (more untaken positive indents) [#4499](https://github.com/sqlfluff/sqlfluff/pull/4499) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Fix parse error on double parentheses [#4504](https://github.com/sqlfluff/sqlfluff/pull/4504) [@yoichi](https://github.com/yoichi)
+* 2.0.0 Migration Guide [#4498](https://github.com/sqlfluff/sqlfluff/pull/4498) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Handle missing aliases and align constraints better [#4493](https://github.com/sqlfluff/sqlfluff/pull/4493) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* TSQL: Add support For Clause [#4501](https://github.com/sqlfluff/sqlfluff/pull/4501) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* Allow Jinja rule to loop safely [#4495](https://github.com/sqlfluff/sqlfluff/pull/4495) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Trigger CI tests for merge groups [#4503](https://github.com/sqlfluff/sqlfluff/pull/4503) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Update Readme and Contributing [#4502](https://github.com/sqlfluff/sqlfluff/pull/4502) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Update layout docs [#4500](https://github.com/sqlfluff/sqlfluff/pull/4500) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Bug in operator precedence [#4497](https://github.com/sqlfluff/sqlfluff/pull/4497) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* BigQuery: correct query syntax for single column `UNPIVOT` clauses [#4494](https://github.com/sqlfluff/sqlfluff/pull/4494) [@imrehg](https://github.com/imrehg)
+* Fix #4485 [#4491](https://github.com/sqlfluff/sqlfluff/pull/4491) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Update reserved keywords in Athena language [#4490](https://github.com/sqlfluff/sqlfluff/pull/4490) [@ValentinCrr](https://github.com/ValentinCrr)
+* Clickhouse support all join types [#4488](https://github.com/sqlfluff/sqlfluff/pull/4488) [@konnectr](https://github.com/konnectr)
+* Snowflake semi-structured spacing [#4487](https://github.com/sqlfluff/sqlfluff/pull/4487) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Prep version 2.0.0a6 [#4476](https://github.com/sqlfluff/sqlfluff/pull/4476) [@github-actions](https://github.com/github-actions)
+* Fix #4367 [#4479](https://github.com/sqlfluff/sqlfluff/pull/4479) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Teradata: Improve COLLECT STATS parsing [#4478](https://github.com/sqlfluff/sqlfluff/pull/4478) [@dflem97](https://github.com/dflem97)
+* Add a sqlfluff format CLI command [#4473](https://github.com/sqlfluff/sqlfluff/pull/4473) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Recode and disable L031 -> AL07 [#4471](https://github.com/sqlfluff/sqlfluff/pull/4471) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Named Config (part 2) [#4470](https://github.com/sqlfluff/sqlfluff/pull/4470) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Rule config lookup improvements & config warnings [#4465](https://github.com/sqlfluff/sqlfluff/pull/4465) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Recode L050 [#4468](https://github.com/sqlfluff/sqlfluff/pull/4468) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Implicit indent fixes #4467 [#4469](https://github.com/sqlfluff/sqlfluff/pull/4469) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* ANSI: Add IfExistsGrammar to DropTrigger [#4466](https://github.com/sqlfluff/sqlfluff/pull/4466) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Rules Reorg Mopup [#4462](https://github.com/sqlfluff/sqlfluff/pull/4462) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Layout Rules Recode (part 2) [#4456](https://github.com/sqlfluff/sqlfluff/pull/4456) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* fix(athena): resolve errors parsing around maps, structs, and arrays [#4391](https://github.com/sqlfluff/sqlfluff/pull/4391) [@timcosta](https://github.com/timcosta)
+* Layout Rules Recode (part 1) [#4432](https://github.com/sqlfluff/sqlfluff/pull/4432) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* TSQL: EXEC string literal [#4458](https://github.com/sqlfluff/sqlfluff/pull/4458) [@jpers36](https://github.com/jpers36)
+* Teradata: Added SET QUERY_BAND statement  [#4459](https://github.com/sqlfluff/sqlfluff/pull/4459) [@dflem97](https://github.com/dflem97)
+* Teradata: Added TOP select clause modifier [#4461](https://github.com/sqlfluff/sqlfluff/pull/4461) [@dflem97](https://github.com/dflem97)
+* Teradata: Addition of comparison operator extensions [#4451](https://github.com/sqlfluff/sqlfluff/pull/4451) [@dflem97](https://github.com/dflem97)
+* Add extensions and plugin section to the README.md [#4454](https://github.com/sqlfluff/sqlfluff/pull/4454) [@jared-rimmer](https://github.com/jared-rimmer)
+* Convention rules bundle [#4448](https://github.com/sqlfluff/sqlfluff/pull/4448) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* References rule bundle [#4446](https://github.com/sqlfluff/sqlfluff/pull/4446) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Structure and Ambiguous rule bundles [#4444](https://github.com/sqlfluff/sqlfluff/pull/4444) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* TSQL: Bare functions [#4439](https://github.com/sqlfluff/sqlfluff/pull/4439) [@jpers36](https://github.com/jpers36)
+* Pull dbt CI tests forward to 1.1 and 1.4 [#4442](https://github.com/sqlfluff/sqlfluff/pull/4442) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Teradata: Added "AND STATS" options when creating table [#4440](https://github.com/sqlfluff/sqlfluff/pull/4440) [@dflem97](https://github.com/dflem97)
+* Add Databricks as a distinct dialect [#4438](https://github.com/sqlfluff/sqlfluff/pull/4438) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Remove importlib deprecated methods [#4437](https://github.com/sqlfluff/sqlfluff/pull/4437) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* SQLite: Support PRAGMA statements [#4431](https://github.com/sqlfluff/sqlfluff/pull/4431) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Proposed graceful handling of noqa by L016 (#4248) [#4424](https://github.com/sqlfluff/sqlfluff/pull/4424) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* DuckDb: Allow quoted literals as identifiers [#4410](https://github.com/sqlfluff/sqlfluff/pull/4410) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* SQLite Refactor to reduce statement and keyword scope [#4409](https://github.com/sqlfluff/sqlfluff/pull/4409) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* L046 and L056 recode [#4430](https://github.com/sqlfluff/sqlfluff/pull/4430) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Recode Aliasing Rules [#4427](https://github.com/sqlfluff/sqlfluff/pull/4427) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Adjust MySQL dialect to support combination of not-null, default and … [#4426](https://github.com/sqlfluff/sqlfluff/pull/4426) [@FabianScheidt](https://github.com/FabianScheidt)
+* Revert some changes to tox [#4428](https://github.com/sqlfluff/sqlfluff/pull/4428) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Migrate capitalisation rules to plugin and recode [#4413](https://github.com/sqlfluff/sqlfluff/pull/4413) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Prep version 2.0.0a5 [#4419](https://github.com/sqlfluff/sqlfluff/pull/4419) [@github-actions](https://github.com/github-actions)
+* Handle long lines without trailing newlines gracefully (#4386) [#4423](https://github.com/sqlfluff/sqlfluff/pull/4423) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Resolve #4184 (index error in L007) [#4422](https://github.com/sqlfluff/sqlfluff/pull/4422) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Handle untaken positive indents with taken negative pair. [#4420](https://github.com/sqlfluff/sqlfluff/pull/4420) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Postgres: AS MATERIALIZED support [#4417](https://github.com/sqlfluff/sqlfluff/pull/4417) [@saintamh](https://github.com/saintamh)
+* Align warnings config with example shown [#4421](https://github.com/sqlfluff/sqlfluff/pull/4421) [@briankravec](https://github.com/briankravec)
+* BigQuery: parse "AS description" part of assert expressions [#4418](https://github.com/sqlfluff/sqlfluff/pull/4418) [@yoichi](https://github.com/yoichi)
+* Deprecate doc decorators (replace with metaclass) [#4415](https://github.com/sqlfluff/sqlfluff/pull/4415) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Enable noqa using aliases and groups [#4414](https://github.com/sqlfluff/sqlfluff/pull/4414) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Add rule names to CLI outputs [#4400](https://github.com/sqlfluff/sqlfluff/pull/4400) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Postgres: Remove execution keyword inherited from ANSI [#4411](https://github.com/sqlfluff/sqlfluff/pull/4411) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Rule names, aliases and more complicated selection. [#4399](https://github.com/sqlfluff/sqlfluff/pull/4399) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Postgres: Support Recursive View [#4412](https://github.com/sqlfluff/sqlfluff/pull/4412) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* T-SQL: Implement BULK INSERT statement [#4381](https://github.com/sqlfluff/sqlfluff/pull/4381) [@borchero](https://github.com/borchero)
+* L062: Add match_source (#4172) [#4335](https://github.com/sqlfluff/sqlfluff/pull/4335) [@vesatoivonen](https://github.com/vesatoivonen)
+* TSQL: Add SET to ALTER TABLE [#4407](https://github.com/sqlfluff/sqlfluff/pull/4407) [@jared-rimmer](https://github.com/jared-rimmer)
+* Snowflake: ALTER STORAGE INTEGRATION segment [#4406](https://github.com/sqlfluff/sqlfluff/pull/4406) [@jared-rimmer](https://github.com/jared-rimmer)
+* Fix incorrect link to pre-commit docs [#4405](https://github.com/sqlfluff/sqlfluff/pull/4405) [@pdebelak](https://github.com/pdebelak)
+* Add Snowflake dialect ALTER ROLE segment [#4403](https://github.com/sqlfluff/sqlfluff/pull/4403) [@jared-rimmer](https://github.com/jared-rimmer)
+* Improving Postgres create index statement [#4356](https://github.com/sqlfluff/sqlfluff/pull/4356) [@jmpfar](https://github.com/jmpfar)
+* Resolve #4291: Comments forcing unexpected indents. [#4384](https://github.com/sqlfluff/sqlfluff/pull/4384) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Resolve #4294: Comments affecting indentation [#4337](https://github.com/sqlfluff/sqlfluff/pull/4337) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Resolve #4292: Window function long line fixes [#4383](https://github.com/sqlfluff/sqlfluff/pull/4383) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* TSQL: ALTER INDEX [#4364](https://github.com/sqlfluff/sqlfluff/pull/4364) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* Added Varying Keyword to allowed data type segments [#4375](https://github.com/sqlfluff/sqlfluff/pull/4375) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* Add ruff linter [#4372](https://github.com/sqlfluff/sqlfluff/pull/4372) [@greg-finley](https://github.com/greg-finley)
+* Fix postgres column constraint default syntax [#4379](https://github.com/sqlfluff/sqlfluff/pull/4379) [@pdebelak](https://github.com/pdebelak)
+* Allow function names to have a leading underscore [#4377](https://github.com/sqlfluff/sqlfluff/pull/4377) [@gavin-tsang](https://github.com/gavin-tsang)
+* TSQL: Merge Hints [#4354](https://github.com/sqlfluff/sqlfluff/pull/4354) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* TSQL: Temporal Table [#4358](https://github.com/sqlfluff/sqlfluff/pull/4358) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* TSQL: ALTER TABLE [#4369](https://github.com/sqlfluff/sqlfluff/pull/4369) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* Bugfix: Duckdb SELECT * [#4365](https://github.com/sqlfluff/sqlfluff/pull/4365) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Postgres: TABLESAMPLE query [#4357](https://github.com/sqlfluff/sqlfluff/pull/4357) [@greg-finley](https://github.com/greg-finley)
+* reindent refactor [#4338](https://github.com/sqlfluff/sqlfluff/pull/4338) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Snowflake: INSERT INTO [#4363](https://github.com/sqlfluff/sqlfluff/pull/4363) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* Docs: correct toml syntax of pyproject.toml file config example [#4361](https://github.com/sqlfluff/sqlfluff/pull/4361) [@imrehg](https://github.com/imrehg)
+* Allowed Naked Identifiers [#4359](https://github.com/sqlfluff/sqlfluff/pull/4359) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* TSQL: TABLESAMPLE [#4353](https://github.com/sqlfluff/sqlfluff/pull/4353) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* Tsql: Function Parameters [#4352](https://github.com/sqlfluff/sqlfluff/pull/4352) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* Postgres: Storage parameters [#4350](https://github.com/sqlfluff/sqlfluff/pull/4350) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* TSQL: Bare Function Set [#4351](https://github.com/sqlfluff/sqlfluff/pull/4351) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* Postgres: View options [#4340](https://github.com/sqlfluff/sqlfluff/pull/4340) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* BigQuery: SELECT DISTINCT AS STRUCT [#4341](https://github.com/sqlfluff/sqlfluff/pull/4341) [@joar](https://github.com/joar)
+* Snowflake: Fix Alter Warehouse [#4344](https://github.com/sqlfluff/sqlfluff/pull/4344) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* Parser: Optimise lookahead_match [#4327](https://github.com/sqlfluff/sqlfluff/pull/4327) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Add support for dbt test macros [#4319](https://github.com/sqlfluff/sqlfluff/pull/4319) [@pdebelak](https://github.com/pdebelak)
+* Bracket complex expressions before applying :: operator in Rule L067 [#4326](https://github.com/sqlfluff/sqlfluff/pull/4326) [@pdebelak](https://github.com/pdebelak)
+* Prep version 2.0.0a4 [#4322](https://github.com/sqlfluff/sqlfluff/pull/4322) [@github-actions](https://github.com/github-actions)
+* BigQuery: Alter table alter column [#4316](https://github.com/sqlfluff/sqlfluff/pull/4316) [@greg-finley](https://github.com/greg-finley)
+* Handle renamed dbt exceptions [#4317](https://github.com/sqlfluff/sqlfluff/pull/4317) [@greg-finley](https://github.com/greg-finley)
+* Parser: Fix early exit for simple matchers [#4305](https://github.com/sqlfluff/sqlfluff/pull/4305) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* MySQL: Add CREATE DATABASE and ALTER DATABASE [#4307](https://github.com/sqlfluff/sqlfluff/pull/4307) [@yoichi](https://github.com/yoichi)
+* BigQuery: Add ALTER VIEW [#4306](https://github.com/sqlfluff/sqlfluff/pull/4306) [@yoichi](https://github.com/yoichi)
+* toml: only install `toml` dependency if < Python 3.11 (otherwise use builtin `tomllib`) [#4303](https://github.com/sqlfluff/sqlfluff/pull/4303) [@kevinmarsh](https://github.com/kevinmarsh)
+* Fix #4024 example plugin unit tests import [#4302](https://github.com/sqlfluff/sqlfluff/pull/4302) [@matthieucan](https://github.com/matthieucan)
+* Prep version 2.0.0a3 [#4290](https://github.com/sqlfluff/sqlfluff/pull/4290) [@github-actions](https://github.com/github-actions)
+* Move ISSUE from Snowflake reserved keywords to unreserved ones [#4279](https://github.com/sqlfluff/sqlfluff/pull/4279) [@KaoutherElhamdi](https://github.com/KaoutherElhamdi)
+* Due to performance and other issues, revert the osmosis implementation of the templater for now [#4273](https://github.com/sqlfluff/sqlfluff/pull/4273) [@barrywhart](https://github.com/barrywhart)
+* Simplify lexing [#4289](https://github.com/sqlfluff/sqlfluff/pull/4289) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Fix #4255 (Fix exception on mixed indent description) [#4288](https://github.com/sqlfluff/sqlfluff/pull/4288) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Fix #4253 (incorrect trigger of L006 around placeholders) [#4287](https://github.com/sqlfluff/sqlfluff/pull/4287) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Fix #4249 (TSQL block comment indents) [#4286](https://github.com/sqlfluff/sqlfluff/pull/4286) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Fix #4252 (Resolve multiple sensible indents) [#4285](https://github.com/sqlfluff/sqlfluff/pull/4285) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Parser Performance: Cache segment string repr to reduce function calls [#4278](https://github.com/sqlfluff/sqlfluff/pull/4278) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Snowflake: GRANT SUPPORT CASES [#4283](https://github.com/sqlfluff/sqlfluff/pull/4283) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Dialect: duckdb [#4284](https://github.com/sqlfluff/sqlfluff/pull/4284) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Snowflake: Add variable pattern to CopyIntoTable [#4275](https://github.com/sqlfluff/sqlfluff/pull/4275) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Postgres: Non-reserved keyword bugfix [#4277](https://github.com/sqlfluff/sqlfluff/pull/4277) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Hive: Add Table constraints DISABLE VALIDATE [#4281](https://github.com/sqlfluff/sqlfluff/pull/4281) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Snowflake: Add Python and Java UDF support [#4280](https://github.com/sqlfluff/sqlfluff/pull/4280) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* SparkSQL: Support DIV binary operator [#4282](https://github.com/sqlfluff/sqlfluff/pull/4282) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* BigQuery: Add ALTER TABLE [#4272](https://github.com/sqlfluff/sqlfluff/pull/4272) [@yoichi](https://github.com/yoichi)
+* Snowflake: Update bare functions [#4276](https://github.com/sqlfluff/sqlfluff/pull/4276) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Improve Dockerfile to reduce image size [#4262](https://github.com/sqlfluff/sqlfluff/pull/4262) [@tdurieux](https://github.com/tdurieux)
+* Prep version 2.0.0a2 [#4247](https://github.com/sqlfluff/sqlfluff/pull/4247) [@github-actions](https://github.com/github-actions)
+* Push indents to after comments [#4239](https://github.com/sqlfluff/sqlfluff/pull/4239) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Templated fix improvements and indentation [#4245](https://github.com/sqlfluff/sqlfluff/pull/4245) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Fix block comment indent fixes #4224 [#4240](https://github.com/sqlfluff/sqlfluff/pull/4240) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Fix for #4222 [#4236](https://github.com/sqlfluff/sqlfluff/pull/4236) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Snowflake: Allow multiple unpivot [#4242](https://github.com/sqlfluff/sqlfluff/pull/4242) [@greg-finley](https://github.com/greg-finley)
+* postgres: add row-level locks to SELECT statements [#4209](https://github.com/sqlfluff/sqlfluff/pull/4209) [@Yiwen-Gao](https://github.com/Yiwen-Gao)
+* Add more parsing logic for db2 [#4206](https://github.com/sqlfluff/sqlfluff/pull/4206) [@NelsonTorres](https://github.com/NelsonTorres)
+* Include the filename in critical exceptions [#4225](https://github.com/sqlfluff/sqlfluff/pull/4225) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Update Readme Badges [#4219](https://github.com/sqlfluff/sqlfluff/pull/4219) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* diff-quality: Handle the case where there are no files to check [#4220](https://github.com/sqlfluff/sqlfluff/pull/4220) [@barrywhart](https://github.com/barrywhart)
+* Prep version 2.0.0a1 [#4203](https://github.com/sqlfluff/sqlfluff/pull/4203) [@github-actions](https://github.com/github-actions)
+* Fixed False Positive for L037 [#4198](https://github.com/sqlfluff/sqlfluff/pull/4198) [@WillAyd](https://github.com/WillAyd)
+* Fix #4215 [#4217](https://github.com/sqlfluff/sqlfluff/pull/4217) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* don't consider templated whitespace [#4213](https://github.com/sqlfluff/sqlfluff/pull/4213) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* show fatal errors regardless [#4214](https://github.com/sqlfluff/sqlfluff/pull/4214) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* don't pickle the templater [#4208](https://github.com/sqlfluff/sqlfluff/pull/4208) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* MySQL: Support column character set and collation [#4204](https://github.com/sqlfluff/sqlfluff/pull/4204) [@yoichi](https://github.com/yoichi)
+* Fix some issues with Docker Compose environment [#4201](https://github.com/sqlfluff/sqlfluff/pull/4201) [@barrywhart](https://github.com/barrywhart)
+* Implicit Indents [#4054](https://github.com/sqlfluff/sqlfluff/pull/4054) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Tweak Coveralls settings [#4199](https://github.com/sqlfluff/sqlfluff/pull/4199) [@barrywhart](https://github.com/barrywhart)
+* In addition to Codecov, also upload to Coveralls [#4197](https://github.com/sqlfluff/sqlfluff/pull/4197) [@barrywhart](https://github.com/barrywhart)
+* Fix: create table default cast returns unparsable section [#4192](https://github.com/sqlfluff/sqlfluff/pull/4192) [@NelsonTorres](https://github.com/NelsonTorres)
+* Fix JSON parsing issue with diff-quality plugin [#4190](https://github.com/sqlfluff/sqlfluff/pull/4190) [@barrywhart](https://github.com/barrywhart)
+* Codecov migration [#4195](https://github.com/sqlfluff/sqlfluff/pull/4195) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Stop adding trailing os.sep if ignore file is on the root of the file… [#4182](https://github.com/sqlfluff/sqlfluff/pull/4182) [@baa-ableton](https://github.com/baa-ableton)
+* Port dbt-osmosis templater changes to SQLFluff [#3976](https://github.com/sqlfluff/sqlfluff/pull/3976) [@barrywhart](https://github.com/barrywhart)
+* Reflow 4: Long Lines [#4067](https://github.com/sqlfluff/sqlfluff/pull/4067) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Fix comment bug on reindent [#4179](https://github.com/sqlfluff/sqlfluff/pull/4179) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Reflow 3: Reindent [#3942](https://github.com/sqlfluff/sqlfluff/pull/3942) [@alanmcruickshank](https://github.com/alanmcruickshank)
+
+## New Contributors
+
+* [@konnectr](https://github.com/konnectr) made their first contribution in [#4488](https://github.com/sqlfluff/sqlfluff/pull/4488)
+* [@ValentinCrr](https://github.com/ValentinCrr) made their first contribution in [#4490](https://github.com/sqlfluff/sqlfluff/pull/4490)
+* [@FabianScheidt](https://github.com/FabianScheidt) made their first contribution in [#4426](https://github.com/sqlfluff/sqlfluff/pull/4426)
+* [@dflem97](https://github.com/dflem97) made their first contribution in [#4440](https://github.com/sqlfluff/sqlfluff/pull/4440)
+* [@timcosta](https://github.com/timcosta) made their first contribution in [#4391](https://github.com/sqlfluff/sqlfluff/pull/4391)
+* [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) made their first contribution in [#4344](https://github.com/sqlfluff/sqlfluff/pull/4344)
+* [@joar](https://github.com/joar) made their first contribution in [#4341](https://github.com/sqlfluff/sqlfluff/pull/4341)
+* [@jmpfar](https://github.com/jmpfar) made their first contribution in [#4356](https://github.com/sqlfluff/sqlfluff/pull/4356)
+* [@jared-rimmer](https://github.com/jared-rimmer) made their first contribution in [#4403](https://github.com/sqlfluff/sqlfluff/pull/4403)
+* [@vesatoivonen](https://github.com/vesatoivonen) made their first contribution in [#4335](https://github.com/sqlfluff/sqlfluff/pull/4335)
+* [@briankravec](https://github.com/briankravec) made their first contribution in [#4421](https://github.com/sqlfluff/sqlfluff/pull/4421)
+* [@saintamh](https://github.com/saintamh) made their first contribution in [#4417](https://github.com/sqlfluff/sqlfluff/pull/4417)
+* [@tdurieux](https://github.com/tdurieux) made their first contribution in [#4262](https://github.com/sqlfluff/sqlfluff/pull/4262)
+* [@baa-ableton](https://github.com/baa-ableton) made their first contribution in [#4182](https://github.com/sqlfluff/sqlfluff/pull/4182)
+* [@WillAyd](https://github.com/WillAyd) made their first contribution in [#4198](https://github.com/sqlfluff/sqlfluff/pull/4198)
+
+## [2.0.0a6] - 2023-03-06
+
+> NOTE: This is effectively a release candidate for testing purposes.
+> There are several new features here, and breaking changes to
+> configuration. We welcome testing feedback from the community, and
+> the intent is that following this release there will be no more
+> major breaking changes before the 2.0.0 release.
+
+## Highlights
+
+This is the sixth alpha release for 2.0.0, and effectively the first release
+candidate for 2.0.0. All the intended breaking changes for the upcoming
+release have now been made, and only bugfixes and non-breaking feature
+changes should happen between this release and the full release.
+
+It contains:
+* A reorganisation of rules. All rules have been recoded, and can now be
+  referred to by their name, code, alias or group (see the sketch after
+  this list). The legacy code for each rule is included as an alias to
+  support some backward compatibility.
+* Configuration files (and inline configuration flags) should now use the
+  **name** of the rule rather than the **code**. Any configuration files
+  which reference rules by legacy code (or reference unknown rules) will
+  now display warnings.
+* Introduces the `sqlfluff format` CLI command (à la `sqlfmt` or `black`)
+  to auto-format SQL files using a known set of _fairly safe_ rules.
+* Databricks as a distinct new dialect (rather than as previously an alias
+  for `sparksql`).
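+
+A sketch of the name-based referencing: inline `noqa` comments can now
+reference rules by name, alias or group as well as by code, e.g. (the rule
+and table names here are illustrative):
+
+```sql
+SELECT
+    col_a AS a,
+    col_b b  -- noqa: aliasing.column
+FROM my_table
+```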
+
+There are also numerous dialect improvements to ANSI, Athena, TSQL, Teradata,
+SQLite & MySQL.
+
+## What’s Changed
+
+* Fix #4367 [#4479](https://github.com/sqlfluff/sqlfluff/pull/4479) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Teradata: Improve COLLECT STATS parsing [#4478](https://github.com/sqlfluff/sqlfluff/pull/4478) [@dflem97](https://github.com/dflem97)
+* Add a sqlfluff format CLI command [#4473](https://github.com/sqlfluff/sqlfluff/pull/4473) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Recode and disable L031 -> AL07 [#4471](https://github.com/sqlfluff/sqlfluff/pull/4471) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Named Config (part 2) [#4470](https://github.com/sqlfluff/sqlfluff/pull/4470) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Rule config lookup improvements & config warnings [#4465](https://github.com/sqlfluff/sqlfluff/pull/4465) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Recode L050 [#4468](https://github.com/sqlfluff/sqlfluff/pull/4468) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Implicit indent fixes #4467 [#4469](https://github.com/sqlfluff/sqlfluff/pull/4469) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* ANSI: Add IfExistsGrammar to DropTrigger [#4466](https://github.com/sqlfluff/sqlfluff/pull/4466) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Rules Reorg Mopup [#4462](https://github.com/sqlfluff/sqlfluff/pull/4462) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Layout Rules Recode (part 2) [#4456](https://github.com/sqlfluff/sqlfluff/pull/4456) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* fix(athena): resolve errors parsing around maps, structs, and arrays [#4391](https://github.com/sqlfluff/sqlfluff/pull/4391) [@timcosta](https://github.com/timcosta)
+* Layout Rules Recode (part 1) [#4432](https://github.com/sqlfluff/sqlfluff/pull/4432) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* TSQL: EXEC string literal [#4458](https://github.com/sqlfluff/sqlfluff/pull/4458) [@jpers36](https://github.com/jpers36)
+* Teradata: Added SET QUERY_BAND statement  [#4459](https://github.com/sqlfluff/sqlfluff/pull/4459) [@dflem97](https://github.com/dflem97)
+* Teradata: Added TOP select clause modifier [#4461](https://github.com/sqlfluff/sqlfluff/pull/4461) [@dflem97](https://github.com/dflem97)
+* Teradata: Addition of comparison operator extensions [#4451](https://github.com/sqlfluff/sqlfluff/pull/4451) [@dflem97](https://github.com/dflem97)
+* Add extensions and plugin section to the README.md [#4454](https://github.com/sqlfluff/sqlfluff/pull/4454) [@jared-rimmer](https://github.com/jared-rimmer)
+* Convention rules bundle [#4448](https://github.com/sqlfluff/sqlfluff/pull/4448) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* References rule bundle [#4446](https://github.com/sqlfluff/sqlfluff/pull/4446) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Structure and Ambiguous rule bundles [#4444](https://github.com/sqlfluff/sqlfluff/pull/4444) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* TSQL: Bare functions [#4439](https://github.com/sqlfluff/sqlfluff/pull/4439) [@jpers36](https://github.com/jpers36)
+* Pull dbt CI tests forward to 1.1 and 1.4 [#4442](https://github.com/sqlfluff/sqlfluff/pull/4442) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Teradata: Added "AND STATS" options when creating table [#4440](https://github.com/sqlfluff/sqlfluff/pull/4440) [@dflem97](https://github.com/dflem97)
+* Add Databricks as a distinct dialect [#4438](https://github.com/sqlfluff/sqlfluff/pull/4438) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Remove importlib deprecated methods [#4437](https://github.com/sqlfluff/sqlfluff/pull/4437) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* SQLite: Support PRAGMA statements [#4431](https://github.com/sqlfluff/sqlfluff/pull/4431) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Proposed graceful handling of noqa by L016 (#4248) [#4424](https://github.com/sqlfluff/sqlfluff/pull/4424) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* DuckDb: Allow quoted literals as identifiers [#4410](https://github.com/sqlfluff/sqlfluff/pull/4410) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* SQLite Refactor to reduce statement and keyword scope [#4409](https://github.com/sqlfluff/sqlfluff/pull/4409) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* L046 and L056 recode [#4430](https://github.com/sqlfluff/sqlfluff/pull/4430) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Recode Aliasing Rules [#4427](https://github.com/sqlfluff/sqlfluff/pull/4427) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Adjust MySQL dialect to support combination of not-null, default and … [#4426](https://github.com/sqlfluff/sqlfluff/pull/4426) [@FabianScheidt](https://github.com/FabianScheidt)
+* Revert some changes to tox [#4428](https://github.com/sqlfluff/sqlfluff/pull/4428) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Migrate capitalisation rules to plugin and recode [#4413](https://github.com/sqlfluff/sqlfluff/pull/4413) [@alanmcruickshank](https://github.com/alanmcruickshank)
+
+## New Contributors
+
+* [@FabianScheidt](https://github.com/FabianScheidt) made their first contribution in [#4426](https://github.com/sqlfluff/sqlfluff/pull/4426)
+* [@dflem97](https://github.com/dflem97) made their first contribution in [#4440](https://github.com/sqlfluff/sqlfluff/pull/4440)
+* [@timcosta](https://github.com/timcosta) made their first contribution in [#4391](https://github.com/sqlfluff/sqlfluff/pull/4391)
+
+## [2.0.0a5] - 2023-02-24
+
+> NOTE: This is an alpha release for testing purposes. There are several new features
+> here, and breaking changes to configuration. We welcome testing feedback from the
+> community, but know that this release may feel less polished than usual.
+
+## Highlights
+
+This is the fifth alpha release for 2.0.0. It contains:
+* Significant rework to rule naming and categorisation.
+* Several performance improvements.
+* Dialect improvements to several dialects.
+* Bugfixes to many of the issues raised in 2.0.0a4.
+
+There will likely be more changes to rule classification before a full release of 2.0.0,
+so anticipate that configuration files may change slightly again in future alpha releases.
+
+## What’s Changed
+
+* Handle long lines without trailing newlines gracefully (#4386) [#4423](https://github.com/sqlfluff/sqlfluff/pull/4423) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Resolve #4184 (index error in L007) [#4422](https://github.com/sqlfluff/sqlfluff/pull/4422) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Handle untaken positive indents with taken negative pair. [#4420](https://github.com/sqlfluff/sqlfluff/pull/4420) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Postgres: AS MATERIALIZED support [#4417](https://github.com/sqlfluff/sqlfluff/pull/4417) [@saintamh](https://github.com/saintamh)
+* Align warnings config with example shown [#4421](https://github.com/sqlfluff/sqlfluff/pull/4421) [@briankravec](https://github.com/briankravec)
+* BigQuery: parse "AS description" part of assert expressions [#4418](https://github.com/sqlfluff/sqlfluff/pull/4418) [@yoichi](https://github.com/yoichi)
+* Deprecate doc decorators (replace with metaclass) [#4415](https://github.com/sqlfluff/sqlfluff/pull/4415) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Enable noqa using aliases and groups [#4414](https://github.com/sqlfluff/sqlfluff/pull/4414) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Add rule names to CLI outputs [#4400](https://github.com/sqlfluff/sqlfluff/pull/4400) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Postgres: Remove execution keyword inherited from ANSI [#4411](https://github.com/sqlfluff/sqlfluff/pull/4411) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Rule names, aliases and more complicated selection. [#4399](https://github.com/sqlfluff/sqlfluff/pull/4399) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Postgres: Support Recursive View [#4412](https://github.com/sqlfluff/sqlfluff/pull/4412) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* T-SQL: Implement BULK INSERT statement [#4381](https://github.com/sqlfluff/sqlfluff/pull/4381) [@borchero](https://github.com/borchero)
+* L062: Add match_source (#4172) [#4335](https://github.com/sqlfluff/sqlfluff/pull/4335) [@vesatoivonen](https://github.com/vesatoivonen)
+* TSQL: Add SET to ALTER TABLE [#4407](https://github.com/sqlfluff/sqlfluff/pull/4407) [@jared-rimmer](https://github.com/jared-rimmer)
+* Snowflake: ALTER STORAGE INTEGRATION segment [#4406](https://github.com/sqlfluff/sqlfluff/pull/4406) [@jared-rimmer](https://github.com/jared-rimmer)
+* Fix incorrect link to pre-commit docs [#4405](https://github.com/sqlfluff/sqlfluff/pull/4405) [@pdebelak](https://github.com/pdebelak)
+* Add Snowflake dialect ALTER ROLE segment [#4403](https://github.com/sqlfluff/sqlfluff/pull/4403) [@jared-rimmer](https://github.com/jared-rimmer)
+* Improving Postgres create index statement [#4356](https://github.com/sqlfluff/sqlfluff/pull/4356) [@jmpfar](https://github.com/jmpfar)
+* Resolve #4291: Comments forcing unexpected indents. [#4384](https://github.com/sqlfluff/sqlfluff/pull/4384) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Resolve #4294: Comments affecting indentation [#4337](https://github.com/sqlfluff/sqlfluff/pull/4337) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Resolve #4292: Window function long line fixes [#4383](https://github.com/sqlfluff/sqlfluff/pull/4383) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* TSQL: ALTER INDEX [#4364](https://github.com/sqlfluff/sqlfluff/pull/4364) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* Added Varying Keyword to allowed data type segments [#4375](https://github.com/sqlfluff/sqlfluff/pull/4375) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* Add ruff linter [#4372](https://github.com/sqlfluff/sqlfluff/pull/4372) [@greg-finley](https://github.com/greg-finley)
+* Fix postgres column constraint default syntax [#4379](https://github.com/sqlfluff/sqlfluff/pull/4379) [@pdebelak](https://github.com/pdebelak)
+* Allow function names to have a leading underscore [#4377](https://github.com/sqlfluff/sqlfluff/pull/4377) [@gavin-tsang](https://github.com/gavin-tsang)
+* TSQL: Merge Hints [#4354](https://github.com/sqlfluff/sqlfluff/pull/4354) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* TSQL: Temporal Table [#4358](https://github.com/sqlfluff/sqlfluff/pull/4358) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* TSQL: ALTER TABLE [#4369](https://github.com/sqlfluff/sqlfluff/pull/4369) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* Bugfix: Duckdb SELECT * [#4365](https://github.com/sqlfluff/sqlfluff/pull/4365) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Postgres: TABLESAMPLE query [#4357](https://github.com/sqlfluff/sqlfluff/pull/4357) [@greg-finley](https://github.com/greg-finley)
+* reindent refactor [#4338](https://github.com/sqlfluff/sqlfluff/pull/4338) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Snowflake: INSERT INTO [#4363](https://github.com/sqlfluff/sqlfluff/pull/4363) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* Docs: correct toml syntax of pyproject.toml file config example [#4361](https://github.com/sqlfluff/sqlfluff/pull/4361) [@imrehg](https://github.com/imrehg)
+* Allowed Naked Identifiers [#4359](https://github.com/sqlfluff/sqlfluff/pull/4359) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* TSQL: TABLESAMPLE [#4353](https://github.com/sqlfluff/sqlfluff/pull/4353) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* Tsql: Function Parameters [#4352](https://github.com/sqlfluff/sqlfluff/pull/4352) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* Postgres: Storage parameters [#4350](https://github.com/sqlfluff/sqlfluff/pull/4350) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* TSQL: Bare Function Set [#4351](https://github.com/sqlfluff/sqlfluff/pull/4351) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* Postgres: View options [#4340](https://github.com/sqlfluff/sqlfluff/pull/4340) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* BigQuery: SELECT DISTINCT AS STRUCT [#4341](https://github.com/sqlfluff/sqlfluff/pull/4341) [@joar](https://github.com/joar)
+* Snowflake: Fix Alter Warehouse [#4344](https://github.com/sqlfluff/sqlfluff/pull/4344) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
+* Parser: Optimise lookahead_match [#4327](https://github.com/sqlfluff/sqlfluff/pull/4327) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Add support for dbt test macros [#4319](https://github.com/sqlfluff/sqlfluff/pull/4319) [@pdebelak](https://github.com/pdebelak)
+* Bracket complex expressions before applying :: operator in Rule L067 [#4326](https://github.com/sqlfluff/sqlfluff/pull/4326) [@pdebelak](https://github.com/pdebelak)
+
+## New Contributors
+
+* [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) made their first contribution in [#4344](https://github.com/sqlfluff/sqlfluff/pull/4344)
+* [@joar](https://github.com/joar) made their first contribution in [#4341](https://github.com/sqlfluff/sqlfluff/pull/4341)
+* [@jmpfar](https://github.com/jmpfar) made their first contribution in [#4356](https://github.com/sqlfluff/sqlfluff/pull/4356)
+* [@jared-rimmer](https://github.com/jared-rimmer) made their first contribution in [#4403](https://github.com/sqlfluff/sqlfluff/pull/4403)
+* [@vesatoivonen](https://github.com/vesatoivonen) made their first contribution in [#4335](https://github.com/sqlfluff/sqlfluff/pull/4335)
+* [@briankravec](https://github.com/briankravec) made their first contribution in [#4421](https://github.com/sqlfluff/sqlfluff/pull/4421)
+* [@saintamh](https://github.com/saintamh) made their first contribution in [#4417](https://github.com/sqlfluff/sqlfluff/pull/4417)
+
+## [2.0.0a4] - 2023-01-26
+
+## Highlights
+
+This is the fourth alpha release for 2.0.0. It contains a fix for the renamed
+dbt exceptions in dbt version 1.4.0, a fix for a major performance issue with
+the 2.0 dbt templater, and improvements to parse performance of large SQL
+files.
+
+## What’s Changed
+
+* BigQuery: Alter table alter column [#4316](https://github.com/sqlfluff/sqlfluff/pull/4316) [@greg-finley](https://github.com/greg-finley)
+* Handle renamed dbt exceptions [#4317](https://github.com/sqlfluff/sqlfluff/pull/4317) [@greg-finley](https://github.com/greg-finley)
+* Parser: Fix early exit for simple matchers [#4305](https://github.com/sqlfluff/sqlfluff/pull/4305) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* MySQL: Add CREATE DATABASE and ALTER DATABASE [#4307](https://github.com/sqlfluff/sqlfluff/pull/4307) [@yoichi](https://github.com/yoichi)
+* BigQuery: Add ALTER VIEW [#4306](https://github.com/sqlfluff/sqlfluff/pull/4306) [@yoichi](https://github.com/yoichi)
+* toml: only install `toml` dependency if < Python 3.11 (otherwise use builtin `tomllib`) [#4303](https://github.com/sqlfluff/sqlfluff/pull/4303) [@kevinmarsh](https://github.com/kevinmarsh)
+* Fix #4024 example plugin unit tests import [#4302](https://github.com/sqlfluff/sqlfluff/pull/4302) [@matthieucan](https://github.com/matthieucan)
+
+## [2.0.0a3] - 2023-01-16
+
+> NOTE: This is an alpha release for testing purposes. There are several new features
+> here, and breaking changes to configuration. We welcome testing feedback from the
+> community, but know that this release may feel less polished than usual.
+
+## Highlights
+
+This is the third alpha release for 2.0.0. It contains primarily bugfixes from 2.0.0a2
+to allow continued testing. In particular, some of the recent 2.0.0-related changes to the
+dbt templater have been reverted, primarily due to performance and other issues. If
+those issues can be resolved, the changes will be re-introduced. The long-term goal of
+this work is to ease maintenance of the templater by separating dbt integration concerns
+from SQLFluff concerns.
+
+There will likely be more changes to rule classification before a full release of 2.0.0,
+so anticipate that configuration files may change slightly again in future alpha releases.
+
+## What’s Changed
+
+* Move ISSUE from Snowflake reserved keywords to unreserved ones [#4279](https://github.com/sqlfluff/sqlfluff/pull/4279) [@KaoutherElhamdi](https://github.com/KaoutherElhamdi)
+* Due to performance and other issues, revert the osmosis implementation of the templater for now [#4273](https://github.com/sqlfluff/sqlfluff/pull/4273) [@barrywhart](https://github.com/barrywhart)
+* Simplify lexing [#4289](https://github.com/sqlfluff/sqlfluff/pull/4289) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Fix #4255 (Fix exception on mixed indent description) [#4288](https://github.com/sqlfluff/sqlfluff/pull/4288) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Fix #4253 (incorrect trigger of L006 around placeholders) [#4287](https://github.com/sqlfluff/sqlfluff/pull/4287) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Fix #4249 (TSQL block comment indents) [#4286](https://github.com/sqlfluff/sqlfluff/pull/4286) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Fix #4252 (Resolve multiple sensible indents) [#4285](https://github.com/sqlfluff/sqlfluff/pull/4285) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Parser Performance: Cache segment string repr to reduce function calls [#4278](https://github.com/sqlfluff/sqlfluff/pull/4278) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Snowflake: GRANT SUPPORT CASES [#4283](https://github.com/sqlfluff/sqlfluff/pull/4283) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Dialect: duckdb [#4284](https://github.com/sqlfluff/sqlfluff/pull/4284) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Snowflake: Add variable pattern to CopyIntoTable [#4275](https://github.com/sqlfluff/sqlfluff/pull/4275) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Postgres: Non-reserved keyword bugfix [#4277](https://github.com/sqlfluff/sqlfluff/pull/4277) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Hive: Add Table constraints DISABLE VALIDATE [#4281](https://github.com/sqlfluff/sqlfluff/pull/4281) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Snowflake: Add Python and Java UDF support [#4280](https://github.com/sqlfluff/sqlfluff/pull/4280) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* SparkSQL: Support DIV binary operator [#4282](https://github.com/sqlfluff/sqlfluff/pull/4282) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* BigQuery: Add ALTER TABLE [#4272](https://github.com/sqlfluff/sqlfluff/pull/4272) [@yoichi](https://github.com/yoichi)
+* Snowflake: Update bare functions [#4276](https://github.com/sqlfluff/sqlfluff/pull/4276) [@WittierDinosaur](https://github.com/WittierDinosaur)
+* Improve Dockerfile to reduce image size [#4262](https://github.com/sqlfluff/sqlfluff/pull/4262) [@tdurieux](https://github.com/tdurieux)
+
+## New Contributors
+
+* [@tdurieux](https://github.com/tdurieux) made their first contribution in [#4262](https://github.com/sqlfluff/sqlfluff/pull/4262)
+
+## [2.0.0a2] - 2023-01-07
+
+## Highlights
+
+This is the second alpha release for 2.0.0. It contains primarily bugfixes
+from 2.0.0a1 to allow continued testing, along with dialect improvements for
+Snowflake, Postgres and DB2.
+
+## What’s Changed
+
+* Push indents to after comments [#4239](https://github.com/sqlfluff/sqlfluff/pull/4239) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Templated fix improvements and indentation [#4245](https://github.com/sqlfluff/sqlfluff/pull/4245) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Fix block comment indent fixes #4224 [#4240](https://github.com/sqlfluff/sqlfluff/pull/4240) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Fix for #4222 [#4236](https://github.com/sqlfluff/sqlfluff/pull/4236) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Snowflake: Allow multiple unpivot [#4242](https://github.com/sqlfluff/sqlfluff/pull/4242) [@greg-finley](https://github.com/greg-finley)
+* postgres: add row-level locks to SELECT statements [#4209](https://github.com/sqlfluff/sqlfluff/pull/4209) [@Yiwen-Gao](https://github.com/Yiwen-Gao)
+* Add more parsing logic for db2 [#4206](https://github.com/sqlfluff/sqlfluff/pull/4206) [@NelsonTorres](https://github.com/NelsonTorres)
+* Include the filename in critical exceptions [#4225](https://github.com/sqlfluff/sqlfluff/pull/4225) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Update Readme Badges [#4219](https://github.com/sqlfluff/sqlfluff/pull/4219) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* diff-quality: Handle the case where there are no files to check [#4220](https://github.com/sqlfluff/sqlfluff/pull/4220) [@barrywhart](https://github.com/barrywhart)
+
+## [2.0.0a1] - 2022-12-28
+
+## Highlights
+
+This is the first alpha version for 2.0.0. It brings all of the changes to whitespace
+handling, including a total rewrite of indentation and long line logic (L003 & L016).
+That brings several breaking changes to the configuration of layout, see the
+[layout docs](https://docs.sqlfluff.com/en/stable/layout.html) for more details and
+familiarise yourself with the new
+[default configuration](https://docs.sqlfluff.com/en/stable/configuration.html#default-configuration).
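+
+A sketch of the new-style layout configuration (keys per the layout docs;
+the values shown are illustrative):
+
+```ini
+[sqlfluff:layout:type:comma]
+spacing_before = touch
+line_position = trailing
+```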
+
+In addition, for the dbt templater, this introduces a large re-write of the codebase,
+dropping support for dbt versions before 1.0.0. This leverages functionality from
+[dbt-osmosis](https://github.com/z3z1ma/dbt-osmosis) to reduce the amount of
+functionality supported directly by SQLFluff; during testing, the new
+version has been reported as significantly faster.
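+
+For reference, a minimal dbt templater setup in `.sqlfluff` looks along
+these lines (the paths shown are illustrative):
+
+```ini
+[sqlfluff]
+templater = dbt
+
+[sqlfluff:templater:dbt]
+project_dir = ./
+profiles_dir = ~/.dbt/
+```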
+
+## What’s Changed
+
+* Fixed False Positive for L037 [#4198](https://github.com/sqlfluff/sqlfluff/pull/4198) [@WillAyd](https://github.com/WillAyd)
+* Indentation bug [#4217](https://github.com/sqlfluff/sqlfluff/pull/4217) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Show fatal errors regardless [#4214](https://github.com/sqlfluff/sqlfluff/pull/4214) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Don't consider templated whitespace [#4213](https://github.com/sqlfluff/sqlfluff/pull/4213) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Don't pickle the templater [#4208](https://github.com/sqlfluff/sqlfluff/pull/4208) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* MySQL: Support column character set and collation [#4204](https://github.com/sqlfluff/sqlfluff/pull/4204) [@yoichi](https://github.com/yoichi)
+* Fix some issues with Docker Compose environment [#4201](https://github.com/sqlfluff/sqlfluff/pull/4201) [@barrywhart](https://github.com/barrywhart)
+* Implicit Indents [#4054](https://github.com/sqlfluff/sqlfluff/pull/4054) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Tweak Coveralls settings [#4199](https://github.com/sqlfluff/sqlfluff/pull/4199) [@barrywhart](https://github.com/barrywhart)
+* In addition to Codecov, also upload to Coveralls [#4197](https://github.com/sqlfluff/sqlfluff/pull/4197) [@barrywhart](https://github.com/barrywhart)
+* Fix: create table default cast returns unparsable section [#4192](https://github.com/sqlfluff/sqlfluff/pull/4192) [@NelsonTorres](https://github.com/NelsonTorres)
+* Fix JSON parsing issue with diff-quality plugin [#4190](https://github.com/sqlfluff/sqlfluff/pull/4190) [@barrywhart](https://github.com/barrywhart)
+* Codecov migration [#4195](https://github.com/sqlfluff/sqlfluff/pull/4195) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Stop adding trailing os.sep if ignore file is on the root of the file… [#4182](https://github.com/sqlfluff/sqlfluff/pull/4182) [@baa-ableton](https://github.com/baa-ableton)
+* Port dbt-osmosis templater changes to SQLFluff [#3976](https://github.com/sqlfluff/sqlfluff/pull/3976) [@barrywhart](https://github.com/barrywhart)
+* Reflow 4: Long Lines [#4067](https://github.com/sqlfluff/sqlfluff/pull/4067) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Fix comment bug on reindent [#4179](https://github.com/sqlfluff/sqlfluff/pull/4179) [@alanmcruickshank](https://github.com/alanmcruickshank)
+* Reflow 3: Reindent [#3942](https://github.com/sqlfluff/sqlfluff/pull/3942) [@alanmcruickshank](https://github.com/alanmcruickshank)
+
+## New Contributors
+
+* [@baa-ableton](https://github.com/baa-ableton) made their first contribution in [#4182](https://github.com/sqlfluff/sqlfluff/pull/4182)
+* [@WillAyd](https://github.com/WillAyd) made their first contribution in [#4198](https://github.com/sqlfluff/sqlfluff/pull/4198)
+
 ## [1.4.5] - 2022-12-18
 
 ## Highlights
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 6e163d8..f2b3637 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -89,13 +89,18 @@ First ensure that you have tox installed:
 ```shell
 python3.8 -m pip install -U tox
 ```
-**IMPORTANT:** `tox` must be installed with a minimum of Python 3.8 as the `mypy` checks are incompatible with 3.7. Those using newer versions of Python may replace `python3.8` as necessary (the test suite runs primarily under 3.11 for example).
+**IMPORTANT:** `tox` must be installed with a minimum of Python 3.8 as
+the `mypy` checks are incompatible with 3.7. Those using newer versions of
+Python may replace `python3.8` as necessary (the test suite runs primarily
+under 3.11 for example).
 
-Note: Unfortunately tox does not currently support setting just a minimum Python version (though this may be be coming in tox 4!).
+Note: Unfortunately tox does not currently support setting just a minimum
+Python version (though this may be coming in tox 4!).
 
 #### Creating a virtual environment
 
-A virtual environment can then be created and activated by running (check the [requirements](#requirements) before running this):
+A virtual environment can then be created and activated by running (check
+the [requirements](#requirements) before running this):
 ```shell
 tox -e dbt021-py38 --devenv .venv
 source .venv/bin/activate
@@ -109,9 +114,10 @@ choose one of the dbt environments.)
 
 Windows users should call `.venv\Scripts\activate` rather than `source .venv/bin/activate`.
 
-This virtual environment will already have the package installed in editable mode for you, as well as
-`requirements_dev.txt` and `plugins/sqlfluff-plugin-example`. Additionally if a dbt virtual environment
-was specified, you will also have `dbt-core`, `dbt-postgres`, and `plugins/sqlfluff-templater-dbt` available.
+This virtual environment will already have the package installed in editable
+mode for you, as well as `requirements_dev.txt` and `plugins/sqlfluff-plugin-example`.
+Additionally, if a dbt virtual environment was specified, you will also have
+`dbt-core`, `dbt-postgres`, and `plugins/sqlfluff-templater-dbt` available.
 
 ### Wiki
 
@@ -137,7 +143,8 @@ pip install -e plugins/sqlfluff-templater-dbt/.
 
 ### Testing
 
-To test locally, SQLFluff uses `tox` (check the [requirements](#requirements)!). The test suite can be run via:
+To test locally, SQLFluff uses `tox` (check the [requirements](#requirements)!).
+The test suite can be run via:
 
 ```shell
 tox
@@ -167,7 +174,7 @@ faster while working on an issue, before running full tests at the end.
 For example, you can run specific tests by making use of the `-k` option in `pytest`:
 
 ```
-tox -e py38 -- -k L012 test
+tox -e py38 -- -k AL02 test
 ```
 
 Alternatively, you can also run tests from a specific directory or file only:
@@ -184,7 +191,9 @@ sqlfluff parse test.sql
 
 #### dbt templater tests
 
-The dbt templater tests require a locally running Postgres instance. See the required connection parameters in `plugins/sqlfluff-templater-dbt/test/fixtures/dbt/profiles.yml`. We recommend using https://postgresapp.com/.
+The dbt templater tests require a locally running Postgres instance. See the
+required connection parameters in `plugins/sqlfluff-templater-dbt/test/fixtures/dbt/profiles.yml`.
+We recommend using https://postgresapp.com/.
 
 To run the dbt-related tests you will have to explicitly include these tests:
 
@@ -206,7 +215,8 @@ Docker Compose environment. It's a simple two-container configuration:
 Steps to use the Docker Compose environment:
 * Install Docker on your machine.
 * Run `plugins/sqlfluff-templater-dbt/docker/startup` to create the containers.
-* Run `plugins/sqlfluff-templater-dbt/docker/shell` to start a bash session in the `app` container.
+* Run `plugins/sqlfluff-templater-dbt/docker/shell` to start a bash session
+  in the `app` container.
 
 Inside the container, run:
 ```
@@ -215,18 +225,23 @@ py.test -v plugins/sqlfluff-templater-dbt/test/
 
 ### Pre-Commit Config
 
-For development convenience we also provide a `.pre-commit-config.yaml` file to allow the user to install a selection of pre-commit hooks by running (check the [requirements](#requirements) before running this):
+For development convenience we also provide a `.pre-commit-config.yaml` file
+to allow the user to install a selection of pre-commit hooks by running (check
+the [requirements](#requirements) before running this):
 
 ```
 tox -e pre-commit -- install
 ```
 
-These hooks can help the user identify and fix potential linting/typing violations prior to committing their code and therefore reduce having to deal with these sort of issues during code review.
+These hooks can help the user identify and fix potential linting/typing
+violations prior to committing their code and therefore reduce having to deal
+with these sorts of issues during code review.
 
 ### Documentation Website
 
-Documentation is built using Sphinx with some pages being built based on the source code.
-See the [Documentation Website README.md](./docs/README.md) file for more information on how to build and test this.
+Documentation is built using Sphinx with some pages being built based on the
+source code. See the [Documentation Website README.md](./docs/README.md) file
+for more information on how to build and test this.
 
 ### Building Package
 
@@ -236,41 +251,81 @@ whenever a new release is published to GitHub.
 
 #### Release checklist:
 
-The [release page](https://github.com/sqlfluff/sqlfluff/releases) shows maintainers all merges since last release. Once we have a long enough list, we should prepare a release.
+The [release page](https://github.com/sqlfluff/sqlfluff/releases) shows
+maintainers all merges since the last release. Once we have a long enough
+list, we should prepare a release.
 
-A release PR can be created by maintainers via the ["Create release pull request" GitHub Action](https://github.com/sqlfluff/sqlfluff/actions/workflows/create-release-pull-request.yaml).
+A release PR can be created by maintainers via the
+["Create release pull request" GitHub Action](https://github.com/sqlfluff/sqlfluff/actions/workflows/create-release-pull-request.yaml).
 
-As further PRs are merged, we may need to rerun the release script again (or alternatively just manually updating the branch). This can only be rerun locally (the GitHub Action will exit error if the branch already exists to prevent overwriting it).
+As further PRs are merged, we may need to rerun the release script
+(or alternatively just manually update the branch). This can only be rerun
+locally (the GitHub Action will exit with an error if the branch already
+exists, to prevent overwriting it).
 
-Check out the release branch created by the GitHub Action locally and run the script. It will preserve any `Highlights` you have added and update the other sections with new contributions. It can be run as follows (you will need a [GitHub Personal Access Token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) with "repo" permission):
+Check out the release branch created by the GitHub Action locally and run
+the script. It will preserve any `Highlights` you have added and update the
+other sections with new contributions. It can be run as follows (you will
+need a [GitHub Personal Access Token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) with "repo" permission):
 
 ```shell
 source .venv/bin/activate
 export GITHUB_REPOSITORY_OWNER=sqlfluff
 export GITHUB_TOKEN=gho_xxxxxxxx # Change to your token with "repo" permissions.
-python util.py prepare-release --new_version_num=0.13.4 # Change to your release number
+python util.py release 2.0.3 # Change to your release number
 ```
 
-Below is the old list of release steps, but many are automated by the process described above.
+Below is the old list of release steps, but many are automated by the process
+described above.
 
 - [ ] Change the version in `setup.cfg` and `plugins/sqlfluff-templater-dbt/setup.cfg`
 - [ ] Update the stable_version in the `[sqlfluff_docs]` section of `setup.cfg`
-- [ ] Copy the draft releases from https://github.com/sqlfluff/sqlfluff/releases to [CHANGELOG.md](CHANGELOG.md). These draft release notes have been created by a GitHub Action on each PR merge.
-- [ ] If you pretend to create a new draft in GitHub and hit "Auto Generate Release Notes", then it will basically recreate these notes (though in a slightly different format), but also add a nice "First contributors" section, so can copy that "First contributors" section too and then abandon that new draft ([an issues](https://github.com/release-drafter/release-drafter/issues/1001) has been raised to ask for this in Release Drafter GitHub Action).
-- [ ] Add markdown links to PRs as annoyingly GitHub doesn't do this automatically when displaying Markdown files, like it does for comments. You can use regex in most code editors to replace `\(#([0-9]*)\) @([^ ]*)$` to `[#$1](https://github.com/sqlfluff/sqlfluff/pull/$1) [@$2](https://github.com/$2)`, or if using the GitHub generated release notes then can replace `by @([^ ]*) in https://github.com/sqlfluff/sqlfluff/pull/([0-9]*)$` to `[#$2](https://github.com/sqlfluff/sqlfluff/pull/$2) [@$1](https://github.com/$1)`.
-- [ ] For the new contributors section, you can replace `\* @([^ ]*) made their first contribution in https://github.com/sqlfluff/sqlfluff/pull/([0-9]*)$` with `* [@$1](https://github.com/$1) made their first contribution in [#$2](https://github.com/sqlfluff/sqlfluff/pull/$2)` to do this automatically).
-- [ ] Check each issue title is clear, and if not edit issue title (which will automatically update Release notes on next PR merged, as the Draft one is recreated in full each time). We also don't use [conventional commit PR titles](https://www.conventionalcommits.org/en/v1.0.0/) (e.g. `feat`) so make them more English readable. Make same edits locally in [CHANGELOG.md](CHANGELOG.md).
+- [ ] Copy the draft releases from https://github.com/sqlfluff/sqlfluff/releases
+      to [CHANGELOG.md](CHANGELOG.md). These draft release notes have been created
+      by a GitHub Action on each PR merge.
+- [ ] If you start creating a new draft in GitHub and hit "Auto Generate Release
+      Notes", then it will basically recreate these notes (though in a slightly
+      different format), but also add a nice "First contributors" section, so you
+      can copy that "First contributors" section too and then abandon that new
+      draft ([an issue](https://github.com/release-drafter/release-drafter/issues/1001)
+      has been raised to ask for this in the Release Drafter GitHub Action).
+- [ ] Add markdown links to PRs as annoyingly GitHub doesn't do this automatically
+      when displaying Markdown files, like it does for comments. You can use regex
+      in most code editors to replace `\(#([0-9]*)\) @([^ ]*)$` with
+      `[#$1](https://github.com/sqlfluff/sqlfluff/pull/$1) [@$2](https://github.com/$2)`,
+      or if using the GitHub generated release notes you can replace
+      `by @([^ ]*) in https://github.com/sqlfluff/sqlfluff/pull/([0-9]*)$` with
+      `[#$2](https://github.com/sqlfluff/sqlfluff/pull/$2) [@$1](https://github.com/$1)`
+      (see the sketch after this file's diff).
+- [ ] For the new contributors section, you can replace
+      `\* @([^ ]*) made their first contribution in https://github.com/sqlfluff/sqlfluff/pull/([0-9]*)$`
+      with `* [@$1](https://github.com/$1) made their first contribution in [#$2](https://github.com/sqlfluff/sqlfluff/pull/$2)` to do this automatically.
+- [ ] Check each issue title is clear, and if not edit the issue title (which will
+      automatically update the release notes when the next PR is merged, as the
+      draft one is recreated in full each time). We also don't use
+      [conventional commit PR titles](https://www.conventionalcommits.org/en/v1.0.0/)
+      (e.g. `feat`) so make them read as plain English. Make the same edits locally
+      in [CHANGELOG.md](CHANGELOG.md).
 - [ ] Add a comment at the top to highlight the main things in this release.
-- [ ] If this is a non-patch release then update the `Notable changes` section in `index.rst` with a brief summary of the new features added that made this a non-patch release.
-- [ ] View the CHANGELOG in this branch on GitHub to ensure you didn't miss any link conversions or other markup errors.
-- [ ] Open draft PR with those change a few days in advance to give contributors notice. Tag those with open PRs in the PR in GitHub to give them time to merge their work before the new release
+- [ ] If this is a non-patch release then update the `Notable changes` section in
+      `index.rst` with a brief summary of the new features added that made this a
+      non-patch release.
+- [ ] View the CHANGELOG in this branch on GitHub to ensure you didn't miss any
+      link conversions or other markup errors.
+- [ ] Open a draft PR with those changes a few days in advance to give contributors
+      notice. Tag those with open PRs in the PR on GitHub to give them time to merge
+      their work before the new release.
 - [ ] Comment in #contributing slack channel about release candidate.
 - [ ] Update the draft PR as more changes get merged.
 - [ ] Get another contributor to approve the PR.
 - [ ] Merge the PR when it looks like we've got all we’re gonna get for this release.
-- [ ] Go to the [releases page](https://github.com/sqlfluff/sqlfluff/releases), edit the release to be same as [CHANGELOG.md](CHANGELOG.md) (remember to remove your release PR which doesn’t need to go in this). Add version tag and a title and click “Publish release”.
-- [ ] Announce the release in the #general channel, with shout outs to those who contributed many, or big items.
-- [ ] Announce the release on Twitter (@tunetheweb can do this or let him know your Twitter handle if you want access to Tweet on SQLFluff’s behalf).
+- [ ] Go to the [releases page](https://github.com/sqlfluff/sqlfluff/releases), edit
+      the release to be the same as [CHANGELOG.md](CHANGELOG.md) (remember to remove
+      your release PR, which doesn’t need to go in this). Add a version tag and a
+      title and click “Publish release”.
+- [ ] Announce the release in the #general channel, with shout outs to those who
+      contributed many, or particularly big, items.
+- [ ] Announce the release on Twitter (@tunetheweb can do this or let him know your
+      Twitter handle if you want access to Tweet on SQLFluff’s behalf).
 
 :warning: **Before creating a new release, ensure that
 [setup.cfg](setup.cfg) is up-to-date with a new version** :warning:.
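
The regex replacements in the release checklist above can be sanity-checked in
Python before running them in an editor; a minimal sketch, using a line from the
changelog above (note that Python's `re` module uses `\1`/`\2` group references
rather than the editor-style `$1`/`$2`):

```python
import re

line = "* Fix comment bug on reindent (#4179) @alanmcruickshank"

# Same pattern as the checklist item: rewrites "(#PR) @user" into markdown
# links to the pull request and the author's profile.
linked = re.sub(
    r"\(#([0-9]*)\) @([^ ]*)$",
    r"[#\1](https://github.com/sqlfluff/sqlfluff/pull/\1)"
    r" [@\2](https://github.com/\2)",
    line,
)
print(linked)
```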
diff --git a/Dockerfile b/Dockerfile
index 709cd73..bdf96bc 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -7,14 +7,14 @@ WORKDIR /app
 ENV VIRTUAL_ENV /app/.venv
 RUN python -m venv $VIRTUAL_ENV
 ENV PATH $VIRTUAL_ENV/bin:$PATH
-RUN pip install --upgrade pip setuptools wheel
+RUN pip install --no-cache-dir --upgrade pip setuptools wheel
 
 # Install requirements separately
 # to take advantage of layer caching.
 # N.B. we extract the requirements from setup.cfg
 COPY setup.cfg .
 RUN python -c "import configparser; c = configparser.ConfigParser(); c.read('setup.cfg'); print(c['options']['install_requires'])" > requirements.txt
-RUN pip install --upgrade -r requirements.txt
+RUN pip install --no-cache-dir --upgrade -r requirements.txt
 
 # Copy minimal set of SQLFluff package files.
 COPY MANIFEST.in .
@@ -23,7 +23,7 @@ COPY setup.py .
 COPY src ./src
 
 # Install sqlfluff package.
-RUN pip install --no-dependencies .
+RUN pip install --no-cache-dir --no-dependencies .
 
 # Switch to non-root user.
 USER 5000
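
For readability, the dependency-extraction one-liner in the Dockerfile above is
equivalent to this short script (same standard-library calls, just expanded):

```python
import configparser

# Read the dependency list out of setup.cfg, so the Docker build can install
# (and layer-cache) requirements before the package source is copied in.
c = configparser.ConfigParser()
c.read("setup.cfg")
print(c["options"]["install_requires"])
```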
diff --git a/README.md b/README.md
index 669815a..43d641a 100644
--- a/README.md
+++ b/README.md
@@ -9,24 +9,34 @@
 [![PyPi Downloads](https://img.shields.io/pypi/dm/sqlfluff?style=flat-square)](https://pypi.org/project/sqlfluff/)
 
 [![codecov](https://img.shields.io/codecov/c/gh/sqlfluff/sqlfluff.svg?style=flat-square&logo=Codecov)](https://codecov.io/gh/sqlfluff/sqlfluff)
-[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/sqlfluff/sqlfluff/CI%20Tests?logo=github&style=flat-square)](https://github.com/sqlfluff/sqlfluff/actions/workflows/ci-tests.yml?query=branch%3Amain)
+[![Coveralls](https://img.shields.io/coverallsCoverage/github/sqlfluff/sqlfluff?logo=coveralls&style=flat-square)](https://coveralls.io/github/sqlfluff/sqlfluff?branch=main)
+[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/sqlfluff/sqlfluff/.github/workflows/ci-tests.yml?logo=github&style=flat-square)](https://github.com/sqlfluff/sqlfluff/actions/workflows/ci-tests.yml?query=branch%3Amain)
 [![ReadTheDocs](https://img.shields.io/readthedocs/sqlfluff?style=flat-square&logo=Read%20the%20Docs)](https://sqlfluff.readthedocs.io)
 [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg?style=flat-square)](https://github.com/psf/black)
 [![Docker Pulls](https://img.shields.io/docker/pulls/sqlfluff/sqlfluff?logo=docker&style=flat-square)](https://hub.docker.com/r/sqlfluff/sqlfluff)
 
-**SQLFluff** is a dialect-flexible and configurable SQL linter. Designed with ELT applications in mind, **SQLFluff** also works with Jinja templating and dbt. **SQLFluff** will auto-fix most linting errors, allowing you to focus your time on what matters.
+**SQLFluff** is a dialect-flexible and configurable SQL linter. Designed
+with ELT applications in mind, **SQLFluff** also works with Jinja templating
+and dbt. **SQLFluff** will auto-fix most linting errors, allowing you to focus
+your time on what matters.
 
 ## Dialects Supported
 
-Although SQL is reasonably consistent in its implementations, there are several different dialects available with variations of syntax and grammar. **SQLFluff** currently supports the following SQL dialects (though perhaps not in full):
+Although SQL is reasonably consistent in its implementations, there are several
+different dialects available with variations of syntax and grammar. **SQLFluff**
+currently supports the following SQL dialects (though perhaps not in full):
 
-- ANSI SQL - this is the base version and on occasion may not strictly follow the ANSI/ISO SQL definition
+- ANSI SQL - this is the base version and on occasion may not strictly follow
+  the ANSI/ISO SQL definition
 - [Athena](https://aws.amazon.com/athena/)
 - [BigQuery](https://cloud.google.com/bigquery/)
 - [ClickHouse](https://clickhouse.com/)
-- [Databricks](https://databricks.com/) (note: currently this is just an alias for the `sparksql` dialect).
+- [Databricks](https://databricks.com/) (note: this extends the `sparksql` dialect with
+  [Unity Catalog](https://docs.databricks.com/data-governance/unity-catalog/index.html) syntax).
 - [Db2](https://www.ibm.com/analytics/db2)
+- [DuckDB](https://duckdb.org/)
 - [Exasol](https://www.exasol.com/)
+- [Greenplum](https://greenplum.org/)
 - [Hive](https://hive.apache.org/)
 - [Materialize](https://materialize.com/)
 - [MySQL](https://www.mysql.com/)
@@ -40,13 +50,21 @@ Although SQL is reasonably consistent in its implementations, there are several
 - [Teradata](https://www.teradata.com/)
 - [Transact-SQL](https://docs.microsoft.com/en-us/sql/t-sql/language-reference) (aka T-SQL)
 
-We aim to make it easy to expand on the support of these dialects and also add other, currently unsupported, dialects. Please [raise issues](https://github.com/sqlfluff/sqlfluff/issues) (or upvote any existing issues) to let us know of demand for missing support.
+We aim to make it easy to expand on the support of these dialects and also
+add other, currently unsupported, dialects. Please [raise issues](https://github.com/sqlfluff/sqlfluff/issues)
+(or upvote any existing issues) to let us know of demand for missing support.
 
-Pull requests from those that know the missing syntax or dialects are especially welcomed and are the question way for you to get support added. We are happy to work with any potential contributors on this to help them add this support. Please raise an issue first for any large feature change to ensure it is a good fit for this project before spending time on this work.
+Pull requests from those that know the missing syntax or dialects are especially
+welcome and are the quickest way for you to get support added. We are happy
+to work with any potential contributors on this to help them add this support.
+Please raise an issue first for any large feature change to ensure it is a good
+fit for this project before spending time on this work.
 
 ## Templates Supported
 
-SQL itself does not lend itself well to [modularity](https://docs.getdbt.com/docs/viewpoint#section-modularity), so to introduce some flexibility and reusability it is often [templated](https://en.wikipedia.org/wiki/Template_processor) as discussed more in [our modularity documentation](https://docs.sqlfluff.com/en/stable/realworld.html#modularity).
+SQL itself does not lend itself well to [modularity](https://docs.getdbt.com/docs/viewpoint#section-modularity),
+so to introduce some flexibility and reusability it is often [templated](https://en.wikipedia.org/wiki/Template_processor)
+as discussed more in [our modularity documentation](https://docs.sqlfluff.com/en/stable/realworld.html#modularity).
 
 **SQLFluff** supports the following templates:
 - [Jinja](https://jinja.palletsprojects.com/) (aka Jinja2)
@@ -54,6 +72,13 @@ SQL itself does not lend itself well to [modularity](https://docs.getdbt.com/doc
 
 Again, please raise issues if you wish to support more templating languages/syntaxes.
 
+## VS Code Extension
+
+We also have a VS Code extension:
+
+- [GitHub Repository](https://github.com/sqlfluff/vscode-sqlfluff)
+- [Extension in VS Code marketplace](https://marketplace.visualstudio.com/items?itemName=dorzey.vscode-sqlfluff)
+
 # Getting Started
 
 To get started, install the package and run `sqlfluff lint` or `sqlfluff fix`.
@@ -63,43 +88,75 @@ $ pip install sqlfluff
 $ echo "  SELECT a  +  b FROM tbl;  " > test.sql
 $ sqlfluff lint test.sql --dialect ansi
 == [test.sql] FAIL
-L:   1 | P:   1 | L050 | Files must not begin with newlines or whitespace.
-L:   1 | P:   3 | L003 | First line has unexpected indent
-L:   1 | P:  11 | L039 | Unnecessary whitespace found.
-L:   1 | P:  14 | L039 | Unnecessary whitespace found.
-L:   1 | P:  27 | L001 | Unnecessary trailing whitespace.
+L:   1 | P:   1 | LT01 | Expected only single space before 'SELECT' keyword.
+                       | Found '  '. [layout.spacing]
+L:   1 | P:   1 | LT02 | First line should not be indented.
+                       | [layout.indent]
+L:   1 | P:   1 | LT13 | Files must not begin with newlines or whitespace.
+                       | [layout.start_of_file]
+L:   1 | P:  11 | LT01 | Expected only single space before binary operator '+'.
+                       | Found '  '. [layout.spacing]
+L:   1 | P:  14 | LT01 | Expected only single space before naked identifier.
+                       | Found '  '. [layout.spacing]
+L:   1 | P:  27 | LT01 | Unnecessary trailing whitespace at end of file.
+                       | [layout.spacing]
+L:   1 | P:  27 | LT12 | Files must end with a single trailing newline.
+                       | [layout.end_of_file]
+All Finished 📜 🎉!
 ```
 
-Alternatively, you can use the [**Official SQLFluff Docker Image**](https://hub.docker.com/r/sqlfluff/sqlfluff) or have a play using [**SQLFluff online**](https://online.sqlfluff.com/).
+Alternatively, you can use the [**Official SQLFluff Docker Image**](https://hub.docker.com/r/sqlfluff/sqlfluff)
+or have a play using [**SQLFluff online**](https://online.sqlfluff.com/).
 
-For full [CLI usage](https://docs.sqlfluff.com/en/stable/cli.html) and [rules reference](https://docs.sqlfluff.com/en/stable/rules.html), see [the SQLFluff docs](https://docs.sqlfluff.com/en/stable/).
+For full [CLI usage](https://docs.sqlfluff.com/en/stable/cli.html) and
+[rules reference](https://docs.sqlfluff.com/en/stable/rules.html), see
+[the SQLFluff docs](https://docs.sqlfluff.com/en/stable/).
 
 # Documentation
 
-For full documentation visit [docs.sqlfluff.com](https://docs.sqlfluff.com/en/stable/). This documentation is generated from this repository so please raise [issues](https://github.com/sqlfluff/sqlfluff/issues) or pull requests for any additions, corrections, or clarifications.
+For full documentation visit [docs.sqlfluff.com](https://docs.sqlfluff.com/en/stable/).
+This documentation is generated from this repository so please raise
+[issues](https://github.com/sqlfluff/sqlfluff/issues) or pull requests
+for any additions, corrections, or clarifications.
 
 # Releases
 
-**SQLFluff** adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html), so breaking changes
-should be restricted to major versions releases. Some elements (such as the python API) are in a less
-stable state and may see more significant changes more often. See the [changelog](CHANGELOG.md) for more details.
-If you would like to join in please consider [contributing](CONTRIBUTING.md).
+**SQLFluff** adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html),
+so breaking changes should be restricted to major version releases. Some
+elements (such as the python API) are in a less stable state and may see more
+significant changes more often. For details on breaking changes and how
+to migrate between versions, see our
+[release notes](https://docs.sqlfluff.com/en/latest/releasenotes.html). See the
+[changelog](CHANGELOG.md) for more details. If you would like to join in, please
+consider [contributing](CONTRIBUTING.md).
 
-New releases are made monthly. For more information, visit [Releases](https://github.com/sqlfluff/sqlfluff/releases).
+New releases are made monthly. For more information, visit
+[Releases](https://github.com/sqlfluff/sqlfluff/releases).
 
 # SQLFluff on Slack
 
-We have a fast-growing community [on Slack](https://join.slack.com/t/sqlfluff/shared_invite/zt-o1f4x0e8-pZzarAIlQmKj_6ZwD16w0g), come and join us!
+We have a fast-growing community
+[on Slack](https://join.slack.com/t/sqlfluff/shared_invite/zt-o1f4x0e8-pZzarAIlQmKj_6ZwD16w0g),
+come and join us!
 
 # SQLFluff on Twitter
 
-Follow us [on Twitter @SQLFluff](https://twitter.com/SQLFluff) for announcements and other related posts.
+Follow us [on Twitter @SQLFluff](https://twitter.com/SQLFluff) for announcements
+and other related posts.
 
 # Contributing
 
-We are grateful to all our [contributors](https://github.com/sqlfluff/sqlfluff/graphs/contributors). There is a lot to do in this project, and we are just getting started.
+We are grateful to all our [contributors](https://github.com/sqlfluff/sqlfluff/graphs/contributors).
+There is a lot to do in this project, and we are just getting started.
 
 If you want to understand more about the architecture of **SQLFluff**, you can
 find [more here](https://docs.sqlfluff.com/en/latest/internals.html#architecture).
 
-If you would like to contribute, check out the [open issues on GitHub](https://github.com/sqlfluff/sqlfluff/issues). You can also see the guide to [contributing](CONTRIBUTING.md).
+If you would like to contribute, check out the
+[open issues on GitHub](https://github.com/sqlfluff/sqlfluff/issues). You can also see
+the guide to [contributing](CONTRIBUTING.md).
+
+# Sponsors
+
+<img src="images/datacoves.png" alt="Datacoves" width="150"/><br>
+The turnkey analytics stack; find out more at [Datacoves.com](https://datacoves.com/).
diff --git a/constraints/dbt020.txt b/constraints/dbt020.txt
deleted file mode 100644
index ef5c7bb..0000000
--- a/constraints/dbt020.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-dbt-core==0.20.2
-dbt-postgres==0.20.2
-markupsafe<=2.0.1
diff --git a/constraints/dbt021.txt b/constraints/dbt021.txt
deleted file mode 100644
index 2e08e60..0000000
--- a/constraints/dbt021.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-dbt-core==0.21.0
-dbt-postgres==0.21.0
-markupsafe<=2.0.1
diff --git a/constraints/dbt100-winpy.txt b/constraints/dbt100-winpy.txt
deleted file mode 100644
index dfd662f..0000000
--- a/constraints/dbt100-winpy.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-dbt-core==1.0.0
-dbt-postgres==1.0.0
-markupsafe<=2.0.1
diff --git a/constraints/dbt100.txt b/constraints/dbt100.txt
deleted file mode 100644
index dfd662f..0000000
--- a/constraints/dbt100.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-dbt-core==1.0.0
-dbt-postgres==1.0.0
-markupsafe<=2.0.1
diff --git a/constraints/dbt110.txt b/constraints/dbt110.txt
new file mode 100644
index 0000000..0fed3d6
--- /dev/null
+++ b/constraints/dbt110.txt
@@ -0,0 +1,2 @@
+dbt-core~=1.1
+dbt-postgres~=1.1
diff --git a/constraints/dbt130.txt b/constraints/dbt130.txt
deleted file mode 100644
index 64157f6..0000000
--- a/constraints/dbt130.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-dbt-core~=1.3
-dbt-postgres~=1.3
diff --git a/constraints/dbt140-winpy.txt b/constraints/dbt140-winpy.txt
new file mode 100644
index 0000000..a9fa3a0
--- /dev/null
+++ b/constraints/dbt140-winpy.txt
@@ -0,0 +1,3 @@
+dbt-core~=1.4
+dbt-postgres~=1.4
+markupsafe<=2.0.1
diff --git a/constraints/dbt140.txt b/constraints/dbt140.txt
new file mode 100644
index 0000000..673a019
--- /dev/null
+++ b/constraints/dbt140.txt
@@ -0,0 +1,2 @@
+dbt-core~=1.4
+dbt-postgres~=1.4
diff --git a/debian/changelog b/debian/changelog
index cd6b754..1c8728e 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+sqlfluff (2.0.5-1) UNRELEASED; urgency=low
+
+  * New upstream release.
+
+ -- Debian Janitor <janitor@jelmer.uk>  Sat, 15 Apr 2023 02:39:41 -0000
+
 sqlfluff (1.4.5-2) unstable; urgency=medium
 
   * upload to unstable for testing migration
diff --git a/debian/patches/disable_buttons_in_documentation.patch b/debian/patches/disable_buttons_in_documentation.patch
index 1689f27..638d94a 100644
--- a/debian/patches/disable_buttons_in_documentation.patch
+++ b/debian/patches/disable_buttons_in_documentation.patch
@@ -6,9 +6,11 @@ Forwarded: not-needed
 Last-Update: 2023-02-02
 ---
 This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
---- a/docs/source/conf.py
-+++ b/docs/source/conf.py
-@@ -81,21 +81,9 @@ html_static_path = ["_static"]
+Index: sqlfluff.git/docs/source/conf.py
+===================================================================
+--- sqlfluff.git.orig/docs/source/conf.py
++++ sqlfluff.git/docs/source/conf.py
+@@ -87,21 +87,9 @@ html_static_path = ["_static"]
  # -- Options for Alabaster Theme ---------------------------------------------
  
  html_theme_options = {
diff --git a/docs/Makefile b/docs/Makefile
index 09005c5..934e08e 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -20,4 +20,5 @@ help:
 # Catch-all target: route all unknown targets to Sphinx using the new
 # "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
 %: Makefile
+	python generate-rule-docs.py
 	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/generate-rule-docs.py b/docs/generate-rule-docs.py
new file mode 100644
index 0000000..109d1a4
--- /dev/null
+++ b/docs/generate-rule-docs.py
@@ -0,0 +1,102 @@
+"""Generate rule documentation automatically."""
+
+from collections import defaultdict
+from pathlib import Path
+
+from sqlfluff.core.plugin.host import get_plugin_manager
+
+base_path = Path(__file__).parent.absolute()
+
+##########################################
+# Generate rule documentation dynamically.
+##########################################
+
+autogen_header = """..
+    NOTE: This file is generated by the generate-rule-docs.py script.
+    Don't edit this by hand
+
+
+"""
+
+table_header = f"""
++{'-' * 42}+{'-' * 50}+{'-' * 30}+{'-' * 20}+
+|{'Bundle' : <42}|{'Rule Name' : <50}|{'Code' : <30}|{'Aliases' : <20}|
++{'=' * 42}+{'=' * 50}+{'=' * 30}+{'=' * 20}+
+"""
+
+# Extract all the rules.
+print("Rule Docs Generation: Reading Rules...")
+rule_bundles = defaultdict(list)
+for plugin_rules in get_plugin_manager().hook.get_rules():
+    for rule in plugin_rules:
+        _bundle_name = rule.name.split(".")[0]
+        rule_bundles[_bundle_name].append(rule)
+
+# Write them into the table. Bundle by bundle.
+print("Rule Docs Generation: Writing Rule Table...")
+with open(base_path / "source/partials/rule_table.rst", "w", encoding="utf8") as f:
+    f.write(autogen_header)
+    f.write(table_header)
+    for bundle in sorted(rule_bundles.keys()):
+        # Set the bundle name to the ref.
+        _bundle_name = f":ref:`bundle_{bundle}`"
+        for idx, rule in enumerate(rule_bundles[bundle]):
+            aliases = ", ".join(rule.aliases[:3]) + (
+                "," if len(rule.aliases) > 3 else ""
+            )
+            name_ref = f":sqlfluff:ref:`{rule.name}`"
+            code_ref = f":sqlfluff:ref:`{rule.code}`"
+            f.write(
+                f"| {_bundle_name : <40} | {name_ref : <48} "
+                f"| {code_ref : <28} | {aliases : <18} |\n"
+            )
+
+            j = 3
+            while True:
+                if not rule.aliases[j:]:
+                    break
+                aliases = ", ".join(rule.aliases[j : j + 3]) + (
+                    "," if len(rule.aliases[j:]) > 3 else ""
+                )
+                f.write(f"|{' ' * 42}|{' ' * 50}|{' ' * 30}| {aliases : <18} |\n")
+                j += 3
+
+            if idx + 1 < len(rule_bundles[bundle]):
+                f.write(f"|{' ' * 42}+{'-' * 50}+{'-' * 30}+{'-' * 20}+\n")
+            else:
+                f.write(f"+{'-' * 42}+{'-' * 50}+{'-' * 30}+{'-' * 20}+\n")
+            # Unset the bundle name so we don't repeat it.
+            _bundle_name = ""
+    f.write("\n\n")
+
+
+# Write each of the summary files.
+print("Rule Docs Generation: Writing Rule Summaries...")
+with open(base_path / "source/partials/rule_summaries.rst", "w", encoding="utf8") as f:
+    f.write(autogen_header)
+    for bundle in sorted(rule_bundles.keys()):
+        if "sql" in bundle:
+            # This accounts for things like "TSQL"
+            header_name = bundle.upper()
+        else:
+            header_name = bundle.capitalize()
+        # Write the bundle header.
+        f.write(
+            f".. _bundle_{bundle}:\n\n"
+            f"{header_name} bundle\n"
+            f"{'-' * (len(bundle) + 7)}\n\n"
+        )
+        for rule in rule_bundles[bundle]:
+            f.write(
+                f".. sqlfluff:rule:: {rule.code}\n"
+                f"                   {rule.name}\n\n"
+            )
+            # Separate off the heading so we can bold it.
+            heading, _, doc_body = rule.__doc__.partition("\n")
+            underline_char = '"'
+            f.write(f"    {heading}\n")
+            f.write(f"    {underline_char * len(heading)}\n\n")
+            f.write("    " + doc_body)
+            f.write("\n\n")
+
+print("Rule Docs Generation: Done")
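
Aside: the fixed-width grid written by `generate-rule-docs.py` relies on
f-string padding; a standalone sketch of the same technique (not part of the
script itself):

```python
# Left-align each cell into a fixed-width column (format spec like "<42"),
# so that the "|" separators line up into an RST grid table.
widths = {"Bundle": 42, "Rule Name": 50, "Code": 30, "Aliases": 20}
border = "+" + "+".join("-" * w for w in widths.values()) + "+"
header = "|" + "|".join(f"{name: <{w}}" for name, w in widths.items()) + "|"
print(border)
print(header)
print(border.replace("-", "="))  # '=' border marks the header row in RST.
```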
diff --git a/docs/make.bat b/docs/make.bat
index 9534b01..ee9bb5f 100644
--- a/docs/make.bat
+++ b/docs/make.bat
@@ -25,6 +25,9 @@ if errorlevel 9009 (
 	exit /b 1
 )
 
+REM Generate the rule docs
+py generate-rule-docs.py
+
 %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
 goto end
 
diff --git a/docs/source/_ext/sqlfluff_domain.py b/docs/source/_ext/sqlfluff_domain.py
new file mode 100644
index 0000000..f4328db
--- /dev/null
+++ b/docs/source/_ext/sqlfluff_domain.py
@@ -0,0 +1,139 @@
+"""The sqlfluff domain for documenting rules."""
+
+from sphinx import addnodes
+from sphinx.domains import Domain, ObjType
+from sphinx.directives import ObjectDescription
+from sphinx.roles import XRefRole
+from sphinx.util.nodes import make_refnode
+
+
+class SQLFluffRule(ObjectDescription):
+    """SQLFluff rule directive for sphinx.
+
+    Rule directives can be used as shown below.
+
+    .. code-block:: rst
+
+        .. sqlfluff:rule:: AM01
+                        ambiguous.distinct
+
+            Write the documentation for the rule here.
+
+    """
+
+    def handle_signature(self, sig, signode):
+        """Handle the initial signature of the node.
+
+        This formats the header of the section.
+        """
+        raw_obj_type = "code" if len(sig) == 4 else "rule"
+        obj_type = raw_obj_type.capitalize() + " "
+        signode += addnodes.desc_type(obj_type, obj_type)
+        signode += addnodes.desc_name(sig, sig)
+
+        fullname = obj_type + sig
+        signode["type"] = raw_obj_type
+        signode["sig"] = sig
+        signode["fullname"] = fullname
+        return (fullname, raw_obj_type, sig)
+
+    def add_target_and_index(self, name_cls, sig, signode):
+        """Hook to add the permalink and index entries."""
+        # Add an ID for permalinks
+        node_id = "rule" + "-" + sig
+        signode["ids"].append(node_id)
+        if len(sig) == 4:
+            # If it's a code, add support for legacy links too.
+            # Both of these formats have been used in the past.
+            signode["ids"].append(f"sqlfluff.rules.Rule_{sig}")
+            signode["ids"].append(f"sqlfluff.rules.sphinx.Rule_{sig}")
+        # Add to domain for xref resolution
+        fluff = self.env.get_domain("sqlfluff")
+        fluff.add_rule(sig)
+        # Add to index
+        self.indexnode["entries"].append(("single", sig, node_id, "", None))
+
+    def _object_hierarchy_parts(self, sig_node):
+        return ("bundle", "name")
+
+    def _toc_entry_name(self, sig_node) -> str:
+        # NOTE: toctree unpacking issues are due to incorrectly
+        # setting _toc_parts.
+        sig_node["_toc_parts"] = (
+            "bundle",
+            sig_node["sig"],
+        )
+        if len(sig_node["sig"]) == 4:
+            # It's a code - don't return TOC entry.
+            return ""
+        else:
+            # It's a name
+            return sig_node["sig"]
+
+
+class SQLFluffDomain(Domain):
+    """SQLFluff domain."""
+
+    name = "sqlfluff"
+    label = "sqlfluff"
+
+    object_types = {
+        "rule": ObjType("rule", "rule", "obj"),
+    }
+
+    roles = {
+        "ref": XRefRole(),
+    }
+
+    directives = {
+        "rule": SQLFluffRule,
+    }
+
+    initial_data = {
+        "rules": [],  # object list
+    }
+
+    def get_full_qualified_name(self, node):
+        """Get the fully qualified name of the rule."""
+        return f"rule.{node.arguments[0]}"
+
+    def get_objects(self):
+        """Hook to get all the rules."""
+        yield from self.data["rules"]
+
+    def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
+        """Hook to resolve xrefs.
+
+        References can be made by code or by name, e.g.
+        - :sqlfluff:ref:`LT01`
+        - :sqlfluff:ref:`layout.spacing`
+        """
+        match = [
+            (docname, anchor)
+            for _, sig, _, docname, anchor, _ in self.get_objects()
+            if sig == target
+        ]
+
+        if len(match) > 0:
+            todocname = match[0][0]
+            targ = match[0][1]
+
+            return make_refnode(builder, fromdocname, todocname, targ, contnode, targ)
+        else:
+            print(f"Failed to match xref: {target!r}")
+            return None
+
+    def add_rule(self, signature):
+        """Add a new rule to the domain."""
+        name = f"rule.{signature}"
+        anchor = f"rule-{signature}"
+
+        # name, dispname, type, docname, anchor, priority
+        self.data["rules"].append(
+            (name, signature, "Rule", self.env.docname, anchor, 0)
+        )
+
+
+def setup(app):
+    """Setup the domain."""
+    app.add_domain(SQLFluffDomain)
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 0fc3a46..81022e1 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -5,6 +5,8 @@ list see the documentation:
 https://www.sphinx-doc.org/en/master/usage/configuration.html
 """
 
+import os
+import sys
 import configparser
 
 # -- Path setup --------------------------------------------------------------
@@ -12,10 +14,8 @@ import configparser
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#
-# import os
-# import sys
-# sys.path.insert(0, os.path.abspath('.'))
+
+sys.path.append(os.path.abspath("./_ext"))
 
 # Get the global config info as currently stated
 # (we use the config file to avoid actually loading any python here)
@@ -26,7 +26,7 @@ stable_version = config.get("sqlfluff_docs", "stable_version")
 # -- Project information -----------------------------------------------------
 
 project = "SQLFluff"
-copyright = "2019, Alan Cruickshank"
+copyright = "2023, Alan Cruickshank"
 author = "Alan Cruickshank"
 
 # The full version, including alpha/beta/rc tags
@@ -47,6 +47,8 @@ extensions = [
     "sphinx_click.ext",
     # Redirects
     "sphinx_reredirects",
+    # SQLFluff domain
+    "sqlfluff_domain",
 ]
 
 # Add any paths that contain templates here, relative to this directory.
@@ -55,7 +57,11 @@ templates_path = ["_templates"]
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
 # This pattern also affects html_static_path and html_extra_path.
-exclude_patterns = []
+exclude_patterns = [
+    # Exclude the partials folder, which is made up of files intended
+    # to be included in others.
+    "partials",
+]
 
 # Master doc
 master_doc = "index"
diff --git a/docs/source/configuration.rst b/docs/source/configuration.rst
index b315a44..7408ae1 100644
--- a/docs/source/configuration.rst
+++ b/docs/source/configuration.rst
@@ -11,6 +11,9 @@ must be done via a file, because it otherwise gets slightly complicated.
 For details of what's available on the command line check out
 the :ref:`cliref`.
 
+Configuration files
+-------------------
+
 For file based configuration *SQLFluff* will look for the following
 files in order. Later files (if found) will be used to overwrite
 any values read from earlier files.
@@ -54,11 +57,11 @@ For the `pyproject.toml file`_, all valid sections start with
 
 For example, a snippet from a :code:`pyproject.toml` file:
 
-.. code-block:: cfg
+.. code-block:: toml
 
     [tool.sqlfluff.core]
-    templater = jinja
-    sql_file_exts = .sql,.sql.j2,.dml,.ddl
+    templater = "jinja"
+    sql_file_exts = ".sql,.sql.j2,.dml,.ddl"
 
     [tool.sqlfluff.indentation]
     indented_joins = false
@@ -71,13 +74,44 @@ For example, a snippet from a :code:`pyproject.toml` file:
     [tool.sqlfluff.templater.jinja]
     apply_dbt_builtins = true
 
+    # For rule specific configuration, use dots between the names exactly
+    # as you would in .sqlfluff. In the background, SQLFluff will unpack the
+    # configuration paths accordingly.
+    [tool.sqlfluff.rules.capitalisation.keywords]
+    capitalisation_policy = "upper"
+
 .. _`cfg file`: https://docs.python.org/3/library/configparser.html
 .. _`pyproject.toml file`: https://www.python.org/dev/peps/pep-0518/
 
+
+.. _starter_config:
+
+New Project Configuration
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When setting up a new project with SQLFluff, we recommend keeping your
+configuration file fairly minimal. The config file should act as a form
+of *documentation* for your team, i.e. a record of what decisions you've
+made which govern how you format your SQL. By having a more concise
+config file, and only defining config settings where they differ from the
+defaults, you are more clearly stating to your team what choices you've made.
+
+*However*, there are also a few places where the *default* configuration
+is designed more for *existing projects*, rather than *fresh projects*, and
+so there is an opportunity to be a little stricter than you might otherwise
+be with an existing codebase.
+
+Here is a simple configuration file which would be suitable for a starter
+project:
+
+.. literalinclude:: partials/starter_config.cfg
+   :language: cfg
+
+
 .. _nesting:
 
 Nesting
--------
+^^^^^^^
 
 **SQLFluff** uses **nesting** in its configuration files, with files
 closer *overriding* (or *patching*, if you will) values from other files.
@@ -109,6 +143,8 @@ steps overriding those from earlier:
 This whole structure leads to efficient configuration, in particular
 in projects which utilise a lot of complicated templating.
 
+.. _in_file_config:
+
 In-File Configuration Directives
 --------------------------------
 
@@ -130,10 +166,12 @@ A few common examples are shown below:
 
     -- Set Indented Joins
     -- sqlfluff:indentation:indented_joins:true
+
     -- Set a smaller indent for this file
     -- sqlfluff:indentation:tab_space_size:2
+
     -- Set keywords to be capitalised
-    -- sqlfluff:rules:L010:capitalisation_policy:upper
+    -- sqlfluff:rules:capitalisation.keywords:capitalisation_policy:upper
 
     SELECT *
     FROM a
@@ -158,9 +196,6 @@ For example:
 .. code-block:: cfg
 
    [sqlfluff:rules]
-   tab_space_size = 4
-   max_line_length = 80
-   indent_unit = space
    allow_scalar = True
    single_table_references = consistent
    unquoted_identifiers_policy = all
@@ -168,11 +203,11 @@ For example:
 Rule specific configurations are set in rule specific subsections.
 
 For example, enforce that keywords are upper case by configuring the rule
-:class:`L010 <sqlfluff.core.rules.Rule_L010>`:
+:sqlfluff:ref:`CP01`:
 
 .. code-block:: cfg
 
-    [sqlfluff:rules:L010]
+    [sqlfluff:rules:capitalisation.keywords]
     # Keywords
     capitalisation_policy = upper
 
@@ -182,32 +217,81 @@ For an overview of the most common rule configurations that you may want to
 tweak, see `Default Configuration`_ (and use :ref:`ruleref` to find the
 available alternatives).
 
+.. _ruleselection:
+
 Enabling and Disabling Rules
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-To disable individual rules, set :code:`exclude_rules` in the top level section
-of sqlfluff configuration. The value is a comma separated list of rule ids.
+The decision as to which rules are applied to a given file is made on a
+file-by-file basis, by the effective configuration for that file. There are
+two configuration values which you can use to set this:
+
+* :code:`rules`, which explicitly *enables* the specified rules. If this
+  parameter is unset or empty for a file, this implies "no selection" and
+  so *all* rules are enabled.
+* :code:`exclude_rules`, which explicitly *disables* the specified rules.
+  This parameter is applied *after* the :code:`rules` parameter so can be
+  used to *subtract* from the otherwise enabled set.
+
+Each of these two configuration values accepts a comma separated list of
+*references*. Each of those references can be:
+
+* a rule *code* e.g. :code:`LT01`
+* a rule *name* e.g. :code:`layout.indent`
+* a rule *alias*, which is often a deprecated *code* e.g. :code:`L003`
+* a rule *group* e.g. :code:`layout` or :code:`capitalisation`
 
-For example, to disable the rules :class:`L022 <sqlfluff.core.rules.Rule_L022>`
-and :class:`L027 <sqlfluff.core.rules.Rule_L027>`:
+These different references can be mixed within a given expression, which
+results in a very powerful syntax for selecting exactly which rules are
+active for a given file.
+
+.. note::
+
+    It's worth mentioning here that the application of :code:`rules` and
+    :code:`exclude_rules`, with *groups*, *aliases* and *names*, in projects
+    with potentially multiple nested configuration files defining different
+    rules for different areas of a project can get very confusing very fast.
+    While this flexibility is intended for users to take advantage of, we do
+    have some recommendations about how to do this in a way that remains
+    manageable.
+
+    When considering configuration inheritance, each of :code:`rules` and
+    :code:`exclude_rules` will totally overwrite any values in parent config
+    files if they are set in a child file. While the subtraction operation
+    between both of them is calculated *"per file"*, there is no combination
+    operation between two definitions of :code:`rules` (just one overwrites
+    the other).
+
+    The effect of this is that we recommend one of two approaches:
+
+    #. Simply only use :code:`rules`. This has the upshot of each area of
+       your project being very explicit about which rules are enabled. When
+       that changes for part of your project you just reset the whole list
+       of applicable rules for that part of the project.
+    #. Set a single :code:`rules` value in your master project config file
+       and then only use :code:`exclude_rules` in sub-configuration files
+       to *turn off* specific rules for parts of the project where those
+       rules are inappropriate. This keeps the simplicity of only having
+       one value which is inherited, but allows slightly easier and simpler
+       rollout of new rules, because you manage by exception.
+
+
+For example, to disable the rules :sqlfluff:ref:`LT08`
+and :sqlfluff:ref:`RF02`:
 
 .. code-block:: cfg
 
     [sqlfluff]
-    exclude_rules = L022, L027
+    exclude_rules = LT08, RF02
 
 To enable individual rules, configure :code:`rules` instead.
 
-For example, to enable :class:`L027 <sqlfluff.core.rules.Rule_L027>`:
+For example, to enable :sqlfluff:ref:`RF02`:
 
 .. code-block:: cfg
 
     [sqlfluff]
-    rules = L027
-
-If both :code:`exclude_rules` and :code:`rules` have non-empty value, then the
-excluded rules are removed from the rules list. This allows for example
-enabling common rules on top level but excluding some on subdirectory level.
+    rules = RF02
 
 Rules can also be enabled/disabled by special groupings. One such grouping
 is :code:`core`. This will enable (or disable) a select group
@@ -243,7 +327,7 @@ above:
 .. code-block:: cfg
 
     [sqlfluff]
-    warnings = L019, L007
+    warnings = LT01, LT04
 
 With this configuration, files with no other issues (other than
 those set to warn) will pass. If there are still other issues, then
@@ -252,10 +336,10 @@ the file will still fail, but will show both warnings and failures.
 .. code-block::
 
     == [test.sql] PASS
-    L:   2 | P:   9 | L006 | WARNING: Missing whitespace before +
+    L:   2 | P:   9 | LT01 | WARNING: Missing whitespace before +
     == [test2.sql] FAIL
-    L:   2 | P:   8 | L014 | Unquoted identifiers must be consistently upper case.
-    L:   2 | P:  11 | L006 | WARNING: Missing whitespace before +
+    L:   2 | P:   8 | CP02 | Unquoted identifiers must be consistently upper case.
+    L:   2 | P:  11 | LT01 | WARNING: Missing whitespace before +
 
 This is particularly useful as a transitional tool when considering
 the introduction of new rules on a project where you might want to
@@ -315,9 +399,9 @@ For example, if passed the following *.sql* file:
 .. note::
 
     If there are variables in the template which cannot be found in
-    the current configuration context, then this will raise a `SQLTemplatingError`
-    and this will appear as a violation without a line number, quoting
-    the name of the variable that couldn't be found.
+    the current configuration context, then this will raise a
+    `SQLTemplatingError` and this will appear as a violation without
+    a line number, quoting the name of the variable that couldn't be found.
 
 Placeholder templating
 ^^^^^^^^^^^^^^^^^^^^^^
@@ -844,7 +928,7 @@ You already know you can pass arguments (:code:`--verbose`,
 
 .. code-block:: text
 
-    $ sqlfluff lint my_code.sql -v --exclude-rules L022,L027
+    $ sqlfluff lint my_code.sql -v --exclude-rules LT08,RF02
 
 You might have arguments that you pass through every time, e.g. rules you
 *always* want to ignore. These can also be configured:
@@ -853,7 +937,7 @@ You might have arguments that you pass through every time, e.g rules you
 
     [sqlfluff]
     verbose = 1
-    exclude_rules = L022,L027
+    exclude_rules = LT08,RF02
 
 Note that while the :code:`exclude_rules` config looks similar to the
 above example, the :code:`verbose` config has an integer value. This is
@@ -877,8 +961,8 @@ be ignored by quoting their code or the category.
     -- Ignore all errors
     SeLeCt  1 from tBl ;    -- noqa
 
-    -- Ignore rule L014 & rule L030
-    SeLeCt  1 from tBl ;    -- noqa: L014,L030
+    -- Ignore rule CP02 & rule CP03
+    SeLeCt  1 from tBl ;    -- noqa: CP02,CP03
 
     -- Ignore all parsing errors
     SeLeCt from tBl ;       -- noqa: PRS
@@ -895,8 +979,8 @@ ignored until a corresponding `-- noqa:enable=<rule>[,...] | all` directive.
 
 .. code-block:: sql
 
-    -- Ignore rule L012 from this line forward
-    SELECT col_a a FROM foo -- noqa: disable=L012
+    -- Ignore rule AL02 from this line forward
+    SELECT col_a a FROM foo -- noqa: disable=AL02
 
     -- Ignore all rules from this line forward
     SELECT col_a a FROM foo -- noqa: disable=all
@@ -960,6 +1044,31 @@ Default Configuration
 The default configuration is as follows, note the `Builtin Macro Blocks`_ in
 section *[sqlfluff:templater:jinja:macros]* as referred to above.
 
+.. note::
+
+    This shows the *entire* default config. **We do not recommend that users**
+    **copy this whole config as the starter config file for their project**.
+
+    This is for two reasons:
+
+    #. The config file should act as a form of *documentation* for your team.
+       A record of what decisions you've made which govern how you format your
+       SQL. By having a more concise config file, and only defining config settings
+       where they differ from the defaults, you are more clearly stating to your
+       team what choices you've made.
+
+    #. As the project evolves, the structure of the config file may change
+       and we will attempt to make changes as backward compatible as possible.
+       If you have not overridden a config setting in your project, we can
+       easily update the default config to match your expected behaviour over time.
+       We may also find issues with the default config which we can also fix
+       in the background. *However*, the longer your local config file, the
+       more work it will be to update and migrate your config file between
+       major versions.
+
+    If you are starting a fresh project and are looking for a good *starter config*,
+    check out the :ref:`starter_config` section above.
+
+
 .. literalinclude:: ../../src/sqlfluff/core/default_config.cfg
    :language: cfg
-   :linenos:
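
The rule selection logic documented above can also be exercised from Python; a
minimal sketch, assuming the simple API (`sqlfluff.lint`) accepts the same rule
references as the config file:

```python
import sqlfluff

sql = "SELECT col_a a FROM foo\n"

# Enable only the capitalisation group, then subtract one rule from it,
# mirroring the "rules minus exclude_rules" calculation described above.
violations = sqlfluff.lint(
    sql,
    dialect="ansi",
    rules=["capitalisation"],
    exclude_rules=["CP02"],
)
for v in violations:
    print(v["code"], v["description"])
```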
diff --git a/docs/source/developingrules.rst b/docs/source/developingrules.rst
index 9d3cef9..d33df80 100644
--- a/docs/source/developingrules.rst
+++ b/docs/source/developingrules.rst
@@ -24,7 +24,7 @@ Typical reasons include:
 These rules can override ``BaseRule``'s ``recurse_into`` field, setting it to
 ``False``. For these rules ``False``, ``_eval()`` is only called *once*, with
 the root segment of the tree. This can be much more efficient, especially on
-large files. For example, see rules ``L050`` and ``L009`` , which only look at
+large files. For example, see rules ``LT13`` and ``LT12``, which only look at
 the beginning or end of the file, respectively.
 
 ``_works_on_unparsable``
diff --git a/docs/source/dialects.rst b/docs/source/dialects.rst
index fc7ad09..2f48eae 100644
--- a/docs/source/dialects.rst
+++ b/docs/source/dialects.rst
@@ -84,19 +84,9 @@ The dialect for `ClickHouse`_.
 Databricks
 ----------
 
-The dialect `Databricks`_ is an alias for the :ref:`sparksql_dialect_ref`.
-
-Since Databricks `builds on top of`_ Apache Spark, the Spark SQL dialect
-holds most of the definitions of common commands and structures.
-
-Specifics to Databricks, such as Delta Live Table syntax, are added to the
-Spark SQL dialect to simplify implementation and prevent code duplication
-for minor syntax updates. This follows SQLFluff's philosophy of not being
-strict in adhering to dialect specifications to permit slightly wider set
-of functions than actually available in a given dialect.
+The dialect `Databricks`_.
 
 .. _`Databricks`: https://databricks.com/
-.. _`builds on top of` : https://www.databricks.com/spark/comparing-databricks-to-apache-spark
 
 .. _db2_dialect_ref:
 
@@ -107,6 +97,16 @@ The dialect for `Db2`_.
 
 .. _`Db2`: https://www.ibm.com/analytics/db2
 
+.. _duck_dialect_ref:
+
+DuckDB
+------
+
+The dialect for `DuckDB`_.
+
+.. _`DuckDB`: https://duckdb.org/
+
+
 .. _exasol_dialect_ref:
 
 Exasol
@@ -118,6 +118,15 @@ The dialect for `Exasol`_.
 
 .. _hive_dialect_ref:
 
+Greenplum
+---------
+
+The dialect for `Greenplum`_.
+
+.. _`Greenplum`: https://www.greenplum.org/
+
+.. _greens_dialect_ref:
+
 Hive
 ----
 
diff --git a/docs/source/gettingstarted.rst b/docs/source/gettingstarted.rst
index 0b13284..d124a42 100644
--- a/docs/source/gettingstarted.rst
+++ b/docs/source/gettingstarted.rst
@@ -61,7 +61,7 @@ version number.
 .. code-block:: text
 
     $ sqlfluff version
-    1.4.5
+    2.0.5
 
 Basic Usage
 -----------
@@ -83,22 +83,33 @@ file.
 
     $ sqlfluff lint test.sql --dialect ansi
     == [test.sql] FAIL
-    L:   1 | P:   1 | L034 | Select wildcards then simple targets before calculations
-                           | and aggregates.
-    L:   1 | P:   1 | L036 | Select targets should be on a new line unless there is
+    L:   1 | P:   1 | LT09 | Select targets should be on a new line unless there is
                            | only one select target.
-    L:   1 | P:   9 | L006 | Missing whitespace before +
-    L:   1 | P:   9 | L006 | Missing whitespace after +
-    L:   1 | P:  11 | L039 | Unnecessary whitespace found.
-    L:   2 | P:   1 | L003 | Expected 1 indentations, found 0 [compared to line 01]
-    L:   2 | P:  10 | L010 | Keywords must be consistently upper case.
+                           | [layout.select_targets]
+    L:   1 | P:   1 | ST06 | Select wildcards then simple targets before calculations
+                           | and aggregates. [structure.column_order]
+    L:   1 | P:   7 | LT02 | Expected line break and indent of 4 spaces before 'a'.
+                           | [layout.indent]
+    L:   1 | P:   9 | LT01 | Expected single whitespace between naked identifier and
+                           | binary operator '+'. [layout.spacing]
+    L:   1 | P:  10 | LT01 | Expected single whitespace between binary operator '+'
+                           | and naked identifier. [layout.spacing]
+    L:   1 | P:  11 | LT01 | Expected only single space before 'AS' keyword. Found '
+                           | '. [layout.spacing]
+    L:   2 | P:   1 | LT02 | Expected indent of 4 spaces.
+                           | [layout.indent]
+    L:   2 | P:   9 | LT02 | Expected line break and no indent before 'from'.
+                           | [layout.indent]
+    L:   2 | P:  10 | CP01 | Keywords must be consistently upper case.
+                           | [capitalisation.keywords]
+    All Finished 📜 🎉!
 
 You'll see that *SQLFluff* has failed the linting check for this file.
 On each of the following lines you can see each of the problems it has
 found, with some information about the location and what kind of
 problem there is. One of the errors has been found on *line 1*, *position 9*
 (as shown by :code:`L:   1 | P:   9`) and it's a problem with rule
-*L006* (for a full list of rules, see :ref:`ruleref`). From this
+*LT01* (for a full list of rules, see :ref:`ruleref`). From this
 (and the following error) we can see that the problem is that there
 is no space either side of the :code:`+` symbol in :code:`a+b`.
 Head into the file, and correct this issue so that the file now
@@ -110,19 +121,27 @@ looks like this:
     c AS bar from my_table
 
 Rerun the same command as before, and you'll see that the original
-error (violation of *L006*) no longer shows up.
+error (violation of *LT01*) no longer shows up.
 
 .. code-block:: text
 
     $ sqlfluff lint test.sql --dialect ansi
     == [test.sql] FAIL
-    L:   1 | P:   1 | L034 | Select wildcards then simple targets before calculations
-                           | and aggregates.
-    L:   1 | P:   1 | L036 | Select targets should be on a new line unless there is
+    L:   1 | P:   1 | LT09 | Select targets should be on a new line unless there is
                            | only one select target.
-    L:   1 | P:  13 | L039 | Unnecessary whitespace found.
-    L:   2 | P:   1 | L003 | Expected 1 indentations, found 0 [compared to line 01]
-    L:   2 | P:  10 | L010 | Keywords must be consistently upper case.
+                           | [layout.select_targets]
+    L:   1 | P:   1 | ST06 | Select wildcards then simple targets before calculations
+                           | and aggregates. [structure.column_order]
+    L:   1 | P:   7 | LT02 | Expected line break and indent of 4 spaces before 'a'.
+                           | [layout.indent]
+    L:   1 | P:  13 | LT01 | Expected only single space before 'AS' keyword. Found '
+                           | '. [layout.spacing]
+    L:   2 | P:   1 | LT02 | Expected indent of 4 spaces.
+                           | [layout.indent]
+    L:   2 | P:   9 | LT02 | Expected line break and no indent before 'from'.
+                           | [layout.indent]
+    L:   2 | P:  10 | CP01 | Keywords must be consistently upper case.
+                           | [capitalisation.keywords]
 
 To fix the remaining issues, we're going to use one of the more
 advanced features of *SQLFluff*, which is the *fix* command. This
@@ -132,17 +151,23 @@ and there may be some situations where a fix may not be able to be
 applied because of the context of the query, but in many simple cases
 it's a good place to start.
 
-For now, we only want to fix the following rules: *L003*, *L009*, *L010*
+For now, we only want to fix the following rules: *LT02*, *LT12*, *CP01*
 
 .. code-block:: text
 
-    $ sqlfluff fix test.sql --rules L003,L009,L010 --dialect ansi
+    $ sqlfluff fix test.sql --rules LT02,LT12,CP01 --dialect ansi
     ==== finding violations ====
     == [test.sql] FAIL
-    L:   2 | P:   1 | L003 | Expected 1 indentations, found 0 [compared to line 01]
-    L:   2 | P:  10 | L010 | Keywords must be consistently upper case.
+    L:   1 | P:   7 | LT02 | Expected line break and indent of 4 spaces before 'a'.
+                           | [layout.indent]
+    L:   2 | P:   1 | LT02 | Expected indent of 4 spaces.
+                           | [layout.indent]
+    L:   2 | P:   9 | LT02 | Expected line break and no indent before 'FROM'.
+                           | [layout.indent]
+    L:   2 | P:  10 | CP01 | Keywords must be consistently upper case.
+                           | [capitalisation.keywords]
     ==== fixing violations ====
-    2 fixable linting violations found
+    4 fixable linting violations found
     Are you sure you wish to attempt to fix these? [Y/n]
 
 ...at this point you'll have to confirm that you want to make the
@@ -161,17 +186,17 @@ now different.
 
 .. code-block:: sql
 
-    SELECT a + b  AS foo,
-        c AS bar FROM my_table
+    SELECT
+        a + b  AS foo,
+        c AS bar
+    FROM my_table
 
 In particular:
 
-* The second line has been indented to reflect being inside the
+* The two columns have been indented to reflect being inside the
   :code:`SELECT` statement.
 * The :code:`FROM` keyword has been capitalised to match the
   other keywords.
-* A final newline character has been added at the end of the
-  file (which may not be obvious in the snippet above).
 
 We could also fix *all* of the fixable errors by not
 specifying :code:`--rules`.
@@ -181,13 +206,12 @@ specifying :code:`--rules`.
     $ sqlfluff fix test.sql --dialect ansi
     ==== finding violations ====
     == [test.sql] FAIL
-    L:   1 | P:   1 | L034 | Select wildcards then simple targets before calculations
-                           | and aggregates.
-    L:   1 | P:   1 | L036 | Select targets should be on a new line unless there is
-                           | only one select target.
-    L:   1 | P:  13 | L039 | Unnecessary whitespace found.
+    L:   1 | P:   1 | ST06 | Select wildcards then simple targets before calculations
+                           | and aggregates. [structure.column_order]
+    L:   2 | P:  10 | LT01 | Expected only single space before 'AS' keyword. Found '
+                           | '. [layout.spacing]
     ==== fixing violations ====
-    3 fixable linting violations found
+    2 fixable linting violations found
     Are you sure you wish to attempt to fix these? [Y/n] ...
     Attempting fixes...
     Persisting Changes...
@@ -231,17 +255,17 @@ put the following content:
     [sqlfluff]
     dialect = ansi
 
-    [sqlfluff:rules]
+    [sqlfluff:indentation]
     tab_space_size = 2
 
-    [sqlfluff:rules:L010]
+    [sqlfluff:rules:CP01]
     capitalisation_policy = lower
 
 Then rerun the same command as before.
 
 .. code-block:: text
 
-    $ sqlfluff fix test.sql --rules L003,L009,L010,L034,L036,L039
+    $ sqlfluff fix test.sql --rules LT02,LT12,CP01,ST06,LT09,LT01
 
 Then examine the file again, and you'll notice that the
 file has been fixed accordingly.
@@ -249,11 +273,13 @@ file has been fixed accordingly.
 .. code-block:: sql
 
     select
-    c as bar,
-    a + b as foo
+      c as bar,
+      a + b as foo
     from my_table
 
 For a full list of configuration options check out :ref:`defaultconfig`.
+Note that in our example here we've only set a few configuration values,
+and any other configuration settings remain as per the default config.
 To see how these options apply to specific rules check out the
 "Configuration" section within each rule's documentation in :ref:`ruleref`.
 
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 8b69f14..fc486e2 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -6,36 +6,16 @@ you're working with? Fluff is an extensible and modular linter designed
 to help you write good SQL and catch errors and bad SQL before it hits
 your database.
 
-Notable changes:
+Notable releases:
 
-* **0.1.x** involved a major re-write of the parser, completely changing
-  the behaviour of the tool with respect to complex parsing.
-* **0.2.x** added templating support and a big restructure of rules
-  and changed how users might interact with SQLFluff on templated code.
-* **0.3.x** drops support for python 2.7 and 3.4, and also reworks the
-  handling of indentation linting in a potentially not backward
-  compatible way.
-* **0.4.x** dropped python 3.5, added the dbt templater, source mapping and
-  also introduced the python API.
-* **0.5.x** introduced some breaking changes to the API.
-* **0.6.x** introduced parallel processing, which necessitated a big re-write
-  of several innards.
-* **0.7.x** extracted the dbt templater to a separate plugin and removed the
-  ``exasol_fs`` dialect (now merged in with the main ``exasol``).
-* **0.8.x** an improvement to the performance of the parser, a rebuild of the
-  Jinja Templater, and a progress bar for the CLI.
-* **0.9.x** refinement of the Simple API, dbt 1.0.0 compatibility,
-  and the official SQLFluff Docker image.
-* **0.10.x** removed support for older dbt versions < 0.20 and stopped ``fix``
-  attempting to fix unparsable SQL.
-* **0.11.x** rule L030 changed to use ``extended_capitalisation_policy``.
-* **0.12.x** dialect is now mandatory, the ``spark3`` dialect was renamed to
-  ``sparksql`` and  datatype capitalisation was extracted from L010 to it's own
-  rule L063.
-* **0.13.x** new rule for quoted literals, option to remove hanging indents in
-  rule L003, and introduction of ``ignore_words_regex``.
-* **1.0.0** first *stable* release, no major changes to take advantage of a
+* **1.0.x**: First *stable* release, no major changes to take advantage of a
   point of relative stability.
+* **2.0.x**: Recode of rules, whitespace fixing consolidation,
+  :code:`sqlfluff format` and removal of support for dbt versions pre `1.1`.
+  Note that this release brings with it some breaking changes to rule coding
+  and configuration, see :ref:`upgrading_2_0`.
+
+For more detail on other releases, see our :ref:`releasenotes`.
 
 Want to see where and how people are using SQLFluff in their projects?
 Head over to :ref:`inthewildref` for inspiration.
@@ -53,11 +33,21 @@ have python or pip already installed see :ref:`gettingstartedref`.
     $ echo "  SELECT a  +  b FROM tbl;  " > test.sql
     $ sqlfluff lint test.sql --dialect ansi
     == [test.sql] FAIL
-    L:   1 | P:   1 | L050 | Files must not begin with newlines or whitespace.
-    L:   1 | P:   3 | L003 | First line has unexpected indent
-    L:   1 | P:  11 | L039 | Unnecessary whitespace found.
-    L:   1 | P:  14 | L039 | Unnecessary whitespace found.
-    L:   1 | P:  27 | L001 | Unnecessary trailing whitespace.
+    L:   1 | P:   1 | LT01 | Expected only single space before 'SELECT' keyword.
+                           | Found '  '. [layout.spacing]
+    L:   1 | P:   1 | LT02 | First line should not be indented.
+                           | [layout.indent]
+    L:   1 | P:   1 | LT13 | Files must not begin with newlines or whitespace.
+                           | [layout.start_of_file]
+    L:   1 | P:  11 | LT01 | Expected only single space before binary operator '+'.
+                           | Found '  '. [layout.spacing]
+    L:   1 | P:  14 | LT01 | Expected only single space before naked identifier.
+                           | Found '  '. [layout.spacing]
+    L:   1 | P:  27 | LT01 | Unnecessary trailing whitespace at end of file.
+                           | [layout.spacing]
+    L:   1 | P:  27 | LT12 | Files must end with a single trailing newline.
+                           | [layout.end-of-file]
+    All Finished 📜 🎉!
 
 Contents
 ^^^^^^^^
@@ -77,6 +67,7 @@ Contents
    configuration
    cli
    api
+   releasenotes
    internals
    developingrules
    developingplugins
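
The lint output shown above maps directly onto the Python API: each violation is returned as a dict carrying (at least) code, line_no, line_pos and description keys, per the API examples later in this diff. A minimal sketch:

    import sqlfluff

    sql = "  SELECT a  +  b FROM tbl;  "

    for violation in sqlfluff.lint(sql, dialect="ansi"):
        # Expect LT01/LT02/LT12/LT13 codes, matching the CLI output above.
        print(violation["code"], violation["description"])
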
diff --git a/docs/source/layout.rst b/docs/source/layout.rst
index 36e7953..cf13957 100644
--- a/docs/source/layout.rst
+++ b/docs/source/layout.rst
@@ -361,6 +361,52 @@ route left to consistency is to **not allow hanging indents**.
 Starting in 2.0.0, any hanging indents detected will be
 converted to traditional indents.
 
+.. _implicitindents:
+
+Implicit Indents
+^^^^^^^^^^^^^^^^
+
+A close cousin of the hanging indent is the *implicit indent*.
+While it does look a little like a hanging indent, it's much
+more consistent in its behaviour and is supported from SQLFluff
+2.0.0 onwards.
+
+An implicit indent is exactly like a normal indent, but doesn't
+have to be actually *taken* to influence the indentation of lines
+after it - it just needs to be left un-closed before the end of
+the line. These are normally available in clauses which take the
+form of :code:`KEYWORD <expression>`, like :code:`WHERE` clauses
+or :code:`CASE` expressions.
+
+.. code-block:: sql
+
+   -- This WHERE clause here takes advantage of an implicit indent.
+   SELECT *
+   FROM my_table
+   WHERE condition_a
+      AND condition_b;
+
+   -- With implicit indents disabled (which is currently the
+   -- default), the above formulation is not allowed, and instead
+   -- there should be a newline immediately after `WHERE` (which
+   -- is the location of the _implicit_ indent).
+   SELECT *
+   FROM my_table
+   WHERE
+      condition_a
+      AND condition_b;
+
+When addressing both indentation and line-length, implicit
+indents allow a slightly more compact layout, without significant
+drawbacks in legibility. They also enable a style much closer to
+some established style guides.
+
+They are however not recommended by many of the major style guides
+at the time of writing (including the `dbt Labs SQL style guide`_
+and the `Mozilla SQL style guide`_), and so are disabled by default.
+To enable them, set the :code:`allow_implicit_indents` flag in
+:code:`sqlfluff:indentation` to :code:`True`.
+
 .. _templatedindents:
 
 Templated Indents
@@ -501,7 +547,8 @@ For example, the default indentation would be as follows:
       b
    FROM my_table
    JOIN another_table
-      ON condition1
+      ON
+         condition1
          AND condition2
 
 By setting your config file to:
@@ -520,13 +567,37 @@ Then the expected indentation will be:
       b
    FROM my_table
       JOIN another_table
-         ON condition1
+         ON
+            condition1
             AND condition2
 
 There is a similar :code:`indented_using_on` config (defaulted to :code:`True`)
 which can be set to :code:`False` to prevent the :code:`USING` or :code:`ON`
 clause from being indented, in which case the original SQL would become:
 
+.. code-block:: sql
+
+   SELECT
+      a,
+      b
+   FROM my_table
+   JOIN another_table
+   ON
+      condition1
+      AND condition2
+
+It's worth noting at this point that for some users, the additional line
+break after :code:`ON` is unexpected, and this is a good example of an
+:ref:`implicit indent <implicitindents>`. By setting your config to:
+
+.. code-block:: cfg
+
+   [sqlfluff:indentation]
+   indented_using_on = False
+   allow_implicit_indents = True
+
+Then the expected indentation will be:
+
 .. code-block:: sql
 
    SELECT
@@ -540,7 +611,7 @@ clause from being indented, in which case the original SQL would become:
 There is also a similar :code:`indented_on_contents` config (defaulted to
 :code:`True`) which can be set to :code:`False` to align any :code:`AND`
 subsections of an :code:`ON` block with each other. If set to :code:`False`
-the original SQL would become:
+(assuming implicit indents are still enabled) the original SQL would become:
 
 .. code-block:: sql
 
@@ -553,8 +624,9 @@ the original SQL would become:
       AND condition2
 
 These can also be combined, so if :code:`indented_using_on` config is set to
-:code:`False`, and :code:`indented_on_contents` is also set to :code:`False`
-then the SQL would become:
+:code:`False`, :code:`indented_on_contents` is also set to :code:`False`, and
+:code:`allow_implicit_indents` is set to :code:`True` then the SQL would
+become:
 
 .. code-block:: sql
 
@@ -583,6 +655,21 @@ indented within the :code:`WITH` clause:
 
    SELECT 1 FROM some_cte
 
+There is also a similar :code:`indented_then` config (defaulted to
+:code:`True`) which can be set to :code:`False` to allow :code:`THEN`
+without an indent after :code:`WHEN`:
+
+.. code-block:: sql
+
+   SELECT
+      a,
+      CASE
+         WHEN b >= 42 THEN
+            1
+         ELSE 0
+      END AS c
+   FROM some_table
+
 By default, *SQLFluff* aims to follow the most common approach
 to indentation. However, if you have other versions of indentation which are
 supported by published style guides, then please submit an issue on GitHub
@@ -624,10 +711,11 @@ available:
       the spacing before commas (as shown in the config above), where line
       breaks may be allowed, but if not they should *touch* the element before.
 
-   *  The value of :code:`inline` is effectively the same as :code:`touch`
-      but in addition, no line breaks are allowed. This is best illustrated
+   *  Both of the above can be qualified with the :code:`:inline` modifier -
+      which prevents newlines within the segment. This is best illustrated
       by the spacing found in a qualified identifier like
-      :code:`my_schema.my_table`.
+      :code:`my_schema.my_table`, which uses :code:`touch:inline`, or other
+      clauses where we want to force some elements to be on the same line.
 
 *  **Line Position**: set using the :code:`line_position` option. By default
    this is unset, which implies no particular line position requirements. The
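
Since LT01 now owns all of the single-space handling described above, fixing just that one rule is enough to condense duplicated whitespace. A minimal sketch using the simple API (the exact fixed string is an expectation, not a verified output):

    import sqlfluff

    fixed = sqlfluff.fix(
        "SELECT a   +  b FROM tbl\n", dialect="ansi", rules=["LT01"]
    )
    print(fixed)  # expected: "SELECT a + b FROM tbl\n"
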
diff --git a/docs/source/partials/.gitignore b/docs/source/partials/.gitignore
new file mode 100644
index 0000000..7ef23f0
--- /dev/null
+++ b/docs/source/partials/.gitignore
@@ -0,0 +1,2 @@
+rule_table.rst
+rule_summaries.rst
diff --git a/docs/source/partials/README.md b/docs/source/partials/README.md
new file mode 100644
index 0000000..e9044ce
--- /dev/null
+++ b/docs/source/partials/README.md
@@ -0,0 +1,9 @@
+This folder is ignored from the main build and intended
+only for files included in others via the `.. include::`
+directive.
+
+Some of those files are also auto-generated by scripts,
+in which case they should be included in the `.gitignore`
+and not edited by hand.
+
+See docs/generate-rule-docs.py for more info.
diff --git a/docs/source/partials/starter_config.cfg b/docs/source/partials/starter_config.cfg
new file mode 100644
index 0000000..a1519b5
--- /dev/null
+++ b/docs/source/partials/starter_config.cfg
@@ -0,0 +1,68 @@
+[sqlfluff]
+
+# Supported dialects https://docs.sqlfluff.com/en/stable/dialects.html
+# Or run 'sqlfluff dialects'
+dialect = snowflake
+
+# One of [raw|jinja|python|placeholder]
+templater = jinja
+
+# Comma separated list of rules to exclude, or None
+# See https://docs.sqlfluff.com/en/stable/configuration.html#enabling-and-disabling-rules
+# AM04 (ambiguous.column_count) and ST06 (structure.column_order) are
+# two of the more controversial rules included to illustrate usage.
+exclude_rules = ambiguous.column_count, structure.column_order
+
+# The standard max_line_length is 80 in line with the convention of
+# other tools and several style guides. Many projects however prefer
+# something a little longer.
+# Set to zero or negative to disable checks.
+max_line_length = 120
+
+# CPU processes to use while linting.
+# The default is "single threaded" to allow easy debugging, but this
+# is often undesirable at scale.
+# If positive, just implies number of processes.
+# If negative or zero, implies number_of_cpus - specified_number.
+# e.g. -1 means use all processors but one. 0 means all cpus.
+processes = -1
+
+# If using the dbt templater, we recommend setting the project dir.
+[sqlfluff:templater:dbt]
+project_dir = ./
+
+[sqlfluff:indentation]
+# While implicit indents are not enabled by default, many of the
+# SQLFluff maintainers do use them in their projects.
+allow_implicit_indents = true
+
+# The default configuration for aliasing rules is "consistent"
+# which will auto-detect the setting from the rest of the file. This
+# is less desirable in a new project and you may find this (slightly
+# more strict) setting more useful.
+[sqlfluff:rules:aliasing.table]
+aliasing = explicit
+[sqlfluff:rules:aliasing.column]
+aliasing = explicit
+[sqlfluff:rules:aliasing.length]
+min_alias_length = 3
+
+# The default configuration for capitalisation rules is "consistent"
+# which will auto-detect the setting from the rest of the file. This
+# is less desirable in a new project and you may find this (slightly
+# more strict) setting more useful.
+# Typically we find users rely on syntax highlighting rather than
+# capitalisation to distinguish between keywords and identifiers.
+# Clearly, if your organisation has already settled on uppercase
+# formatting for any of these syntax elements then set them to "upper".
+# See https://stackoverflow.com/questions/608196/why-should-i-capitalize-my-sql-keywords-is-there-a-good-reason
+[sqlfluff:rules:capitalisation.keywords]
+capitalisation_policy = lower
+[sqlfluff:rules:capitalisation.identifiers]
+capitalisation_policy = lower
+[sqlfluff:rules:capitalisation.functions]
+extended_capitalisation_policy = lower
+[sqlfluff:rules:capitalisation.literals]
+capitalisation_policy = lower
+[sqlfluff:rules:capitalisation.types]
+extended_capitalisation_policy = lower
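
Because 2.x accepts rule *names* and group names as selectors, the exclude_rules value from this starter config can also be passed straight through the Python API. A minimal sketch, assuming name-based selectors are accepted by the simple API exactly as they are in config files:

    import sqlfluff

    violations = sqlfluff.lint(
        "SELECT 1 FROM tbl\n",
        dialect="ansi",
        exclude_rules=["ambiguous.column_count", "structure.column_order"],
    )
    print([v["code"] for v in violations])
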
diff --git a/docs/source/production.rst b/docs/source/production.rst
index 2dbb61e..2a8ec10 100644
--- a/docs/source/production.rst
+++ b/docs/source/production.rst
@@ -86,6 +86,8 @@ such as:
 * Configuring `diff-quality` to return an error code if the quality is too low
 * Troubleshooting
 
+.. _using-pre-commit:
+
 Using `pre-commit`_
 ^^^^^^^^^^^^^^^^^^^
 
@@ -137,7 +139,7 @@ like this:
         # additional_dependencies: ['<dbt-adapter>', 'sqlfluff-templater-dbt']
       - id: sqlfluff-fix
         # Arbitrary arguments to show an example
-        # args: [--rules, "L003,L014"]
+        # args: [--rules, "LT02,CP02"]
         # additional_dependencies: ['<dbt-adapter>', 'sqlfluff-templater-dbt']
 
 When trying to use the `dbt templater`_, uncomment the
diff --git a/docs/source/releasenotes.rst b/docs/source/releasenotes.rst
new file mode 100644
index 0000000..56dc97a
--- /dev/null
+++ b/docs/source/releasenotes.rst
@@ -0,0 +1,232 @@
+.. _releasenotes:
+
+Release Notes
+=============
+
+This page aims to act as a guide for migrating between major SQLFluff
+releases. Necessarily this means that bugfix releases, or releases
+requiring no change for the user, are not mentioned. For full details
+of each individual release, see the detailed changelog_.
+
+.. _changelog: https://github.com/sqlfluff/sqlfluff/blob/main/CHANGELOG.md
+
+.. _upgrading_2_0:
+
+
+Upgrading from 1.x to 2.0
+-------------------------
+
+Upgrading to 2.0 brings several important breaking changes:
+
+* All bundled rules have been recoded, both from the generic :code:`L00X`
+  format into groups of similar codes (e.g. an *aliasing* group with codes
+  of the format :code:`AL0X`), and also given *names* to allow much clearer
+  referencing (e.g. :code:`aliasing.column`).
+* :ref:`ruleconfig` now uses the rule *name* rather than the rule *code* to
+  specify the section. Any unrecognised references in config files (whether
+  they are references which *do* match existing rules by code or alias, or
+  whether they match no rules at all) will raise warnings at runtime.
+* A complete re-write of layout and whitespace handling rules (see
+  :ref:`layoutref`), and with that a change in how layout is configured
+  (see :ref:`layoutconfig`) and the combination of some rules that were
+  previously separate. One example of this is that the legacy rules
+  :code:`L001`, :code:`L005`, :code:`L006`, :code:`L008`, :code:`L023`,
+  :code:`L024`, :code:`L039`, :code:`L048` & :code:`L071` have been combined
+  simply into :sqlfluff:ref:`LT01`.
+
+Recommended upgrade steps
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To upgrade smoothly between versions, we recommend the following sequence:
+
+#. The upgrade path will be simpler if you have a slimmer configuration file.
+   Before upgrading, consider removing any sections from your configuration
+   file (often :code:`.sqlfluff`, see :ref:`config`) which match the current
+   :ref:`defaultconfig`. There is no need to respecify defaults in your local
+   config if they are not different to the stock config.
+
+#. In a local (or other *non-production*) environment, upgrade to SQLFluff
+   2.0.x. We recommend using a `compatible release`_ specifier such
+   as :code:`~=2.0.0`, to ensure any minor bugfix releases are automatically
+   included.
+
+#. Examine your configuration file (as mentioned above), and evaluate how
+   rules are currently specified. We recommend primarily using *either*
+   :code:`rules` *or* :code:`exclude_rules` rather than both, as detailed
+   in :ref:`ruleselection`. Using either the :code:`sqlfluff rules` CLI
+   command or the online :ref:`ruleref`, replace *all references* to legacy
+   rule codes (i.e. codes of the form :code:`L0XX`). Specifically:
+
+   * In the :code:`rules` and :code:`exclude_rules` config values. Here,
+     consider using group specifiers or names to make your config simpler
+     to read and understand (e.g. :code:`capitalisation` is much more
+     understandable than :code:`CP01,CP02,CP03,CP04,CP05`, but the two
+     specifiers will have the same effect). Note that while legacy codes
+     *will still be understood* here (because they remain valid as aliases
+     for those rules) - you may find that some rules no longer exist in
+     isolation and so these references may be misleading. e.g. :code:`L005`
+     is now an alias for :sqlfluff:ref:`layout.spacing` but
+     that rule is much more broad ranging than the original scope of
+     :code:`L005`, which was only spacing around commas.
+
+   * In :ref:`ruleconfig`. In particular here, legacy references to rule
+     codes are *no longer valid*, will raise warnings, and until resolved,
+     the configuration in those sections will be ignored. The new section
+     references should include the rule *name* (e.g.
+     :code:`[sqlfluff:rules:capitalisation.keywords]` rather than
+     :code:`[sqlfluff:rules:L010]`). This switch is designed to make
+     configuration files more readable, but we cannot support backward
+     compatibility here without also having to resolve the potential
+     ambiguity of the scenario where both *code-based* and *name-based*
+     references are used.
+
+   * Review the :ref:`layoutconfig` documentation, and check whether any
+     indentation or layout configuration should be revised.
+
+#. Check your project for :ref:`in_file_config` which refer to rule codes.
+   Alter these in the same manner as described above for configuration files.
+
+#. Test linting your project for unexpected linting issues. Where found,
+   consider whether to use :code:`sqlfluff fix` to repair them in bulk,
+   or (if you disagree with the changes) consider changing which rules
+   you enable or their configuration accordingly. In particular you may notice:
+
+   * The indentation rule (:code:`L003` as was, now :sqlfluff:ref:`LT02`) has
+     had a significant rewrite, and while much more flexible and accurate, it
+     is also more specific. Note that :ref:`hangingindents` are no longer
+     supported, and that while not enabled by default, many users may find
+     that enabling :ref:`implicitindents` fits their organisation's style
+     better.
+
+   * The spacing rule (:sqlfluff:ref:`LT01`: :sqlfluff:ref:`layout.spacing`)
+     has a much wider scope, and so may pick up spacing issues that were not
+     previously enforced. If you disagree with any of these, you can
+     override the :code:`sqlfluff:layout` sections of the config with
+     different (or just more liberal) settings, like :code:`any`.
+
+.. _`compatible release`: https://peps.python.org/pep-0440/#compatible-release
+
+
+Example 2.0 config
+^^^^^^^^^^^^^^^^^^
+
+To illustrate the points above, this is an illustrative example config
+for a 2.0 compatible project. Note that the config is fairly brief and
+sets only the values which differ from the default config.
+
+.. code-block:: cfg
+
+    [sqlfluff]
+    dialect = snowflake
+    templater = dbt
+    max_line_length = 120
+
+    # Exclude some specific rules based on a mixture of codes and names
+    exclude_rules = RF02, RF03, RF04, ST06, ST07, AM05, AM06, convention.left_join, layout.select_targets
+
+    [sqlfluff:indentation]
+    # Enabling implicit indents for this project.
+    # See https://docs.sqlfluff.com/en/stable/layout.html#configuring-indent-locations
+    allow_implicit_indents = True
+
+    # Add a few specific rule configurations, referenced by the rule names
+    # and not by the rule codes.
+    [sqlfluff:rules:capitalisation.keywords]
+    capitalisation_policy = lower
+
+    [sqlfluff:rules:capitalisation.identifiers]
+    capitalisation_policy = lower
+
+    [sqlfluff:rules:capitalisation.functions]
+    extended_capitalisation_policy = lower
+
+    # An example of setting a custom layout specification which
+    # is more lenient than default config.
+    [sqlfluff:layout:type:set_operator]
+    line_position = alone
+
+
+Upgrading to 1.4
+----------------
+
+This release brings several internal changes, and acts as a prelude
+to 2.0.0. In particular, the following config values have changed:
+
+* :code:`sqlfluff:rules:L007:operator_new_lines` has been changed to
+  :code:`sqlfluff:layout:type:binary_operator:line_position`.
+* :code:`sqlfluff:rules:comma_style` and
+  :code:`sqlfluff:rules:L019:comma_style` have both been consolidated
+  into :code:`sqlfluff:layout:type:comma:line_position`.
+
+If any of these values have been set in your config, they will be
+automatically translated to the new values at runtime, and a warning
+will be shown. To silence the warning, update your config file to the
+new values. For more details on configuring layout see :ref:`layoutconfig`.
+
+
+Upgrading to 1.3
+----------------
+
+This release brings several potentially breaking changes to the underlying
+parse tree. If you use the CLI tool in a linting context you should notice
+no change. If however your application relies on the structure of the SQLFluff
+parse tree or the naming of certain elements within the yaml format, then
+this may not be a drop-in replacement. Specifically:
+
+* The addition of a new :code:`end_of_file` meta segment at the end of
+  the parse structure.
+* The addition of a :code:`template_loop` meta segment to signify a jump
+  backward in the source file within a loop structure (e.g. a jinja
+  :code:`for` loop).
+* Much more specific types on some raw segments, in particular
+  :code:`identifier` and :code:`literal` type segments will now appear
+  in the parse tree with their more specific type (which used to be called
+  :code:`name`) e.g. :code:`naked_identifier`, :code:`quoted_identifier`,
+  :code:`numeric_literal` etc...
+
+If using the python api, the *parent* type (such as :code:`identifier`)
+will still register if you call :code:`.is_type("identifier")`, as this
+function checks all inherited types. However the eventual type returned
+by :code:`.get_type()` will now be (in most cases) what used to be
+accessible at :code:`.name`. The :code:`name` attribute will be deprecated
+in a future release.
+
+
+Upgrading to 1.2
+----------------
+
+This release introduces the capability to automatically skip large files, and
+sets default limits on the maximum file size before a file is skipped. Users
+should see a performance gain, but may experience warnings associated with
+these skipped files.
+
+
+Upgrades pre 1.0
+----------------
+
+* **0.13.x** new rule for quoted literals, option to remove hanging indents in
+  rule L003, and introduction of ``ignore_words_regex``.
+* **0.12.x** dialect is now mandatory, the ``spark3`` dialect was renamed to
+  ``sparksql`` and datatype capitalisation was extracted from L010 to its own
+  rule L063.
+* **0.11.x** rule L030 changed to use ``extended_capitalisation_policy``.
+* **0.10.x** removed support for older dbt versions < 0.20 and stopped ``fix``
+  attempting to fix unparsable SQL.
+* **0.9.x** refinement of the Simple API, dbt 1.0.0 compatibility,
+  and the official SQLFluff Docker image.
+* **0.8.x** an improvement to the performance of the parser, a rebuild of the
+  Jinja Templater, and a progress bar for the CLI.
+* **0.7.x** extracted the dbt templater to a separate plugin and removed the
+  ``exasol_fs`` dialect (now merged in with the main ``exasol``).
+* **0.6.x** introduced parallel processing, which necessitated a big re-write
+  of several innards.
+* **0.5.x** introduced some breaking changes to the API.
+* **0.4.x** dropped python 3.5, added the dbt templater, source mapping and
+  also introduced the python API.
+* **0.3.x** drops support for python 2.7 and 3.4, and also reworks the
+  handling of indentation linting in a potentially not backward
+  compatible way.
+* **0.2.x** added templating support and a big restructure of rules
+  and changed how users might interact with SQLFluff on templated code.
+* **0.1.x** involved a major re-write of the parser, completely changing
+  the behaviour of the tool with respect to complex parsing.
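
When migrating, the rule index itself can be queried to translate legacy codes. A minimal sketch, assuming (per the upgrade notes above, where legacy codes remain valid as aliases) that each RuleTuple from list_rules() exposes those aliases alongside the new code and name:

    import sqlfluff

    for rule in sqlfluff.list_rules():
        # getattr() guards the assumption that an `aliases` field exists.
        if "L010" in getattr(rule, "aliases", ()):
            print(rule.code, rule.name)  # expected: CP01 capitalisation.keywords
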
diff --git a/docs/source/rules.rst b/docs/source/rules.rst
index 74f293b..e6047b2 100644
--- a/docs/source/rules.rst
+++ b/docs/source/rules.rst
@@ -7,7 +7,8 @@ Rules Reference
 which work their way through the parsed structure of a query to evaluate
 a particular rule or set of rules. The intent is that the definition of
 each specific rule should be really streamlined and only contain the logic
-for the rule itself, with all the other mechanics abstracted away.
+for the rule itself, with all the other mechanics abstracted away. To
+understand how rules are enabled and disabled, see :ref:`ruleselection`.
 
 Core Rules
 ----------
 and customize a rule set that best suits their organization.
 See the :ref:`config` section for more information on how to enable
 only :code:`core` rules by default.
 
-Specific Rules
---------------
-
-.. automodule:: sqlfluff.rules
-   :members:
-   :member-order: alphabetical
-
-.. _inline_ignoring_errors:
-
 Inline Ignoring Errors
 -----------------------
+
 `SQLFluff` features inline error ignoring. For example, the following will
 ignore the lack of whitespace surrounding the ``*`` operator.
 
 .. code-block:: sql
 
-   a.a*a.b AS bad_1  -- noqa: L006
+   a.a*a.b AS bad_1  -- noqa: LT01
 
 Multiple rules can be ignored by placing them in a comma-delimited list.
 
 .. code-block:: sql
 
-   a.a *  a.b AS bad_2,  -- noqa: L007, L006
+   a.a *  a.b AS bad_2,  -- noqa: LT01, LT03
 
 It is also possible to ignore non-rule based errors, and instead opt to
 ignore templating (``TMP``) & parsing (``PRS``) errors.
@@ -78,6 +71,7 @@ all rules on the given line.
 
    a.a*a.b AS bad_3  -- noqa
 
+.. _inline_ignoring_errors:
 
 Ignoring line ranges
 ^^^^^^^^^^^^^^^^^^^^
@@ -89,8 +83,8 @@ ignored until a corresponding `-- noqa:enable=<rule>[,...] | all` directive.
 
 .. code-block:: sql
 
-    -- Ignore rule L012 from this line forward
-    SELECT col_a a FROM foo -- noqa: disable=L012
+    -- Ignore rule AL02 from this line forward
+    SELECT col_a a FROM foo -- noqa: disable=AL02
 
     -- Ignore all rules from this line forward
     SELECT col_a a FROM foo -- noqa: disable=all
@@ -100,3 +94,10 @@ ignored until a corresponding `-- noqa:enable=<rule>[,...] | all` directive.
 
 
 .. _`pylint's "pylint" directive"`: http://pylint.pycqa.org/en/latest/user_guide/message-control.html
+
+Rule Index
+----------
+
+.. include:: partials/rule_table.rst
+
+.. include:: partials/rule_summaries.rst
diff --git a/docs/source/teamrollout.rst b/docs/source/teamrollout.rst
index 1438ddf..92b1a2b 100644
--- a/docs/source/teamrollout.rst
+++ b/docs/source/teamrollout.rst
@@ -103,10 +103,10 @@ this an empowering experience that everyone can get involved with
 rather than *another piece of admin they need to do*.
 
 At this stage, you might also want to consider other tools in the
-SQLFluff ecosystem such as the `SQLFluff pre-commit hook`_ and
-the `SQLFluff VSCode plugin`_ or `SQLFluff online formatter`_.
+SQLFluff ecosystem such as the :ref:`SQLFluff pre-commit hook
+<using-pre-commit>` and the `SQLFluff VSCode plugin`_ or `SQLFluff
+online formatter`_.
 
-.. _`SQLFluff pre-commit hook`: https://github.com/sqlfluff/sqlfluff-github-actions
 .. _`SQLFluff VSCode plugin`: https://github.com/sqlfluff/vscode-sqlfluff
 .. _`SQLFluff online formatter`: https://online.sqlfluff.com/
 
diff --git a/examples/01_basic_api_usage.py b/examples/01_basic_api_usage.py
index a267228..7921bf9 100644
--- a/examples/01_basic_api_usage.py
+++ b/examples/01_basic_api_usage.py
@@ -12,7 +12,7 @@ lint_result = sqlfluff.lint(my_bad_query, dialect="bigquery")
 # lint_result =
 # [
 #     {
-#         "code": "L010",
+#         "code": "CP01",
 #         "line_no": 1,
 #         "line_pos": 1,
 #         "description": "Keywords must be consistently upper case.",
@@ -27,11 +27,11 @@ fix_result_1 = sqlfluff.fix(my_bad_query, dialect="bigquery")
 # fix_result_1 = 'SELECT  *, 1, blah AS  foo  FROM myschema.mytable\n'
 
 # We can also fix just specific rules.
-fix_result_2 = sqlfluff.fix(my_bad_query, rules=["L010"])
+fix_result_2 = sqlfluff.fix(my_bad_query, rules=["CP01"])
 # fix_result_2 = 'SELECT  *, 1, blah AS  fOO  FROM mySchema.myTable'
 
 # Or a subset of rules...
-fix_result_3 = sqlfluff.fix(my_bad_query, rules=["L010", "L014"])
+fix_result_3 = sqlfluff.fix(my_bad_query, rules=["CP01", "CP02"])
 # fix_result_3 = 'SELECT  *, 1, blah AS  fOO  FROM myschema.mytable'
 
 #  -------- PARSING ----------
diff --git a/examples/03_getting_rules_and_dialects.py b/examples/03_getting_rules_and_dialects.py
index 08e5404..9366200 100644
--- a/examples/03_getting_rules_and_dialects.py
+++ b/examples/03_getting_rules_and_dialects.py
@@ -15,10 +15,10 @@ dialect_names = [dialect.label for dialect in dialects]
 rules = sqlfluff.list_rules()
 # rules = [
 #     RuleTuple(
-#         code='Example_L001',
+#         code='Example_LT01',
 #         description='ORDER BY on these columns is forbidden!'
 #     ),
 #     ...
 # ]
 rule_codes = [rule.code for rule in rules]
-# rule_codes = ["L001", "L002", ...]
+# rule_codes = ["LT01", "LT02", ...]
diff --git a/images/datacoves.png b/images/datacoves.png
new file mode 100644
index 0000000..279cd18
Binary files /dev/null and b/images/datacoves.png differ
diff --git a/plugins/sqlfluff-plugin-example/README.md b/plugins/sqlfluff-plugin-example/README.md
new file mode 100644
index 0000000..5bc58bf
--- /dev/null
+++ b/plugins/sqlfluff-plugin-example/README.md
@@ -0,0 +1,7 @@
+# Example rules plugin
+
+This example plugin showcases the ability to setup
+installable rule plugins.
+
+This interface is supported from version `0.4.0` of
+SQLFluff onwards.
diff --git a/plugins/sqlfluff-plugin-example/setup.py b/plugins/sqlfluff-plugin-example/setup.py
index cf78cad..2ab5528 100644
--- a/plugins/sqlfluff-plugin-example/setup.py
+++ b/plugins/sqlfluff-plugin-example/setup.py
@@ -1,4 +1,4 @@
-"""Setup file for example plugin."""
+"""Setup file for an example rules plugin."""
 from setuptools import find_packages, setup
 
 # Change these names in your plugin, e.g. company name or plugin purpose.
@@ -6,19 +6,13 @@ PLUGIN_LOGICAL_NAME = "example"
 PLUGIN_ROOT_MODULE = "example"
 
 setup(
-    name="sqlfluff-plugin-{plugin_logical_name}".format(
-        plugin_logical_name=PLUGIN_LOGICAL_NAME
-    ),
+    name=f"sqlfluff-plugin-{PLUGIN_LOGICAL_NAME}",
+    version="1.0.0",
     include_package_data=True,
     package_dir={"": "src"},
     packages=find_packages(where="src"),
     install_requires="sqlfluff>=0.4.0",
     entry_points={
-        "sqlfluff": [
-            "{plugin_logical_name} = {plugin_root_module}.rules".format(
-                plugin_logical_name=PLUGIN_LOGICAL_NAME,
-                plugin_root_module=PLUGIN_ROOT_MODULE,
-            )
-        ]
+        "sqlfluff": [f"sqlfluff_{PLUGIN_LOGICAL_NAME} = {PLUGIN_ROOT_MODULE}.rules"]
     },
 )
diff --git a/plugins/sqlfluff-plugin-example/src/example/rules.py b/plugins/sqlfluff-plugin-example/src/example/rules.py
index 2499ee4..7bd6a8a 100644
--- a/plugins/sqlfluff-plugin-example/src/example/rules.py
+++ b/plugins/sqlfluff-plugin-example/src/example/rules.py
@@ -1,4 +1,7 @@
-"""An example of a custom rule implemented through the plugin system."""
+"""An example of a custom rule implemented through the plugin system.
+
+This uses the rules API supported from 0.4.0 onwards.
+"""
 
 from sqlfluff.core.plugin import hookimpl
 from sqlfluff.core.rules import (
@@ -7,11 +10,6 @@ from sqlfluff.core.rules import (
     RuleContext,
 )
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
 from typing import List, Type
 import os.path
 from sqlfluff.core.config import ConfigLoader
@@ -42,9 +40,6 @@ def get_configs_info() -> dict:
 
 # These two decorators allow plugins
 # to be displayed in the sqlfluff docs
-@document_groups
-@document_fix_compatible
-@document_configuration
 class Rule_Example_L001(BaseRule):
     """ORDER BY on these columns is forbidden!
 
@@ -74,6 +69,7 @@ class Rule_Example_L001(BaseRule):
     groups = ("all",)
     config_keywords = ["forbidden_columns"]
     crawl_behaviour = SegmentSeekerCrawler({"orderby_clause"})
+    is_fix_compatible = True
 
     def __init__(self, *args, **kwargs):
         """Overwrite __init__ to set config."""
diff --git a/plugins/sqlfluff-plugin-example/test/rules/rule_test_cases_test.py b/plugins/sqlfluff-plugin-example/test/rules/rule_test_cases_test.py
index d6b9363..32548dc 100644
--- a/plugins/sqlfluff-plugin-example/test/rules/rule_test_cases_test.py
+++ b/plugins/sqlfluff-plugin-example/test/rules/rule_test_cases_test.py
@@ -1,7 +1,7 @@
 """Runs the rule test cases."""
 import os
 import pytest
-from sqlfluff.testing.rules import load_test_cases, rules__test_helper
+from sqlfluff.utils.testing.rules import load_test_cases, rules__test_helper
 
 ids, test_cases = load_test_cases(
     test_cases_path=os.path.join(
diff --git a/plugins/sqlfluff-templater-dbt/docker/init b/plugins/sqlfluff-templater-dbt/docker/init
new file mode 100755
index 0000000..078faa5
--- /dev/null
+++ b/plugins/sqlfluff-templater-dbt/docker/init
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+pip install --no-dependencies -e . -e plugins/sqlfluff-templater-dbt
+pushd plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project
+dbt deps
+popd
diff --git a/plugins/sqlfluff-templater-dbt/docker/shell b/plugins/sqlfluff-templater-dbt/docker/shell
index 1a4c56f..10bbc8f 100755
--- a/plugins/sqlfluff-templater-dbt/docker/shell
+++ b/plugins/sqlfluff-templater-dbt/docker/shell
@@ -1,3 +1,3 @@
 #!/usr/bin/env bash
 my_path="$( cd "$(dirname "$0")"; pwd -P)"
-docker-compose -f ${my_path}/docker-compose.yml exec app bash
+docker compose -f ${my_path}/docker-compose.yml exec app bash
diff --git a/plugins/sqlfluff-templater-dbt/docker/startup b/plugins/sqlfluff-templater-dbt/docker/startup
index 1709197..866354f 100755
--- a/plugins/sqlfluff-templater-dbt/docker/startup
+++ b/plugins/sqlfluff-templater-dbt/docker/startup
@@ -6,3 +6,4 @@ my_path="$( cd "$(dirname "$0")"; pwd -P)"
 ${my_path}/shutdown
 docker compose -f ${my_path}/docker-compose.yml build
 docker compose -f ${my_path}/docker-compose.yml up -d
+docker compose -f ${my_path}/docker-compose.yml exec app "/app/plugins/sqlfluff-templater-dbt/docker/init"
diff --git a/plugins/sqlfluff-templater-dbt/setup.cfg b/plugins/sqlfluff-templater-dbt/setup.cfg
index 874d19f..2641cc5 100644
--- a/plugins/sqlfluff-templater-dbt/setup.cfg
+++ b/plugins/sqlfluff-templater-dbt/setup.cfg
@@ -1,6 +1,6 @@
 [metadata]
 name = sqlfluff-templater-dbt
-version = 1.4.5
+version = 2.0.5
 description = Lint your dbt project SQL
 long_description = file: README.md
 long_description_content_type = text/markdown
@@ -43,8 +43,11 @@ keywords =
     formatter
     bigquery
     clickhouse
+    databricks
     db2
+    duckdb
     exasol
+    greenplum
     hive
     materialize
     mysql
@@ -62,14 +65,18 @@ keywords =
 packages = find:
 python_requires = >=3.7
 install_requires =
-    sqlfluff==1.4.5
-    dbt-core>=0.20.0
+    sqlfluff==2.0.5
+    dbt-core>=1.0.0
     jinja2-simple-tags>=0.3.1
     markupsafe
+    pydantic
+    rich
+    ruamel.yaml
 
 [options.packages.find]
 include =
     sqlfluff_templater_dbt
+    sqlfluff_templater_dbt.osmosis
 
 [options.entry_points]
 sqlfluff =
diff --git a/plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/templater.py b/plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/templater.py
index 26102c9..e9dd4a2 100644
--- a/plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/templater.py
+++ b/plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/templater.py
@@ -14,10 +14,20 @@ from dbt.config import read_user_config
 from dbt.config.runtime import RuntimeConfig as DbtRuntimeConfig
 from dbt.adapters.factory import register_adapter, get_adapter
 from dbt.compilation import Compiler as DbtCompiler
-from dbt.exceptions import (
-    CompilationException as DbtCompilationException,
-    FailedToConnectException as DbtFailedToConnectException,
-)
+
+try:
+    from dbt.exceptions import (
+        CompilationException as DbtCompilationException,
+        FailedToConnectException as DbtFailedToConnectException,
+        DbtProjectError,
+    )
+except ImportError:
+    from dbt.exceptions import (
+        CompilationError as DbtCompilationException,
+        FailedToConnectError as DbtFailedToConnectException,
+        DbtProjectError,
+    )
+
 from dbt import flags
 from jinja2 import Environment
 from jinja2_simple_tags import StandaloneTag
@@ -25,7 +35,7 @@ from jinja2_simple_tags import StandaloneTag
 from sqlfluff.cli.formatters import OutputStreamFormatter
 from sqlfluff.core import FluffConfig
 from sqlfluff.core.cached_property import cached_property
-from sqlfluff.core.errors import SQLTemplaterError, SQLFluffSkipFile
+from sqlfluff.core.errors import SQLTemplaterError, SQLFluffSkipFile, SQLFluffUserError
 
 from sqlfluff.core.templaters.base import TemplatedFile, large_file_check
 
@@ -39,15 +49,10 @@ DBT_VERSION = get_installed_version()
 DBT_VERSION_STRING = DBT_VERSION.to_version_string()
 DBT_VERSION_TUPLE = (int(DBT_VERSION.major), int(DBT_VERSION.minor))
 
-if DBT_VERSION_TUPLE >= (1, 0):
-    from dbt.flags import PROFILES_DIR
-else:
-    from dbt.config.profile import PROFILES_DIR
-
 if DBT_VERSION_TUPLE >= (1, 3):
     COMPILED_SQL_ATTRIBUTE = "compiled_code"
     RAW_SQL_ATTRIBUTE = "raw_code"
-else:
+else:  # pragma: no cover
     COMPILED_SQL_ATTRIBUTE = "compiled_sql"
     RAW_SQL_ATTRIBUTE = "raw_sql"
 
@@ -85,35 +90,29 @@ class DbtTemplater(JinjaTemplater):
         return [("templater", self.name), ("dbt", self.dbt_version)]
 
     @property
-    def dbt_version(self):
+    def dbt_version(self):  # pragma: no cover
         """Gets the dbt version."""
         return DBT_VERSION_STRING
 
-    @property
-    def dbt_version_tuple(self):
-        """Gets the dbt version as a tuple on (major, minor)."""
-        return DBT_VERSION_TUPLE
-
     @cached_property
     def dbt_config(self):
         """Loads the dbt config."""
-        if self.dbt_version_tuple >= (1, 0):
-            # Here, we read flags.PROFILE_DIR directly, prior to calling
-            # set_from_args(). Apparently, set_from_args() sets PROFILES_DIR
-            # to a lowercase version of the value, and the profile wouldn't be
-            # found if the directory name contained uppercase letters. This fix
-            # was suggested and described here:
-            # https://github.com/sqlfluff/sqlfluff/issues/2253#issuecomment-1018722979
-            user_config = read_user_config(flags.PROFILES_DIR)
-            flags.set_from_args(
-                DbtConfigArgs(
-                    project_dir=self.project_dir,
-                    profiles_dir=self.profiles_dir,
-                    profile=self._get_profile(),
-                    vars=self._get_cli_vars(),
-                ),
-                user_config,
-            )
+        # Here, we read flags.PROFILE_DIR directly, prior to calling
+        # set_from_args(). Apparently, set_from_args() sets PROFILES_DIR
+        # to a lowercase version of the value, and the profile wouldn't be
+        # found if the directory name contained uppercase letters. This fix
+        # was suggested and described here:
+        # https://github.com/sqlfluff/sqlfluff/issues/2253#issuecomment-1018722979
+        user_config = read_user_config(flags.PROFILES_DIR)
+        flags.set_from_args(
+            DbtConfigArgs(
+                project_dir=self.project_dir,
+                profiles_dir=self.profiles_dir,
+                profile=self._get_profile(),
+                vars=self._get_cli_vars(),
+            ),
+            user_config,
+        )
         self.dbt_config = DbtRuntimeConfig.from_args(
             DbtConfigArgs(
                 project_dir=self.project_dir,
@@ -152,6 +151,8 @@ class DbtTemplater(JinjaTemplater):
             # https://github.com/dbt-labs/dbt-core/issues/6055 is solved.
             os.chdir(self.project_dir)
             self.dbt_manifest = ManifestLoader.get_full_manifest(self.dbt_config)
+        except DbtProjectError as err:  # pragma: no cover
+            raise SQLFluffUserError(f"DbtProjectError: {err}")
         finally:
             os.chdir(old_cwd)
         return self.dbt_manifest
@@ -197,7 +198,7 @@ class DbtTemplater(JinjaTemplater):
                 self.sqlfluff_config.get_section(
                     (self.templater_selector, self.name, "profiles_dir")
                 )
-                or PROFILES_DIR
+                or flags.PROFILES_DIR
             )
         )
 
@@ -425,17 +426,12 @@ class DbtTemplater(JinjaTemplater):
             if os.path.abspath(macro.original_file_path) == abspath:
                 return "a macro"
 
-        if DBT_VERSION_TUPLE >= (1, 0):
-            # Scan disabled nodes.
-            for nodes in self.dbt_manifest.disabled.values():
-                for node in nodes:
-                    if os.path.abspath(node.original_file_path) == abspath:
-                        return "disabled"
-        else:
-            model_name = os.path.splitext(os.path.basename(fname))[0]
-            if self.dbt_manifest.find_disabled_by_name(name=model_name):
-                return "disabled"
-        return None
+        # Scan disabled nodes.
+        for nodes in self.dbt_manifest.disabled.values():
+            for node in nodes:
+                if os.path.abspath(node.original_file_path) == abspath:
+                    return "disabled"
+        return None  # pragma: no cover
 
     def _unsafe_process(self, fname, in_str=None, config=None):
         original_file_path = os.path.relpath(fname, start=os.getcwd())
@@ -491,7 +487,7 @@ class DbtTemplater(JinjaTemplater):
                     node=node,
                     manifest=self.dbt_manifest,
                 )
-            except Exception as err:
+            except Exception as err:  # pragma: no cover
                 templater_logger.exception(
                     "Fatal dbt compilation error on %s. This occurs most often "
                     "during incorrect sorting of ephemeral models before linting. "
@@ -513,7 +509,7 @@ class DbtTemplater(JinjaTemplater):
                 # If injected SQL is present, it contains a better picture
                 # of what will actually hit the database (e.g. with tests).
                 # However it's not always present.
-                compiled_sql = node.injected_sql
+                compiled_sql = node.injected_sql  # pragma: no cover
             else:
                 compiled_sql = getattr(node, COMPILED_SQL_ATTRIBUTE)
 
@@ -548,7 +544,7 @@ class DbtTemplater(JinjaTemplater):
             # compiling. Unless fixed (like below), this will cause:
             #    1. Assertion errors in TemplatedFile, when it sanity checks the
             #       contents of the sliced_file array.
-            #    2. L009 linting errors when running "sqlfluff lint foo_bar.sql"
+            #    2. LT12 linting errors when running "sqlfluff lint foo_bar.sql"
             #       since the linter will use the compiled code with the newlines
             #       removed.
             #    3. "No newline at end of file" warnings in Git/GitHub since
@@ -608,20 +604,17 @@ class DbtTemplater(JinjaTemplater):
         # We have to register the connection in dbt >= 1.0.0 ourselves
         # In previous versions, we relied on the functionality removed in
         # https://github.com/dbt-labs/dbt-core/pull/4062.
-        if DBT_VERSION_TUPLE >= (1, 0):
-            adapter = self.adapters.get(self.project_dir)
-            if adapter is None:
-                adapter = get_adapter(self.dbt_config)
-                self.adapters[self.project_dir] = adapter
-                adapter.acquire_connection("master")
-                adapter.set_relations_cache(self.dbt_manifest)
-
-            yield
-            # :TRICKY: Once connected, we never disconnect. Making multiple
-            # connections during linting has proven to cause major performance
-            # issues.
-        else:
-            yield
+        adapter = self.adapters.get(self.project_dir)
+        if adapter is None:
+            adapter = get_adapter(self.dbt_config)
+            self.adapters[self.project_dir] = adapter
+            adapter.acquire_connection("master")
+            adapter.set_relations_cache(self.dbt_manifest)
+
+        yield
+        # :TRICKY: Once connected, we never disconnect. Making multiple
+        # connections during linting has proven to cause major performance
+        # issues.
 
 
 class SnapshotExtension(StandaloneTag):
diff --git a/plugins/sqlfluff-templater-dbt/test/conftest.py b/plugins/sqlfluff-templater-dbt/test/conftest.py
new file mode 100644
index 0000000..6e2203b
--- /dev/null
+++ b/plugins/sqlfluff-templater-dbt/test/conftest.py
@@ -0,0 +1,14 @@
+"""pytest fixtures."""
+import os
+
+import pytest
+
+
+@pytest.fixture(scope="session", autouse=True)
+def dbt_flags():
+    """Set dbt flags for dbt templater tests."""
+    # Setting this to True disables some code in dbt-core that randomly runs
+    # some test code in core/dbt/parser/models.py, ModelParser. render_update().
+    # We've seen occasional runtime errors from that code:
+    # TypeError: cannot pickle '_thread.RLock' object
+    os.environ["DBT_USE_EXPERIMENTAL_PARSER"] = "True"
diff --git a/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/.sqlfluff b/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/.sqlfluff
index 4028449..9338703 100644
--- a/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/.sqlfluff
+++ b/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/.sqlfluff
@@ -1,7 +1,7 @@
 [sqlfluff]
 templater = dbt
 dialect = postgres
-# exclude_rules = L009
+# exclude_rules = LT12
 
 [sqlfluff:templater:dbt]
 profiles_dir = ./
diff --git a/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/AM03_test.sql b/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/AM03_test.sql
new file mode 100644
index 0000000..b4c7aac
--- /dev/null
+++ b/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/AM03_test.sql
@@ -0,0 +1,8 @@
+select
+    birth_date,
+    name
+from cows
+order by
+    birth_date asc,
+    name desc
+
diff --git a/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/L034_test.sql b/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/ST06_test.sql
similarity index 68%
rename from plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/L034_test.sql
rename to plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/ST06_test.sql
index a531a11..0c66387 100644
--- a/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/L034_test.sql
+++ b/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/ST06_test.sql
@@ -1,4 +1,4 @@
--- L034 should ignore this as one of the select targets uses a macro
+-- ST06 should ignore this as one of the select targets uses a macro
 
 select
     {{ dbt_utils.surrogate_key(['spots', 'moos']) }} as spot_moo_id,
diff --git a/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/call_statement.sql b/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/call_statement.sql
new file mode 100644
index 0000000..74265bb
--- /dev/null
+++ b/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/call_statement.sql
@@ -0,0 +1,5 @@
+{% call statement('unique_keys', fetch_result=True) %}
+  select 'tests' as key_name
+{% endcall %}
+{% set unique_keys = load_result('unique_keys') %}
+select 1, '{{ unique_keys.data[0][0] }}'
diff --git a/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/utf8/.sqlfluff b/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/utf8/.sqlfluff
index 09a23e9..32eeae1 100644
--- a/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/utf8/.sqlfluff
+++ b/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/utf8/.sqlfluff
@@ -1,4 +1,4 @@
 [sqlfluff]
 dialect = ansi
-rules = L010
+rules = CP01
 encoding = utf-8
diff --git a/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/packages.yml.jinja2 b/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/packages.yml
similarity index 72%
rename from plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/packages.yml.jinja2
rename to plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/packages.yml
index 83f4926..a87350e 100644
--- a/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/packages.yml.jinja2
+++ b/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/packages.yml
@@ -2,8 +2,4 @@ packages:
     # Reference: dbt_utils compatibility matrix:
     # https://docs.google.com/spreadsheets/d/1RoDdC69auAtrwiqmkRsgcFdZ3MdNpeKcJrWkmEpXVIs/edit#gid=0
     - package: dbt-labs/dbt_utils
-{% if DBT_VERSION_TUPLE < (1,0) %}
-      version: ["0.6.3"]
-{% else %}
       version: ["0.8.0"]
-{% endif %}
diff --git a/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/call_statement.sql b/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/call_statement.sql
new file mode 100644
index 0000000..35fc170
--- /dev/null
+++ b/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/call_statement.sql
@@ -0,0 +1,3 @@
+
+
+select 1, 'tests'
diff --git a/plugins/sqlfluff-templater-dbt/test/generate_packages_yml.py b/plugins/sqlfluff-templater-dbt/test/generate_packages_yml.py
deleted file mode 100644
index 9c3ab85..0000000
--- a/plugins/sqlfluff-templater-dbt/test/generate_packages_yml.py
+++ /dev/null
@@ -1,30 +0,0 @@
-"""Script used for dbt templater tests."""
-import os
-import sys
-from typing import Optional, Tuple
-
-from jinja2.sandbox import SandboxedEnvironment
-
-DBT_VERSION_TUPLE: Optional[Tuple[int, int]] = None
-try:
-    from sqlfluff_templater_dbt.templater import DBT_VERSION_TUPLE
-except ImportError:
-    pass
-
-
-def main(project_dir):
-    """Load Jinja template file expand it write to packages.yml."""
-    env = SandboxedEnvironment()
-    with open(os.path.join(project_dir, "packages.yml.jinja2")) as f:
-        template_str = f.read()
-    template = env.from_string(
-        template_str, globals=dict(DBT_VERSION_TUPLE=DBT_VERSION_TUPLE)
-    )
-    expanded = template.render()
-    with open(os.path.join(project_dir, "packages.yml"), "w") as f:
-        f.write(expanded)
-
-
-if __name__ == "__main__":
-    if DBT_VERSION_TUPLE is not None:
-        main(sys.argv[1])
diff --git a/plugins/sqlfluff-templater-dbt/test/rules_test.py b/plugins/sqlfluff-templater-dbt/test/rules_test.py
index 24bdc0b..c81f9fe 100644
--- a/plugins/sqlfluff-templater-dbt/test/rules_test.py
+++ b/plugins/sqlfluff-templater-dbt/test/rules_test.py
@@ -1,6 +1,7 @@
 """Tests for the standard set of rules."""
 import pytest
 import os
+import os.path
 from pathlib import Path
 
 from sqlfluff.core import Linter
@@ -18,9 +19,9 @@ from test.fixtures.dbt.templater import (  # noqa
     "rule,path,violations",
     [
         # Group By
-        ("L021", "models/my_new_project/select_distinct_group_by.sql", [(1, 8)]),
+        ("AM01", "models/my_new_project/select_distinct_group_by.sql", [(1, 8)]),
         # Multiple trailing newline
-        ("L009", "models/my_new_project/multiple_trailing_newline.sql", [(3, 1)]),
+        ("LT12", "models/my_new_project/multiple_trailing_newline.sql", [(3, 1)]),
     ],
 )
 def test__rules__std_file_dbt(rule, path, violations, project_dir):  # noqa
@@ -35,15 +36,24 @@ def test__rules__std_file_dbt(rule, path, violations, project_dir):  # noqa
 
 def test__rules__fix_utf8(project_dir):  # noqa
     """Verify that non-ASCII characters are preserved by 'fix'."""
-    rule = "L010"
+    rule = "CP01"
     path = "models/my_new_project/utf8/test.sql"
     lntr = Linter(
         config=FluffConfig(configs=DBT_FLUFF_CONFIG, overrides=dict(rules=rule))
     )
     lnt = lntr.lint_path(os.path.join(project_dir, path), fix=True)
+    # Check that we did actually find issues.
+    # NOTE: This test is mostly useful to distinguish between whether there's
+    # a problem with the rule - or a problem with the file.
+    violations_dict = lnt.violation_dict()
+    print("Violations Dict: ", violations_dict)
+    qual_path = os.path.normpath(Path(project_dir) / path)
+    assert qual_path in violations_dict, f"{path} not in violations dict."
+    assert violations_dict[qual_path], f"No issues found for {qual_path}."
     lnt.persist_changes(fixed_file_suffix="FIXED")
     # TODO: Check contents of file:
-    # ./plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/utf8/testFIXED.sql
+    # ./plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/
+    # my_new_project/utf8/testFIXED.sql
     # Against a git file, similar to the autofix tests
     fixed_path = Path(project_dir) / "models/my_new_project/utf8/testFIXED.sql"
     cmp_filepath = Path(project_dir) / "models/my_new_project/utf8/test.sql.fixed"
@@ -53,3 +63,16 @@ def test__rules__fix_utf8(project_dir):  # noqa
     # Assert that we fixed as expected
     assert fixed_buff == comp_buff
     os.unlink(fixed_path)
+
+
+def test__rules__order_by(project_dir):  # noqa
+    """Verify that rule AM03 works with dbt."""
+    rule = "AM03"
+    path = "models/my_new_project/AM03_test.sql"
+    lntr = Linter(
+        config=FluffConfig(configs=DBT_FLUFF_CONFIG, overrides=dict(rules=rule))
+    )
+    lnt = lntr.lint_path(os.path.join(project_dir, path))
+
+    violations = lnt.check_tuples()
+    assert len(violations) == 0
diff --git a/plugins/sqlfluff-templater-dbt/test/templater_test.py b/plugins/sqlfluff-templater-dbt/test/templater_test.py
index c48b18b..d7161d0 100644
--- a/plugins/sqlfluff-templater-dbt/test/templater_test.py
+++ b/plugins/sqlfluff-templater-dbt/test/templater_test.py
@@ -12,7 +12,7 @@ import pytest
 
 from sqlfluff.core import FluffConfig, Lexer, Linter
 from sqlfluff.core.errors import SQLFluffSkipFile
-from sqlfluff_templater_dbt.templater import DBT_VERSION_TUPLE
+from sqlfluff.utils.testing.logging import fluff_log_catcher
 from test.fixtures.dbt.templater import (  # noqa: F401
     DBT_FLUFF_CONFIG,
     dbt_templater,
@@ -83,6 +83,8 @@ def test__templater_dbt_profiles_dir_expanded(dbt_templater):  # noqa: F811
         "ends_with_whitespace_stripping.sql",
         # Access dbt graph nodes
         "access_graph_nodes.sql",
+        # Call statements
+        "call_statement.sql",
     ],
 )
 def test__templater_dbt_templating_result(
@@ -106,36 +108,34 @@ def test_dbt_profiles_dir_env_var_uppercase(
 
 def _run_templater_and_verify_result(dbt_templater, project_dir, fname):  # noqa: F811
     path = Path(project_dir) / "models/my_new_project" / fname
+    config = FluffConfig(configs=DBT_FLUFF_CONFIG)
     templated_file, _ = dbt_templater.process(
         in_str=path.read_text(),
         fname=str(path),
-        config=FluffConfig(configs=DBT_FLUFF_CONFIG),
+        config=config,
     )
     template_output_folder_path = Path(
         "plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/"
     )
     fixture_path = _get_fixture_path(template_output_folder_path, fname)
     assert str(templated_file) == fixture_path.read_text()
+    # Check we can lex the output too.
+    # https://github.com/sqlfluff/sqlfluff/issues/4013
+    lexer = Lexer(config=config)
+    _, lexing_violations = lexer.lex(templated_file)
+    assert not lexing_violations
 
 
 def _get_fixture_path(template_output_folder_path, fname):
     fixture_path: Path = template_output_folder_path / fname  # Default fixture location
-    # Is there a version-specific version of the fixture file?
-    if DBT_VERSION_TUPLE >= (1, 0):
-        dbt_version_specific_fixture_folder = "dbt_utils_0.8.0"
-    else:
-        dbt_version_specific_fixture_folder = None
-
-    if dbt_version_specific_fixture_folder:
-        # Maybe. Determine where it would exist.
-        version_specific_path = (
-            Path(template_output_folder_path)
-            / dbt_version_specific_fixture_folder
-            / fname
-        )
-        if version_specific_path.is_file():
-            # Ok, it exists. Use this path instead.
-            fixture_path = version_specific_path
+    dbt_version_specific_fixture_folder = "dbt_utils_0.8.0"
+    # Determine where it would exist.
+    version_specific_path = (
+        Path(template_output_folder_path) / dbt_version_specific_fixture_folder / fname
+    )
+    if version_specific_path.is_file():
+        # Ok, it exists. Use this path instead.
+        fixture_path = version_specific_path
     return fixture_path
 
 
@@ -286,17 +286,30 @@ def test__templater_dbt_skips_file(
         "use_var.sql",
         "incremental.sql",
         "single_trailing_newline.sql",
-        "L034_test.sql",
+        "ST06_test.sql",
     ],
 )
 def test__dbt_templated_models_do_not_raise_lint_error(
-    project_dir, fname  # noqa: F811
+    project_dir, fname, caplog  # noqa: F811
 ):
     """Test that templated dbt models do not raise a linting error."""
-    lntr = Linter(config=FluffConfig(configs=DBT_FLUFF_CONFIG))
-    lnt = lntr.lint_path(
-        path=os.path.join(project_dir, "models/my_new_project/", fname)
-    )
+    linter = Linter(config=FluffConfig(configs=DBT_FLUFF_CONFIG))
+    # Log rules output.
+    with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules"):
+        lnt = linter.lint_path(
+            path=os.path.join(project_dir, "models/my_new_project/", fname)
+        )
+    for linted_file in lnt.files:
+        # Log the rendered file to facilitate better debugging of the files.
+        print(f"## FILE: {linted_file.path}")
+        print("\n\n## RENDERED FILE:\n\n")
+        print(linted_file.templated_file.templated_str)
+        print("\n\n## PARSED TREE:\n\n")
+        print(linted_file.tree.stringify())
+        print("\n\n## VIOLATIONS:")
+        for idx, v in enumerate(linted_file.violations):
+            print(f"   {idx}:{v.get_info_dict()}")
+
     violations = lnt.check_tuples()
     assert len(violations) == 0
 
@@ -384,9 +397,6 @@ def test__templater_dbt_handle_exceptions(
     assert violations[0].desc().replace("\\", "/").startswith(exception_msg)
 
 
-@pytest.mark.skipif(
-    DBT_VERSION_TUPLE < (1, 0), reason="mocks a function that's only used in dbt >= 1.0"
-)
 @mock.patch("dbt.adapters.postgres.impl.PostgresAdapter.set_relations_cache")
 def test__templater_dbt_handle_database_connection_failure(
     set_relations_cache, project_dir, dbt_templater  # noqa: F811
@@ -408,7 +418,7 @@ def test__templater_dbt_handle_database_connection_failure(
             project_dir, "models/my_new_project/exception_connect_database.sql"
         )
     )
-    dbt_fluff_config_fail = DBT_FLUFF_CONFIG.copy()
+    dbt_fluff_config_fail = deepcopy(DBT_FLUFF_CONFIG)
     dbt_fluff_config_fail["templater"]["dbt"][
         "profiles_dir"
     ] = "plugins/sqlfluff-templater-dbt/test/fixtures/dbt/profiles_yml_fail"
@@ -434,7 +444,7 @@ def test__templater_dbt_handle_database_connection_failure(
     )
 
 
-def test__project_dir_does_not_exist_error(dbt_templater, caplog):  # noqa: F811
+def test__project_dir_does_not_exist_error(dbt_templater):  # noqa: F811
     """Test an error is logged if the given dbt project directory doesn't exist."""
     dbt_templater.sqlfluff_config = FluffConfig(
         configs={
@@ -442,18 +452,11 @@ def test__project_dir_does_not_exist_error(dbt_templater, caplog):  # noqa: F811
             "templater": {"dbt": {"project_dir": "./non_existing_directory"}},
         }
     )
-    logger = logging.getLogger("sqlfluff")
-    original_propagate_value = logger.propagate
-    try:
-        logger.propagate = True
-        with caplog.at_level(logging.ERROR, logger="sqlfluff.templater"):
-            dbt_project_dir = dbt_templater._get_project_dir()
-        assert (
-            f"dbt_project_dir: {dbt_project_dir} could not be accessed. "
-            "Check it exists."
-        ) in caplog.text
-    finally:
-        logger.propagate = original_propagate_value
+    with fluff_log_catcher(logging.ERROR, "sqlfluff.templater") as caplog:
+        dbt_project_dir = dbt_templater._get_project_dir()
+    assert (
+        f"dbt_project_dir: {dbt_project_dir} could not be accessed. " "Check it exists."
+    ) in caplog.text
 
 
 @pytest.mark.parametrize(
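
The `fluff_log_catcher` helper used above replaces the manual `logger.propagate` juggling that the old test needed. A generic sketch of that kind of helper (not the upstream implementation) attaches a temporary handler and exposes the captured text:

    import contextlib
    import io
    import logging

    @contextlib.contextmanager
    def log_catcher(level: int, logger_name: str):
        """Capture records from a named logger for test assertions."""
        logger = logging.getLogger(logger_name)
        buffer = io.StringIO()
        handler = logging.StreamHandler(buffer)
        handler.setLevel(level)
        old_level = logger.level
        logger.addHandler(handler)
        logger.setLevel(level)
        try:
            yield buffer
        finally:
            logger.removeHandler(handler)
            logger.setLevel(old_level)

Usage mirrors the test above: `with log_catcher(logging.ERROR, "sqlfluff.templater") as buf: ...`, then assert against `buf.getvalue()`.
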
diff --git a/pytest.ini b/pytest.ini
index dc0dfbd..30e9c09 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -1,4 +1,7 @@
 [pytest]
 markers =
-    dbt: marks tests needing the "dbt" plugin (deselect with '-m "not dbt"')
-    integration_test: marks integration tests
+    dbt: Marks tests needing the "dbt" plugin (deselect with '-m "not dbt"').
+    integration: Marks tests outside of the core suite.
+    parse_suite: Marks the suite of parsing tests across a range of dialects (part of integration).
+    fix_suite: Marks the suite of fixing tests across a range of dialects (part of integration).
+    rules_suite: Marks the suite of rules tests. Also known as the yaml tests (part of integration).
diff --git a/requirements.txt b/requirements.txt
index 3ffe405..5f8c0cf 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -28,7 +28,7 @@ regex
 # For returning exceptions from multiprocessing.Pool.map()
 tblib
 # For parsing pyproject.toml
-toml
+toml; python_version < '3.11'
 # For handling progress bars
 tqdm
 # better type hints for older python versions
diff --git a/requirements_dev.txt b/requirements_dev.txt
index 56a5079..5b91c0f 100644
--- a/requirements_dev.txt
+++ b/requirements_dev.txt
@@ -2,8 +2,10 @@
 # code linting and formatting
 flake8
 flake8-docstrings
+pydocstyle<6.2.0  # See: https://github.com/PyCQA/pydocstyle/issues/618
 black>=22.1.0
 flake8-black>=0.2.4
+ruff
 # documentation checks
 doc8
 Pygments
diff --git a/setup.cfg b/setup.cfg
index 9b26d5d..99d4f15 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
 [metadata]
 name = sqlfluff
-version = 1.4.5
+version = 2.0.5
 description = The SQL Linter for Humans
 long_description = file: README.md
 long_description_content_type = text/markdown
@@ -34,6 +34,7 @@ classifiers =
     Programming Language :: Python :: 3.10
     Programming Language :: Python :: 3.11
     Programming Language :: Python :: Implementation :: CPython
+    Programming Language :: SQL
     Topic :: Utilities
     Topic :: Software Development :: Quality Assurance
 keywords =
@@ -44,8 +45,11 @@ keywords =
     athena
     bigquery
     clickhouse
+    databricks
     db2
+    duckdb
     exasol
+    greenplum
     hive
     materialize
     mysql
@@ -95,7 +99,7 @@ install_requires =
     # For returning exceptions from multiprocessing.Pool.map()
     tblib
     # For parsing pyproject.toml
-    toml
+    toml; python_version < '3.11'
     # For handling progress bars
     tqdm
     # better type hints for older python versions
@@ -112,6 +116,19 @@ diff_cover =
     sqlfluff = sqlfluff.diff_quality_plugin
 sqlfluff =
     sqlfluff = sqlfluff.core.plugin.lib
+    # NOTE: We namespace the rules plugins with `rules`, because some
+    # of them might later collide with other types of plugins. In particular
+    # `tsql` may eventually refer to a dialect plugin and `jinja` may refer
+    # to a templater plugin.
+    sqlfluff_rules_capitalisation = sqlfluff.rules.capitalisation
+    sqlfluff_rules_aliasing = sqlfluff.rules.aliasing
+    sqlfluff_rules_layout = sqlfluff.rules.layout
+    sqlfluff_rules_references = sqlfluff.rules.references
+    sqlfluff_rules_ambiguous = sqlfluff.rules.ambiguous
+    sqlfluff_rules_structure = sqlfluff.rules.structure
+    sqlfluff_rules_convention = sqlfluff.rules.convention
+    sqlfluff_rules_jinja = sqlfluff.rules.jinja
+    sqlfluff_rules_tsql = sqlfluff.rules.tsql
 
 [options.package_data]
 sqlfluff =
@@ -120,4 +137,4 @@ sqlfluff =
     py.typed
 
 [sqlfluff_docs]
-stable_version = 1.4.5
+stable_version = 2.0.5
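
Because the rule bundles are now registered as setuptools entry points in the `sqlfluff` group, they can be enumerated with the standard library alone. A sketch using the `importlib.metadata` select API (Python 3.10+):

    from importlib.metadata import entry_points

    # List every plugin registered in the "sqlfluff" entry point group,
    # including the namespaced sqlfluff_rules_* bundles declared above.
    for ep in entry_points(group="sqlfluff"):
        print(f"{ep.name} -> {ep.value}")
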
diff --git a/src/sqlfluff/cli/commands.py b/src/sqlfluff/cli/commands.py
index 58dd42d..068db5f 100644
--- a/src/sqlfluff/cli/commands.py
+++ b/src/sqlfluff/cli/commands.py
@@ -226,10 +226,10 @@ def core_options(f: Callable) -> Callable:
         default=None,
         help=(
             "Narrow the search to only specific rules. For example "
-            "specifying `--rules L001` will only search for rule `L001` (Unnecessary "
+            "specifying `--rules LT01` will only search for rule `LT01` (Unnecessary "
             "trailing whitespace). Multiple rules can be specified with commas e.g. "
-            "`--rules L001,L002` will specify only looking for violations of rule "
-            "`L001` and rule `L002`."
+            "`--rules LT01,LT02` will specify only looking for violations of rule "
+            "`LT01` and rule `LT02`."
         ),
     )(f)
     f = click.option(
@@ -238,12 +238,12 @@ def core_options(f: Callable) -> Callable:
         default=None,
         help=(
             "Exclude specific rules. For example "
-            "specifying `--exclude-rules L001` will remove rule `L001` (Unnecessary "
+            "specifying `--exclude-rules LT01` will remove rule `LT01` (Unnecessary "
             "trailing whitespace) from the set of considered rules. This could either "
             "be the allowlist, or the general set if there is no specific allowlist. "
             "Multiple rules can be specified with commas e.g. "
-            "`--exclude-rules L001,L002` will exclude violations of rule "
-            "`L001` and rule `L002`."
+            "`--exclude-rules LT01,LT02` will exclude violations of rule "
+            "`LT01` and rule `LT02`."
         ),
     )(f)
     f = click.option(
@@ -311,6 +311,43 @@ def core_options(f: Callable) -> Callable:
     return f
 
 
+def lint_options(f: Callable) -> Callable:
+    """Add lint operation options to commands via a decorator.
+
+    These are cli commands that do linting, i.e. `lint` and `fix`.
+    """
+    f = click.option(
+        "-p",
+        "--processes",
+        type=int,
+        default=None,
+        help=(
+            "The number of parallel processes to run. Positive numbers work as "
+            "expected. Zero and negative numbers will work as number_of_cpus - "
+            "number. e.g  -1 means all cpus except one. 0 means all cpus."
+        ),
+    )(f)
+    f = click.option(
+        "--disable_progress_bar",
+        "--disable-progress-bar",
+        is_flag=True,
+        help="Disables progress bars.",
+        cls=DeprecatedOption,
+        deprecated=["--disable_progress_bar"],
+    )(f)
+    f = click.option(
+        "--persist-timing",
+        default=None,
+        help=(
+            "A filename to persist the timing information for a linting run to "
+            "in csv format for external analysis. NOTE: This feature should be "
+            "treated as beta, and the format of the csv file may change in "
+            "future releases without warning."
+        ),
+    )(f)
+    return f
+
+
 def get_config(
     extra_config_path: Optional[str] = None,
     ignore_local_config: bool = False,
@@ -390,8 +427,8 @@ def get_linter_and_formatter(
     context_settings={"help_option_names": ["-h", "--help"]},
     epilog="""\b\bExamples:\n
   sqlfluff lint --dialect postgres .\n
-  sqlfluff lint --dialect postgres --rules L042 .\n
-  sqlfluff fix --dialect sqlite --rules L041,L042 src/queries\n
+  sqlfluff lint --dialect postgres --rules ST05 .\n
+  sqlfluff fix --dialect sqlite --rules LT10,ST05 src/queries\n
   sqlfluff parse --dialect sqlite --templater jinja src/queries/common.sql
 """,
 )
@@ -421,7 +458,20 @@ def rules(**kwargs) -> None:
     """Show the current rules in use."""
     c = get_config(**kwargs, dialect="ansi")
     lnt, formatter = get_linter_and_formatter(c)
-    click.echo(formatter.format_rules(lnt), color=c.get("color"))
+    try:
+        click.echo(formatter.format_rules(lnt), color=c.get("color"))
+    # No cover for clause covering poorly formatted rules.
+    # Without creating a poorly formed plugin, these are hard to
+    # test.
+    except (SQLFluffUserError, AssertionError) as err:  # pragma: no cover
+        click.echo(
+            OutputStreamFormatter.colorize_helper(
+                c.get("color"),
+                f"Error loading rules: {str(err)}",
+                color=Color.red,
+            )
+        )
+        sys.exit(EXIT_ERROR)
 
 
 @cli.command()
@@ -447,6 +497,7 @@ def dump_file_payload(filename: Optional[str], payload: str):
 @cli.command(cls=DeprecatedOptionsCommand)
 @common_options
 @core_options
+@lint_options
 @click.option(
     "-f",
     "--format",
@@ -485,35 +536,6 @@ def dump_file_payload(filename: Optional[str], payload: str):
     is_flag=True,
     help="Perform the operation regardless of .sqlfluffignore configurations",
 )
-@click.option(
-    "-p",
-    "--processes",
-    type=int,
-    default=None,
-    help=(
-        "The number of parallel processes to run. Positive numbers work as "
-        "expected. Zero and negative numbers will work as number_of_cpus - "
-        "number. e.g  -1 means all cpus except one. 0 means all cpus."
-    ),
-)
-@click.option(
-    "--disable_progress_bar",
-    "--disable-progress-bar",
-    is_flag=True,
-    help="Disables progress bars.",
-    cls=DeprecatedOption,
-    deprecated=["--disable_progress_bar"],
-)
-@click.option(
-    "--persist-timing",
-    default=None,
-    help=(
-        "A filename to persist the timing information for a linting run to "
-        "in csv format for external analysis. NOTE: This feature should be "
-        "treated as beta, and the format of the csv file may change in "
-        "future releases without warning."
-    ),
-)
 @click.argument("paths", nargs=-1, type=click.Path(allow_dash=True))
 def lint(
     paths: Tuple[str],
@@ -526,9 +548,9 @@ def lint(
     bench: bool = False,
     processes: Optional[int] = None,
     disable_progress_bar: Optional[bool] = False,
+    persist_timing: Optional[str] = None,
     extra_config_path: Optional[str] = None,
     ignore_local_config: bool = False,
-    persist_timing: Optional[str] = None,
     **kwargs,
 ) -> None:
     """Lint SQL files via passing a list of files or using stdin.
@@ -594,6 +616,8 @@ def lint(
         file_output = json.dumps(result.as_records())
     elif format == FormatType.yaml.value:
         file_output = yaml.dump(result.as_records(), sort_keys=False)
+    elif format == FormatType.none.value:
+        file_output = ""
     elif format == FormatType.github_annotation.value:
         if annotation_level == "error":
             annotation_level = "failure"
@@ -635,6 +659,8 @@ def lint(
                 line += f"col={violation['line_pos']}"
                 line += "::"
                 line += f"{violation['code']}: {violation['description']}"
+                if violation["name"]:
+                    line += f" [{violation['name']}]"
 
                 github_result_native.append(line)
 
@@ -653,7 +679,9 @@ def lint(
         timing_summary = result.timing_summary()
         for step in timing_summary:
             click.echo(f"=== {step} ===")
-            click.echo(formatter.cli_table(timing_summary[step].items()))
+            click.echo(
+                formatter.cli_table(timing_summary[step].items(), cols=3, col_width=20)
+            )
 
     if not nofail:
         if not non_human_output:
@@ -680,154 +708,68 @@ def do_fixes(lnt, result, formatter=None, **kwargs):
     return False  # pragma: no cover
 
 
-@cli.command(cls=DeprecatedOptionsCommand)
-@common_options
-@core_options
-@click.option(
-    "-f",
-    "--force",
-    is_flag=True,
-    help=(
-        "skip the confirmation prompt and go straight to applying "
-        "fixes. **Use this with caution.**"
-    ),
-)
-@click.option(
-    "-x",
-    "--fixed-suffix",
-    default=None,
-    help="An optional suffix to add to fixed files.",
-)
-@click.option(
-    "-p",
-    "--processes",
-    type=int,
-    default=None,
-    help=(
-        "The number of parallel processes to run. Positive numbers work as "
-        "expected. Zero and negative numbers will work as number_of_cpus - "
-        "number. e.g  -1 means all cpus except one. 0 means all cpus."
-    ),
-)
-@click.option(
-    "--disable_progress_bar",
-    "--disable-progress-bar",
-    is_flag=True,
-    help="Disables progress bars.",
-    cls=DeprecatedOption,
-    deprecated=["--disable_progress_bar"],
-)
-@click.option(
-    "--FIX-EVEN-UNPARSABLE",
-    is_flag=True,
-    default=None,
-    help=(
-        "Enables fixing of files that have templating or parse errors. "
-        "Note that the similar-sounding '--ignore' or 'noqa' features merely "
-        "prevent errors from being *displayed*. For safety reasons, the 'fix'"
-        "command will not make any fixes in files that have templating or parse "
-        "errors unless '--FIX-EVEN-UNPARSABLE' is enabled on the command line"
-        "or in the .sqlfluff config file."
-    ),
-)
-@click.option(
-    "--show-lint-violations",
-    is_flag=True,
-    help="Show lint violations",
-)
-@click.argument("paths", nargs=-1, type=click.Path(allow_dash=True))
-def fix(
-    force: bool,
-    paths: Tuple[str],
-    bench: bool = False,
-    fixed_suffix: str = "",
-    logger: Optional[logging.Logger] = None,
-    processes: Optional[int] = None,
-    disable_progress_bar: Optional[bool] = False,
-    extra_config_path: Optional[str] = None,
-    ignore_local_config: bool = False,
-    show_lint_violations: bool = False,
-    **kwargs,
-) -> None:
-    """Fix SQL files.
-
-    PATH is the path to a sql file or directory to lint. This can be either a
-    file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-')
-    character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will
-    be interpreted like passing the current working directory as a path argument.
-    """
-    # some quick checks
-    fixing_stdin = ("-",) == paths
-
-    config = get_config(
-        extra_config_path, ignore_local_config, require_dialect=False, **kwargs
-    )
-    fix_even_unparsable = config.get("fix_even_unparsable")
-    output_stream = make_output_stream(
-        config, None, os.devnull if fixing_stdin else None
-    )
-    lnt, formatter = get_linter_and_formatter(config, output_stream)
-
-    verbose = config.get("verbose")
-    progress_bar_configuration.disable_progress_bar = disable_progress_bar
-
+def _stdin_fix(linter, formatter, fix_even_unparsable):
+    """Handle fixing from stdin."""
     exit_code = EXIT_SUCCESS
+    stdin = sys.stdin.read()
 
-    formatter.dispatch_config(lnt)
-
-    # Set up logging.
-    set_logging_level(
-        verbosity=verbose,
-        formatter=formatter,
-        logger=logger,
-        stderr_output=fixing_stdin,
-    )
-
-    # handle stdin case. should output formatted sql to stdout and nothing else.
-    if fixing_stdin:
-        stdin = sys.stdin.read()
-
-        result = lnt.lint_string_wrapped(stdin, fname="stdin", fix=True)
-        templater_error = result.num_violations(types=SQLTemplaterError) > 0
-        unfixable_error = result.num_violations(types=SQLLintError, fixable=False) > 0
-        if not fix_even_unparsable:
-            exit_code = formatter.handle_files_with_tmp_or_prs_errors(result)
-
-        if result.num_violations(types=SQLLintError, fixable=True) > 0:
-            stdout = result.paths[0].files[0].fix_string()[0]
-        else:
-            stdout = stdin
+    result = linter.lint_string_wrapped(stdin, fname="stdin", fix=True)
+    templater_error = result.num_violations(types=SQLTemplaterError) > 0
+    unfixable_error = result.num_violations(types=SQLLintError, fixable=False) > 0
+    if not fix_even_unparsable:
+        exit_code = formatter.handle_files_with_tmp_or_prs_errors(result)
 
-        if templater_error:
-            click.echo(
-                formatter.colorize(
-                    "Fix aborted due to unparsable template variables.",
-                    Color.red,
-                ),
-                err=True,
-            )
-            click.echo(
-                formatter.colorize(
-                    "Use --FIX-EVEN-UNPARSABLE' to attempt to fix the SQL anyway.",
-                    Color.red,
-                ),
-                err=True,
-            )
+    if result.num_violations(types=SQLLintError, fixable=True) > 0:
+        stdout = result.paths[0].files[0].fix_string()[0]
+    else:
+        stdout = stdin
 
-        if unfixable_error:
-            click.echo(
-                formatter.colorize("Unfixable violations detected.", Color.red),
-                err=True,
-            )
+    if templater_error:
+        click.echo(
+            formatter.colorize(
+                "Fix aborted due to unparsable template variables.",
+                Color.red,
+            ),
+            err=True,
+        )
+        click.echo(
+            formatter.colorize(
+                "Use --FIX-EVEN-UNPARSABLE' to attempt to fix the SQL anyway.",
+                Color.red,
+            ),
+            err=True,
+        )
 
-        click.echo(stdout, nl=False)
-        sys.exit(EXIT_FAIL if templater_error or unfixable_error else exit_code)
+    if unfixable_error:
+        click.echo(
+            formatter.colorize("Unfixable violations detected.", Color.red),
+            err=True,
+        )
 
+    click.echo(stdout, nl=False)
+    sys.exit(EXIT_FAIL if templater_error or unfixable_error else exit_code)
+
+
+def _paths_fix(
+    linter,
+    formatter,
+    paths,
+    processes,
+    fix_even_unparsable,
+    force,
+    fixed_suffix,
+    bench,
+    show_lint_violations,
+    warn_force: bool = True,
+    persist_timing: Optional[str] = None,
+):
+    """Handle fixing from paths."""
     # Lint the paths (not with the fix argument at this stage), outputting as we go.
     click.echo("==== finding fixable violations ====")
+    exit_code = EXIT_SUCCESS
 
     with PathAndUserErrorHandler(formatter):
-        result = lnt.lint_paths(
+        result = linter.lint_paths(
             paths,
             fix=True,
             ignore_non_existent_files=False,
@@ -846,11 +788,13 @@ def fix(
             "linting violations found"
         )
         if force:
-            click.echo(
-                f"{formatter.colorize('FORCE MODE', Color.red)}: Attempting fixes..."
-            )
+            if warn_force:
+                click.echo(
+                    f"{formatter.colorize('FORCE MODE', Color.red)}: "
+                    "Attempting fixes..."
+                )
             success = do_fixes(
-                lnt,
+                linter,
                 result,
                 formatter,
                 types=SQLLintError,
@@ -867,7 +811,7 @@ def fix(
             if c in ("y", "\r", "\n"):
                 click.echo("Attempting fixes...")
                 success = do_fixes(
-                    lnt,
+                    linter,
                     result,
                     formatter,
                     types=SQLLintError,
@@ -907,7 +851,9 @@ def fix(
         timing_summary = result.timing_summary()
         for step in timing_summary:
             click.echo(f"=== {step} ===")
-            click.echo(formatter.cli_table(timing_summary[step].items()))
+            click.echo(
+                formatter.cli_table(timing_summary[step].items(), cols=3, col_width=20)
+            )
 
     if show_lint_violations:
         click.echo("==== lint for unfixable violations ====")
@@ -919,9 +865,213 @@ def fix(
             for violation in violations:
                 click.echo(formatter.format_violation(violation))
 
+    if persist_timing:
+        result.persist_timing_records(persist_timing)
+
     sys.exit(exit_code)
 
 
+@cli.command(cls=DeprecatedOptionsCommand)
+@common_options
+@core_options
+@lint_options
+@click.option(
+    "-f",
+    "--force",
+    is_flag=True,
+    help=(
+        "skip the confirmation prompt and go straight to applying "
+        "fixes. **Use this with caution.**"
+    ),
+)
+@click.option(
+    "-x",
+    "--fixed-suffix",
+    default=None,
+    help="An optional suffix to add to fixed files.",
+)
+@click.option(
+    "--FIX-EVEN-UNPARSABLE",
+    is_flag=True,
+    default=None,
+    help=(
+        "Enables fixing of files that have templating or parse errors. "
+        "Note that the similar-sounding '--ignore' or 'noqa' features merely "
+        "prevent errors from being *displayed*. For safety reasons, the 'fix'"
+        "command will not make any fixes in files that have templating or parse "
+        "errors unless '--FIX-EVEN-UNPARSABLE' is enabled on the command line"
+        "or in the .sqlfluff config file."
+    ),
+)
+@click.option(
+    "--show-lint-violations",
+    is_flag=True,
+    help="Show lint violations",
+)
+@click.argument("paths", nargs=-1, type=click.Path(allow_dash=True))
+def fix(
+    force: bool,
+    paths: Tuple[str],
+    bench: bool = False,
+    fixed_suffix: str = "",
+    logger: Optional[logging.Logger] = None,
+    processes: Optional[int] = None,
+    disable_progress_bar: Optional[bool] = False,
+    persist_timing: Optional[str] = None,
+    extra_config_path: Optional[str] = None,
+    ignore_local_config: bool = False,
+    show_lint_violations: bool = False,
+    **kwargs,
+) -> None:
+    """Fix SQL files.
+
+    PATH is the path to a sql file or directory to lint. This can be either a
+    file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-')
+    character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will
+    be interpreted like passing the current working directory as a path argument.
+    """
+    # some quick checks
+    fixing_stdin = ("-",) == paths
+
+    config = get_config(
+        extra_config_path, ignore_local_config, require_dialect=False, **kwargs
+    )
+    fix_even_unparsable = config.get("fix_even_unparsable")
+    output_stream = make_output_stream(
+        config, None, os.devnull if fixing_stdin else None
+    )
+    lnt, formatter = get_linter_and_formatter(config, output_stream)
+
+    verbose = config.get("verbose")
+    progress_bar_configuration.disable_progress_bar = disable_progress_bar
+
+    formatter.dispatch_config(lnt)
+
+    # Set up logging.
+    set_logging_level(
+        verbosity=verbose,
+        formatter=formatter,
+        logger=logger,
+        stderr_output=fixing_stdin,
+    )
+
+    # handle stdin case. should output formatted sql to stdout and nothing else.
+    if fixing_stdin:
+        _stdin_fix(lnt, formatter, fix_even_unparsable)
+    else:
+        _paths_fix(
+            lnt,
+            formatter,
+            paths,
+            processes,
+            fix_even_unparsable,
+            force,
+            fixed_suffix,
+            bench,
+            show_lint_violations,
+            persist_timing=persist_timing,
+        )
+
+
+@cli.command(name="format", cls=DeprecatedOptionsCommand)
+@common_options
+@core_options
+@lint_options
+@click.option(
+    "-x",
+    "--fixed-suffix",
+    default=None,
+    help="An optional suffix to add to fixed files.",
+)
+@click.argument("paths", nargs=-1, type=click.Path(allow_dash=True))
+def cli_format(
+    paths: Tuple[str],
+    bench: bool = False,
+    fixed_suffix: str = "",
+    logger: Optional[logging.Logger] = None,
+    processes: Optional[int] = None,
+    disable_progress_bar: Optional[bool] = False,
+    persist_timing: Optional[str] = None,
+    extra_config_path: Optional[str] = None,
+    ignore_local_config: bool = False,
+    **kwargs,
+) -> None:
+    """Autoformat SQL files.
+
+    This effectively force-applies `sqlfluff fix` with a known subset of fairly
+    stable rules. Enabled rules are ignored, but rule exclusions (via CLI) or
+    config are still respected.
+
+    PATH is the path to a sql file or directory to lint. This can be either a
+    file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-')
+    character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will
+    be interpreted like passing the current working directory as a path argument.
+    """
+    # some quick checks
+    fixing_stdin = ("-",) == paths
+
+    if kwargs.get("rules"):
+        click.echo(
+            "Specifying rules is not supported for sqlfluff format.",
+        )
+        sys.exit(EXIT_ERROR)
+
+    # Override rules for sqlfluff format
+    kwargs["rules"] = (
+        # All of the capitalisation rules
+        "capitalisation,"
+        # All of the layout rules
+        "layout,"
+        # Safe rules from other groups
+        "ambiguous.union,"
+        "convention.not_equal,"
+        "convention.coalesce,"
+        "convention.select_trailing_comma,"
+        "convention.is_null,"
+        "jinja.padding,"
+        "structure.distinct,"
+    )
+
+    config = get_config(
+        extra_config_path, ignore_local_config, require_dialect=False, **kwargs
+    )
+    output_stream = make_output_stream(
+        config, None, os.devnull if fixing_stdin else None
+    )
+    lnt, formatter = get_linter_and_formatter(config, output_stream)
+
+    verbose = config.get("verbose")
+    progress_bar_configuration.disable_progress_bar = disable_progress_bar
+
+    formatter.dispatch_config(lnt)
+
+    # Set up logging.
+    set_logging_level(
+        verbosity=verbose,
+        formatter=formatter,
+        logger=logger,
+        stderr_output=fixing_stdin,
+    )
+
+    # handle stdin case. should output formatted sql to stdout and nothing else.
+    if fixing_stdin:
+        _stdin_fix(lnt, formatter, fix_even_unparsable=False)
+    else:
+        _paths_fix(
+            lnt,
+            formatter,
+            paths,
+            processes,
+            fix_even_unparsable=False,
+            force=True,  # Always force in format mode.
+            fixed_suffix=fixed_suffix,
+            bench=bench,
+            show_lint_violations=False,
+            warn_force=False,  # don't warn about being in force mode.
+            persist_timing=persist_timing,
+        )
+
+
 def quoted_presenter(dumper, data):
     """Re-presenter which always double quotes string values needing escapes."""
     if "\n" in data or "\t" in data or "'" in data:
@@ -961,6 +1111,7 @@ def quoted_presenter(dumper, data):
             FormatType.human.value,
             FormatType.json.value,
             FormatType.yaml.value,
+            FormatType.none.value,
         ],
         case_sensitive=False,
     ),
@@ -1091,6 +1242,8 @@ def parse(
             file_output = yaml.dump(parsed_strings_dict, sort_keys=False)
         elif format == FormatType.json.value:
             file_output = json.dumps(parsed_strings_dict)
+        elif format == FormatType.none.value:
+            file_output = ""
 
         # Dump the output to stdout or to file as appropriate.
         dump_file_payload(write_output, file_output)
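
The `lint_options` decorator introduced above follows a standard click pattern: one function applies a stack of `click.option` wrappers so that several commands (`lint`, `fix`, and the new `format`) share identical flags. A minimal self-contained sketch of the pattern (toy command names, not the sqlfluff CLI):

    import click

    def shared_options(f):
        """Apply the same options to multiple commands via a decorator."""
        f = click.option(
            "-p", "--processes", type=int, default=None,
            help="The number of parallel processes to run.",
        )(f)
        f = click.option("--bench", is_flag=True, help="Print timing info.")(f)
        return f

    @click.group()
    def cli():
        """Toy CLI demonstrating shared options."""

    @cli.command()
    @shared_options
    def lint(processes, bench):
        click.echo(f"lint: processes={processes}, bench={bench}")

    @cli.command()
    @shared_options
    def fix(processes, bench):
        click.echo(f"fix: processes={processes}, bench={bench}")

    if __name__ == "__main__":
        cli()
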
diff --git a/src/sqlfluff/cli/formatters.py b/src/sqlfluff/cli/formatters.py
index bde94af..199f87b 100644
--- a/src/sqlfluff/cli/formatters.py
+++ b/src/sqlfluff/cli/formatters.py
@@ -185,10 +185,14 @@ class OutputStreamFormatter:
         if self._verbosity > 1:
             self._dispatch(self.format_filename(filename=fname, success="PARSING"))
 
-    def dispatch_lint_header(self, fname: str) -> None:
+    def dispatch_lint_header(self, fname: str, rules: List[str]) -> None:
         """Dispatch the header displayed before linting."""
         if self._verbosity > 1:
-            self._dispatch(self.format_filename(filename=fname, success="LINTING"))
+            self._dispatch(
+                self.format_filename(
+                    filename=fname, success=f"LINTING ({', '.join(rules)})"
+                )
+            )
 
     def dispatch_compilation_header(self, templater, message):
         """Dispatch the header displayed before linting."""
@@ -410,6 +414,12 @@ class OutputStreamFormatter:
         elif violation.warning:
             desc = "WARNING: " + desc  # pragma: no cover
 
+        # If the rule has a name, add that to the description.
+        if hasattr(violation, "rule"):
+            rule = getattr(violation, "rule", None)
+            if rule and rule.name:
+                desc += f" [{self.colorize(rule.name, Color.lightgrey)}]"
+
         split_desc = split_string_on_spaces(desc, line_length=max_line_length - 25)
 
         out_buff = ""
@@ -491,13 +501,38 @@ class OutputStreamFormatter:
             )
         return text_buffer.getvalue()
 
+    def _format_rule_description(self, rule) -> str:
+        """Format individual rule.
+
+        This is a helper function in .format_rules().
+        """
+        if rule.name:
+            name = self.colorize(rule.name, Color.blue)
+            description = f"[{name}] {rule.description}"
+        else:
+            description = rule.description
+
+        if rule.groups:
+            groups = self.colorize(", ".join(rule.groups), Color.lightgrey)
+            description += f"\ngroups: {groups}"
+        if rule.aliases:
+            aliases = self.colorize(", ".join(rule.aliases), Color.lightgrey)
+            description += f" aliases: {aliases}"
+        return description
+
     def format_rules(self, linter: Linter, verbose: int = 0) -> str:
         """Format the a set of rules given a `Linter`."""
         text_buffer = StringIO()
         text_buffer.write("==== sqlfluff - rules ====\n")
         text_buffer.write(
             self.cli_table(
-                linter.rule_tuples(),
+                [
+                    (
+                        t.code,
+                        self._format_rule_description(t),
+                    )
+                    for t in linter.rule_tuples()
+                ],
                 col_width=80,
                 cols=1,
                 label_color=Color.blue,
diff --git a/src/sqlfluff/cli/helpers.py b/src/sqlfluff/cli/helpers.py
index 102c925..6ed5c6d 100644
--- a/src/sqlfluff/cli/helpers.py
+++ b/src/sqlfluff/cli/helpers.py
@@ -47,7 +47,10 @@ def wrap_field(
         label_list = [label]
 
     max_val_width = width - len(sep_char) - label_width
-    val_list = wrap_elem(val, width=max_val_width)
+    val_list = []
+    for v in val.split("\n"):
+        val_list.extend(wrap_elem(v, width=max_val_width))
+
     return dict(
         label_list=label_list,
         val_list=val_list,
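
The `wrap_field` change wraps each physical line of the value separately, so the embedded newline now used for the `groups:` line in rule descriptions survives wrapping. The same idea with the standard library, as a sketch:

    import textwrap
    from typing import List

    def wrap_preserving_newlines(value: str, width: int) -> List[str]:
        """Wrap text to `width` while keeping intentional line breaks."""
        lines: List[str] = []
        for part in value.split("\n"):
            # textwrap.wrap() returns [] for an empty string; keep the
            # blank line rather than dropping it.
            lines.extend(textwrap.wrap(part, width=width) or [""])
        return lines

    print(wrap_preserving_newlines(
        "[capitalisation.keywords] Inconsistent capitalisation\ngroups: all, core",
        width=30,
    ))
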
diff --git a/src/sqlfluff/core/config.py b/src/sqlfluff/core/config.py
index 8d8ee2f..774e2e5 100644
--- a/src/sqlfluff/core/config.py
+++ b/src/sqlfluff/core/config.py
@@ -4,6 +4,7 @@ import logging
 import os
 import os.path
 import configparser
+import sys
 from dataclasses import dataclass
 
 import pluggy
@@ -15,7 +16,10 @@ from sqlfluff.core.errors import SQLFluffUserError
 
 import appdirs
 
-import toml
+if sys.version_info >= (3, 11):
+    import tomllib
+else:  # pragma: no cover
+    import toml as tomllib
 
 # Instantiate the config logger
 config_logger = logging.getLogger("sqlfluff.config")
@@ -40,7 +44,78 @@ class _RemovedConfig:
 
 REMOVED_CONFIGS = [
     _RemovedConfig(
-        ("rules", "L007", "operator_new_lines"),
+        ("rules", "L003", "hanging_indents"),
+        (
+            "Hanging indents are no longer supported in SQLFluff "
+            "from version 2.0.0 onwards. See "
+            "https://docs.sqlfluff.com/en/stable/layout.html#hanging-indents"
+        ),
+    ),
+    _RemovedConfig(
+        ("rules", "max_line_length"),
+        (
+            "The max_line_length config has moved "
+            "from sqlfluff:rules to the root sqlfluff level."
+        ),
+        ("max_line_length",),
+        (lambda x: x),
+    ),
+    _RemovedConfig(
+        ("rules", "tab_space_size"),
+        (
+            "The tab_space_size config has moved "
+            "from sqlfluff:rules to sqlfluff:indentation."
+        ),
+        ("indentation", "tab_space_size"),
+        (lambda x: x),
+    ),
+    _RemovedConfig(
+        ("rules", "L002", "tab_space_size"),
+        (
+            "The tab_space_size config has moved "
+            "from sqlfluff:rules to sqlfluff:indentation."
+        ),
+        ("indentation", "tab_space_size"),
+        (lambda x: x),
+    ),
+    _RemovedConfig(
+        ("rules", "L003", "tab_space_size"),
+        (
+            "The tab_space_size config has moved "
+            "from sqlfluff:rules to sqlfluff:indentation."
+        ),
+        ("indentation", "tab_space_size"),
+        (lambda x: x),
+    ),
+    _RemovedConfig(
+        ("rules", "L004", "tab_space_size"),
+        (
+            "The tab_space_size config has moved "
+            "from sqlfluff:rules to sqlfluff:indentation."
+        ),
+        ("indentation", "tab_space_size"),
+        (lambda x: x),
+    ),
+    _RemovedConfig(
+        ("rules", "L016", "tab_space_size"),
+        (
+            "The tab_space_size config has moved "
+            "from sqlfluff:rules to sqlfluff:indentation."
+        ),
+        ("indentation", "tab_space_size"),
+        (lambda x: x),
+    ),
+    _RemovedConfig(
+        ("rules", "indent_unit"),
+        (
+            "The indent_unit config has moved "
+            "from sqlfluff:rules to sqlfluff:indentation."
+        ),
+        ("indentation", "indent_unit"),
+        (lambda x: x),
+    ),
+    _RemovedConfig(
+        ("rules", "LT03", "operator_new_lines"),
         (
             "Use the line_position config in the appropriate "
             "sqlfluff:layout section (e.g. sqlfluff:layout:type"
@@ -59,9 +134,9 @@ REMOVED_CONFIGS = [
         ("layout", "type", "comma", "line_position"),
         (lambda x: x),
     ),
-    # L019 used to have a more specific version of the same /config itself.
+    # LT04 used to have a more specific version of the same /config itself.
     _RemovedConfig(
-        ("rules", "L019", "comma_style"),
+        ("rules", "LT04", "comma_style"),
         (
             "Use the line_position config in the appropriate "
             "sqlfluff:layout section (e.g. sqlfluff:layout:type"
@@ -193,8 +268,16 @@ def dict_diff(left: dict, right: dict, ignore: Optional[List[str]] = None) -> di
     return buff
 
 
-def _split_comma_separated_string(raw_str: str) -> List[str]:
-    return [s.strip() for s in raw_str.split(",") if s.strip()]
+def split_comma_separated_string(raw: Union[str, List[str]]) -> List[str]:
+    """Converts comma separated string to List, stripping whitespace."""
+    if isinstance(raw, str):
+        return [s.strip() for s in raw.split(",") if s.strip()]
+    if isinstance(raw, list):
+        return raw
+    raise SQLFluffUserError(
+        f"Expected list or comma separated string. Got {type(raw)}"
+        f" instead for value {raw}."
+    )
 
 
 class ConfigLoader:
@@ -220,9 +303,45 @@ class ConfigLoader:
         return global_loader
 
     @classmethod
-    def _walk_toml(cls, config: Dict[str, Any], base_key=()):
-        """Recursively walk the nested config inside a TOML file."""
-        buff: List[tuple] = []
+    def _walk_toml(
+        cls, config: Dict[str, Any], base_key: Tuple[str, ...] = ()
+    ) -> List[Tuple[Tuple[str, ...], Any]]:
+        """Recursively walk the nested config inside a TOML file.
+
+        For standard usage it mimics the standard loader.
+
+        >>> ConfigLoader._walk_toml({"foo": "bar"})
+        [(('foo',), 'bar')]
+        >>> ConfigLoader._walk_toml({"foo": {"bar": "baz"}})
+        [(('foo', 'bar'), 'baz')]
+
+        For the "rules" section, there's a special handling
+        to condense nested sections from the toml for rules
+        which contain a dot (or more) (".") in their name.
+
+        >>> ConfigLoader._walk_toml({"rules": {"a": {"b": {"c": "d"}}}})
+        [(('rules', 'a.b', 'c'), 'd')]
+        >>> ConfigLoader._walk_toml({"rules":
+        ...     {"capitalisation": {"keywords":
+        ...         {"capitalisation_policy": "upper"}
+        ...     }}
+        ... })
+        [(('rules', 'capitalisation.keywords', 'capitalisation_policy'), 'upper')]
+
+        NOTE: Some rules may have more than one dot in their name.
+        >>> ConfigLoader._walk_toml({"rules":
+        ...     {"a": {"b": {"c": {"d": {"e": "f"}}}}}
+        ... })
+        [(('rules', 'a.b.c.d', 'e'), 'f')]
+        """
+        buff: List[Tuple[Tuple[str, ...], Any]] = []
+        # NOTE: For the "rules" section of the sqlfluff config,
+        # rule names are often qualified with a dot ".". In the
+        # toml scenario this can get interpreted as a nested
+        # section, and we resolve that edge case here.
+        if len(base_key) == 3 and base_key[0] == "rules":
+            base_key = ("rules", ".".join(base_key[1:]))
+
         for k, v in config.items():
             key = base_key + (k,)
             if isinstance(v, dict):
@@ -276,7 +395,8 @@ class ConfigLoader:
         The return value is a list of tuples, where each tuple has two elements,
         the first is a tuple of paths, the second is the value at that path.
         """
-        config = toml.load(fpath)
+        with open(fpath, mode="r") as file:
+            config = tomllib.loads(file.read())
         tool = config.get("tool", {}).get("sqlfluff", {})
 
         return cls._walk_toml(tool)
@@ -327,7 +447,7 @@ class ConfigLoader:
                 # Attempt to resolve paths
                 if name.lower() == "load_macros_from_path":
                     # Comma-separated list of paths.
-                    paths = _split_comma_separated_string(val)
+                    paths = split_comma_separated_string(val)
                     v_temp = []
                     for path in paths:
                         v_temp.append(cls._resolve_path(fpath, path))
@@ -675,11 +795,12 @@ class FluffConfig:
             ("ignore", "ignore"),
             ("warnings", "warnings"),
             ("rules", "rule_allowlist"),
-            # Allowlists and denylists
+            # Allowlists and denylists
             ("exclude_rules", "rule_denylist"),
         ]:
             if self._configs["core"].get(in_key, None):
-                self._configs["core"][out_key] = _split_comma_separated_string(
+                # Checking if the key is a string, as it can potentially be a list too.
+                self._configs["core"][out_key] = split_comma_separated_string(
                     self._configs["core"][in_key]
                 )
             else:
@@ -689,19 +810,25 @@ class FluffConfig:
             self._configs["core"]["recurse"] = True
 
         # Dialect and Template selection.
+        dialect: Optional[str] = self._configs["core"]["dialect"]
+        self._initialise_dialect(dialect, require_dialect)
+
+        self._configs["core"]["templater_obj"] = self.get_templater(
+            self._configs["core"]["templater"]
+        )
+
+    def _initialise_dialect(
+        self, dialect: Optional[str], require_dialect: bool = True
+    ) -> None:
         # NB: We import here to avoid a circular references.
         from sqlfluff.core.dialects import dialect_selector
 
-        dialect: Optional[str] = self._configs["core"]["dialect"]
         if dialect is not None:
             self._configs["core"]["dialect_obj"] = dialect_selector(
                 self._configs["core"]["dialect"]
             )
         elif require_dialect:
             self.verify_dialect_specified()
-        self._configs["core"]["templater_obj"] = self.get_templater(
-            self._configs["core"]["templater"]
-        )
 
     def verify_dialect_specified(self) -> None:
         """Check if the config specifies a dialect, raising an error if not."""
@@ -726,6 +853,10 @@ class FluffConfig:
         state = self.__dict__.copy()
         # Remove the unpicklable entries.
         del state["_plugin_manager"]
+        # The dbt templater doesn't pickle well, but isn't required
+        # within threaded operations. If it was, it could easily be
+        # rehydrated within the thread.
+        state["_configs"]["core"].pop("templater_obj", None)
         return state
 
     def __setstate__(self, state):  # pragma: no cover
@@ -741,6 +872,9 @@ class FluffConfig:
         # process invocations of sqlfluff. In the event that user registered
         # rules are used in a multi-process invocation, they will not be applied
         # in the child processes.
+        # NOTE: Likewise we don't reinstate the "templater_obj" config value
+        # which should also only be used in the main thread rather than child
+        # processes.
 
     @classmethod
     def from_root(
@@ -821,6 +955,7 @@ class FluffConfig:
         if exclude_rules:
             # Make a comma separated string to pass in as override
             overrides["exclude_rules"] = ",".join(exclude_rules)
+
         return cls(overrides=overrides, require_dialect=require_dialect)
 
     def get_templater(self, templater_name="jinja", **kwargs):
@@ -975,6 +1110,9 @@ class FluffConfig:
         config_path = [elem.strip() for elem in config_line.split(":")]
         # Set the value
         self.set_value(config_path[:-1], config_path[-1])
+        # If the config is for dialect, initialise the dialect
+        if config_path[:-1] == ["dialect"]:
+            self._initialise_dialect(config_path[-1])
 
     def process_raw_file_for_config(self, raw_str: str):
         """Process a full raw file for inline config and update self."""
diff --git a/src/sqlfluff/core/default_config.cfg b/src/sqlfluff/core/default_config.cfg
index 96b49be..e376aad 100644
--- a/src/sqlfluff/core/default_config.cfg
+++ b/src/sqlfluff/core/default_config.cfg
@@ -20,7 +20,7 @@ output_line_length = 80
 runaway_limit = 10
 # Ignore errors by category (one or more of the following, separated by commas: lexing,linting,parsing,templating)
 ignore = None
-# Warn only for rule codes (one or more rule codes, separated by commas: e.g. L001,L002)
+# Warn only for rule codes (one or more rule codes, separated by commas: e.g. LT01,LT02)
 # Also works for templating and parsing errors by using TMP or PRS
 warnings = None
 # Ignore linting errors found within sections of code coming directly from
@@ -50,14 +50,30 @@ large_file_skip_byte_limit = 20000
 # If negative or zero, implies number_of_cpus - specified_number.
 # e.g. -1 means use all processors but one. 0  means all cpus.
 processes = 1
+# Max line length is set by default to be in line with the dbt style guide.
+# https://github.com/dbt-labs/corp/blob/main/dbt_style_guide.md
+# Set to zero or negative to disable checks.
+max_line_length = 80
 
 [sqlfluff:indentation]
 # See https://docs.sqlfluff.com/en/stable/layout.html#configuring-indent-locations
+indent_unit = space
+tab_space_size = 4
 indented_joins = False
 indented_ctes = False
 indented_using_on = True
 indented_on_contents = True
+indented_then = True
+allow_implicit_indents = False
 template_blocks_indent = True
+# This is a comma separated list of elements to skip
+# indentation edits to.
+skip_indentation_in = script_content
+# If comments are found at the end of long lines, we default to moving
+# them to the line _before_ their current location as the convention is
+# that a comment precedes the line it describes. However if you prefer
+# comments moved _after_, this configuration setting can be set to "after".
+trailing_comments = before
 
 # Layout configuration
 # See https://docs.sqlfluff.com/en/stable/layout.html#configuring-layout-and-spacing
@@ -91,8 +107,18 @@ spacing_after = touch
 [sqlfluff:layout:type:end_square_bracket]
 spacing_before = touch
 
+[sqlfluff:layout:type:start_angle_bracket]
+spacing_after = touch
+
+[sqlfluff:layout:type:end_angle_bracket]
+spacing_before = touch
+
 [sqlfluff:layout:type:casting_operator]
 spacing_before = touch
+spacing_after = touch:inline
+
+[sqlfluff:layout:type:slice]
+spacing_before = touch
 spacing_after = touch
 
 [sqlfluff:layout:type:comparison_operator]
@@ -104,12 +130,98 @@ spacing_within = touch
 line_position = leading
 
 [sqlfluff:layout:type:object_reference]
-spacing_within = inline
+spacing_within = touch:inline
+
+[sqlfluff:layout:type:numeric_literal]
+spacing_within = touch:inline
+
+[sqlfluff:layout:type:sign_indicator]
+spacing_after = touch:inline
+
+[sqlfluff:layout:type:tilde]
+spacing_after = touch:inline
+
+[sqlfluff:layout:type:function_name]
+spacing_within = touch:inline
+spacing_after = touch:inline
+
+[sqlfluff:layout:type:array_type]
+spacing_within = touch:inline
+
+[sqlfluff:layout:type:typed_array_literal]
+spacing_within = touch
+
+[sqlfluff:layout:type:sized_array_type]
+spacing_within = touch
+
+[sqlfluff:layout:type:struct_type]
+spacing_within = touch:inline
+
+[sqlfluff:layout:type:bracketed_arguments]
+spacing_before = touch:inline
+
+[sqlfluff:layout:type:typed_struct_literal]
+spacing_within = touch
+
+[sqlfluff:layout:type:semi_structured_expression]
+spacing_within = touch:inline
+spacing_before = touch:inline
+
+[sqlfluff:layout:type:array_accessor]
+spacing_before = touch:inline
+
+[sqlfluff:layout:type:colon]
+spacing_before = touch
 
 [sqlfluff:layout:type:comment]
 spacing_before = any
 spacing_after = any
 
+[sqlfluff:layout:type:placeholder]
+# Placeholders exist "outside" the rendered SQL syntax
+# so we shouldn't enforce any particular spacing around
+# them.
+spacing_before = any
+spacing_after = any
+
+[sqlfluff:layout:type:common_table_expression]
+# The definition part of a CTE should fit on one line where possible.
+# Users who regularly define column names in their CTEs may wish
+# to relax this config to just `single`.
+spacing_within = single:inline
+
+# By setting a selection of clauses to "alone", we hint to the reflow
+# algorithm that in the case of a long single line statement, the
+# first place to add newlines would be around these clauses.
+# Setting this to "alone:strict" would always _force_ line breaks
+# around them even if the line isn't too long.
+[sqlfluff:layout:type:select_clause]
+line_position = alone
+
+[sqlfluff:layout:type:where_clause]
+line_position = alone
+
+[sqlfluff:layout:type:from_clause]
+line_position = alone
+
+[sqlfluff:layout:type:join_clause]
+line_position = alone
+
+[sqlfluff:layout:type:groupby_clause]
+line_position = alone
+
+[sqlfluff:layout:type:orderby_clause]
+# NOTE: Order by clauses appear in many places other than in a select
+# clause. To avoid unexpected behaviour we use `leading` in this
+# case rather than `alone`.
+line_position = leading
+
+[sqlfluff:layout:type:having_clause]
+line_position = alone
+
+[sqlfluff:layout:type:limit_clause]
+line_position = alone
+
 # Template loop tokens shouldn't dictate spacing around them.
 [sqlfluff:layout:type:template_loop]
 spacing_before = any
@@ -123,118 +235,125 @@ apply_dbt_builtins = True
 
 # Some rules can be configured directly from the config common to other rules
 [sqlfluff:rules]
-tab_space_size = 4
-# Max line length is set by default to be in line with the dbt style guide.
-# https://github.com/dbt-labs/corp/blob/main/dbt_style_guide.md
-max_line_length = 80
-indent_unit = space
 allow_scalar = True
 single_table_references = consistent
 unquoted_identifiers_policy = all
 
-# Some rules have their own specific config
-[sqlfluff:rules:L003]
-hanging_indents = True
-
-[sqlfluff:rules:L010]
+[sqlfluff:rules:capitalisation.keywords]
 # Keywords
 capitalisation_policy = consistent
 # Comma separated list of words to ignore for this rule
 ignore_words = None
 ignore_words_regex = None
 
-[sqlfluff:rules:L011]
-# Aliasing preference for tables
-aliasing = explicit
-
-[sqlfluff:rules:L012]
-# Aliasing preference for columns
-aliasing = explicit
-
-[sqlfluff:rules:L014]
+[sqlfluff:rules:capitalisation.identifiers]
 # Unquoted identifiers
 extended_capitalisation_policy = consistent
 # Comma separated list of words to ignore for this rule
 ignore_words = None
 ignore_words_regex = None
 
-[sqlfluff:rules:L016]
-# Line length
-ignore_comment_lines = False
-ignore_comment_clauses = False
-
-[sqlfluff:rules:L027]
+[sqlfluff:rules:capitalisation.functions]
+# Function names
+extended_capitalisation_policy = consistent
 # Comma separated list of words to ignore for this rule
 ignore_words = None
 ignore_words_regex = None
 
-[sqlfluff:rules:L026]
-# References must be in FROM clause
-# Disabled for some dialects (e.g. bigquery)
-force_enable = False
-
-[sqlfluff:rules:L028]
-# References must be consistently used
-# Disabled for some dialects (e.g. bigquery)
-force_enable = False
-
-[sqlfluff:rules:L029]
-# Keywords should not be used as identifiers.
-unquoted_identifiers_policy = aliases
-quoted_identifiers_policy = none
+[sqlfluff:rules:capitalisation.literals]
+# Null & Boolean Literals
+capitalisation_policy = consistent
 # Comma separated list of words to ignore for this rule
 ignore_words = None
 ignore_words_regex = None
 
-[sqlfluff:rules:L030]
-# Function names
+[sqlfluff:rules:capitalisation.types]
+# Data Types
 extended_capitalisation_policy = consistent
 # Comma separated list of words to ignore for this rule
 ignore_words = None
 ignore_words_regex = None
 
-[sqlfluff:rules:L031]
+[sqlfluff:rules:ambiguous.join]
+# Fully qualify JOIN clause
+fully_qualify_join_types = inner
+
+[sqlfluff:rules:ambiguous.column_references]
+# GROUP BY/ORDER BY column references
+group_by_and_order_by_style = consistent
+
+[sqlfluff:rules:aliasing.table]
+# Aliasing preference for tables
+aliasing = explicit
+
+[sqlfluff:rules:aliasing.column]
+# Aliasing preference for columns
+aliasing = explicit
+
+[sqlfluff:rules:aliasing.length]
+min_alias_length = None
+max_alias_length = None
+
+[sqlfluff:rules:aliasing.forbid]
 # Avoid table aliases in from clauses and join conditions.
-# Disabled for some dialects (e.g. bigquery)
+# Disabled by default for all dialects unless explicitly enabled.
+# We suggest instead using aliasing.length (AL06) in most cases.
 force_enable = False
 
-[sqlfluff:rules:L036]
-wildcard_policy = single
-
-[sqlfluff:rules:L038]
+[sqlfluff:rules:convention.select_trailing_comma]
 # Trailing commas
 select_clause_trailing_comma = forbid
 
-[sqlfluff:rules:L040]
-# Null & Boolean Literals
-capitalisation_policy = consistent
-# Comma separated list of words to ignore for this rule
-ignore_words = None
-ignore_words_regex = None
-
-[sqlfluff:rules:L042]
-# By default, allow subqueries in from clauses, but not join clauses
-forbid_subquery_in = join
-
-[sqlfluff:rules:L047]
+[sqlfluff:rules:convention.count_rows]
 # Consistent syntax to count all rows
 prefer_count_1 = False
 prefer_count_0 = False
 
-[sqlfluff:rules:L051]
-# Fully qualify JOIN clause
-fully_qualify_join_types = inner
-
-[sqlfluff:rules:L052]
+[sqlfluff:rules:convention.terminator]
 # Semi-colon formatting approach
 multiline_newline = False
 require_final_semicolon = False
 
-[sqlfluff:rules:L054]
-# GROUP BY/ORDER BY column references
-group_by_and_order_by_style = consistent
+[sqlfluff:rules:convention.blocked_words]
+# Comma separated list of blocked words that should not be used
+blocked_words = None
+blocked_regex = None
+match_source = False
+
+[sqlfluff:rules:convention.quoted_literals]
+# Consistent usage of preferred quotes for quoted literals
+preferred_quoted_literal_style = consistent
+# Disabled for dialects that do not support single and double quotes for quoted literals (e.g. Postgres)
+force_enable = False
+
+[sqlfluff:rules:convention.casting_style]
+# SQL type casting
+preferred_type_casting_style = consistent
+
+[sqlfluff:rules:references.from]
+# References must be in FROM clause
+# Disabled for some dialects (e.g. bigquery)
+force_enable = False
+
+[sqlfluff:rules:references.qualification]
+# Comma separated list of words to ignore for this rule
+ignore_words = None
+ignore_words_regex = None
+
+[sqlfluff:rules:references.consistent]
+# References must be consistently used
+# Disabled for some dialects (e.g. bigquery)
+force_enable = False
+
+[sqlfluff:rules:references.keywords]
+# Keywords should not be used as identifiers.
+unquoted_identifiers_policy = aliases
+quoted_identifiers_policy = none
+# Comma separated list of words to ignore for this rule
+ignore_words = None
+ignore_words_regex = None
 
-[sqlfluff:rules:L057]
+[sqlfluff:rules:references.special_chars]
 # Special characters in identifiers
 unquoted_identifiers_policy = all
 quoted_identifiers_policy = all
@@ -243,35 +362,22 @@ additional_allowed_characters = None
 ignore_words = None
 ignore_words_regex = None
 
-[sqlfluff:rules:L059]
+[sqlfluff:rules:references.quoting]
 # Policy on quoted and unquoted identifiers
 prefer_quoted_identifiers = False
+prefer_quoted_keywords = False
 ignore_words = None
 ignore_words_regex = None
 force_enable = False
 
-[sqlfluff:rules:L062]
-# Comma separated list of blocked words that should not be used
-blocked_words = None
-blocked_regex = None
-
-[sqlfluff:rules:L063]
-# Data Types
-extended_capitalisation_policy = consistent
-# Comma separated list of words to ignore for this rule
-ignore_words = None
-ignore_words_regex = None
-
-[sqlfluff:rules:L064]
-# Consistent usage of preferred quotes for quoted literals
-preferred_quoted_literal_style = consistent
-# Disabled for dialects that do not support single and double quotes for quoted literals (e.g. Postgres)
-force_enable = False
+[sqlfluff:rules:layout.long_lines]
+# Line length
+ignore_comment_lines = False
+ignore_comment_clauses = False
 
-[sqlfluff:rules:L066]
-min_alias_length = None
-max_alias_length = None
+[sqlfluff:rules:layout.select_targets]
+wildcard_policy = single
 
-[sqlfluff:rules:L067]
-# SQL type casting
-preferred_type_casting_style = consistent
+[sqlfluff:rules:structure.subquery]
+# By default, allow subqueries in from clauses, but not join clauses
+forbid_subquery_in = join
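The rule configuration sections above move from opaque L-codes (e.g. L010) to dotted names (e.g. capitalisation.keywords). A small sketch, using only the standard library and an assumed user config snippet, of how the new section names parse:

import configparser

cfg_text = """
[sqlfluff:rules:capitalisation.keywords]
capitalisation_policy = upper
"""
parser = configparser.ConfigParser()
parser.read_string(cfg_text)
print(parser["sqlfluff:rules:capitalisation.keywords"]["capitalisation_policy"])
# -> upper
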
diff --git a/src/sqlfluff/core/dialects/__init__.py b/src/sqlfluff/core/dialects/__init__.py
index 7e462dc..edea73a 100644
--- a/src/sqlfluff/core/dialects/__init__.py
+++ b/src/sqlfluff/core/dialects/__init__.py
@@ -24,8 +24,11 @@ _dialect_lookup = {
     "athena": ("dialect_athena", "athena_dialect"),
     "bigquery": ("dialect_bigquery", "bigquery_dialect"),
     "clickhouse": ("dialect_clickhouse", "clickhouse_dialect"),
+    "databricks": ("dialect_databricks", "databricks_dialect"),
     "db2": ("dialect_db2", "db2_dialect"),
+    "duckdb": ("dialect_duckdb", "duckdb_dialect"),
     "exasol": ("dialect_exasol", "exasol_dialect"),
+    "greenplum": ("dialect_greenplum", "greenplum_dialect"),
     "hive": ("dialect_hive", "hive_dialect"),
     "materialize": ("dialect_materialize", "materialize_dialect"),
     "mysql": ("dialect_mysql", "mysql_dialect"),
@@ -34,11 +37,10 @@ _dialect_lookup = {
     "redshift": ("dialect_redshift", "redshift_dialect"),
     "snowflake": ("dialect_snowflake", "snowflake_dialect"),
     "soql": ("dialect_soql", "soql_dialect"),
+    "sparksql": ("dialect_sparksql", "sparksql_dialect"),
     "sqlite": ("dialect_sqlite", "sqlite_dialect"),
     "teradata": ("dialect_teradata", "teradata_dialect"),
     "tsql": ("dialect_tsql", "tsql_dialect"),
-    "sparksql": ("dialect_sparksql", "sparksql_dialect"),
-    "databricks": ("dialect_sparksql", "sparksql_dialect"),
 }
 
 _legacy_dialects = {
@@ -58,6 +60,8 @@ def load_raw_dialect(label: str, base_module: str = "sqlfluff.dialects") -> Dial
     """Dynamically load a dialect."""
     if label in _legacy_dialects:
         raise SQLFluffUserError(_legacy_dialects[label])
+    elif label not in _dialect_lookup:
+        raise KeyError("Unknown dialect")
     module_name, name = _dialect_lookup[label]
     module = import_module(f"{base_module}.{module_name}")
     result: Dialect = getattr(module, name)
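A sketch of the new guard in load_raw_dialect, with a toy lookup table standing in for the real _dialect_lookup shown above:

_toy_lookup = {"ansi": ("dialect_ansi", "ansi_dialect")}

def load_raw_dialect(label: str):
    # Unknown labels now fail fast with an explicit KeyError rather
    # than an unhelpful downstream error.
    if label not in _toy_lookup:
        raise KeyError("Unknown dialect")
    return _toy_lookup[label]

print(load_raw_dialect("ansi"))  # ('dialect_ansi', 'ansi_dialect')
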
diff --git a/src/sqlfluff/core/dialects/base.py b/src/sqlfluff/core/dialects/base.py
index 5232653..17f261d 100644
--- a/src/sqlfluff/core/dialects/base.py
+++ b/src/sqlfluff/core/dialects/base.py
@@ -1,5 +1,6 @@
 """Defines the base dialect class."""
 
+import sys
 from typing import Set, Union, Type
 
 from sqlfluff.core.parser import (
@@ -100,6 +101,14 @@ class Dialect:
             self._sets[label] = set()
         return self._sets[label]
 
+    def update_keywords_set_from_multiline_string(
+        self, set_label: str, values: str
+    ) -> None:
+        """Special function to update a keywords set from a multi-line string."""
+        self.sets(set_label).update(
+            [n.strip().upper() for n in values.strip().split("\n")]
+        )
+
     def copy_as(self, name):
         """Copy this dialect and create a new one with a different name.
 
@@ -267,19 +276,33 @@ class Dialect:
                         name, self.name
                     )
                 )
-        else:  # pragma: no cover
-            if name.endswith("KeywordSegment"):
-                keyword_tip = (
-                    " Perhaps specify the keyword? "
-                    "https://github.com/sqlfluff/sqlfluff/wiki/Contributing-Dialect-Changes#keywords"  # noqa E501
+        elif name.endswith("KeywordSegment"):  # pragma: no cover
+            keyword = name[0:-14]
+            keyword_tip = (
+                "\n\nThe syntax in the query is not (yet?) supported. Try to"
+                " narrow down your query to a minimal, reproducible case and"
+                " raise an issue on GitHub.\n\n"
+                "Or, even better, see this guide on how to help contribute"
+                " keyword and/or dialect updates:\n"
+                "https://github.com/sqlfluff/sqlfluff/wiki/Contributing-Dialect-Changes#keywords"  # noqa E501
+            )
+            # Keyword errors are common, so avoid printing the whole, scary
+            # traceback as it's not that useful and is confusing to people.
+            sys.tracebacklimit = 0
+            raise RuntimeError(
+                (
+                    "Grammar refers to the "
+                    "{!r} keyword which was not found in the {} dialect.{}".format(
+                        keyword, self.name, keyword_tip
+                    )
                 )
-            else:
-                keyword_tip = ""
+            )
+        else:  # pragma: no cover
             raise RuntimeError(
                 (
                     "Grammar refers to "
-                    "{!r} which was not found in the {} dialect.{}".format(
-                        name, self.name, keyword_tip
+                    "{!r} which was not found in the {} dialect.".format(
+                        name, self.name
                     )
                 )
             )
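The new update_keywords_set_from_multiline_string helper reduces to a strip/split/upper pipeline; a standalone sketch of the same transformation:

values = """
    select
    from
    where
"""
# Equivalent to what the helper adds to the named keyword set.
keywords = {n.strip().upper() for n in values.strip().split("\n")}
print(sorted(keywords))  # ['FROM', 'SELECT', 'WHERE']
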
diff --git a/src/sqlfluff/core/enums.py b/src/sqlfluff/core/enums.py
index 8b067d0..c6304d7 100644
--- a/src/sqlfluff/core/enums.py
+++ b/src/sqlfluff/core/enums.py
@@ -12,6 +12,7 @@ class FormatType(Enum):
     yaml = "yaml"
     github_annotation = "github-annotation"
     github_annotation_native = "github-annotation-native"
+    none = "none"  # An option to return _no output_.
 
 
 class Color(Enum):
diff --git a/src/sqlfluff/core/errors.py b/src/sqlfluff/core/errors.py
index d686566..c88639e 100644
--- a/src/sqlfluff/core/errors.py
+++ b/src/sqlfluff/core/errors.py
@@ -83,6 +83,7 @@ class SQLBaseError(ValueError):
             "line_pos": self.line_pos,
             "code": self.rule_code(),
             "description": self.desc(),
+            "name": getattr(self, "rule").name if hasattr(self, "rule") else "",
         }
 
     def check_tuple(self) -> CheckTuple:
@@ -105,7 +106,7 @@ class SQLBaseError(ValueError):
     def warning_if_in(self, warning_iterable: List[str]):
         """Warning only for this violation if it matches the iterable.
 
-        Designed for rule codes so works with L001, L00X but also TMP or PRS
+        Designed for rule codes so works with L001, LT01 but also TMP or PRS
         for templating and parsing errors.
         """
         if self.rule_code() in warning_iterable:
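The new "name" field in to_dict falls back to an empty string for violations with no associated rule (e.g. parse errors); a minimal sketch of that fallback:

class _Violation:
    pass  # Stand-in for a violation with no `rule` attribute.

v = _Violation()
print(repr(getattr(v, "rule").name if hasattr(v, "rule") else ""))  # ''
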
diff --git a/src/sqlfluff/core/linter/__init__.py b/src/sqlfluff/core/linter/__init__.py
index a23ae26..cb14fb5 100644
--- a/src/sqlfluff/core/linter/__init__.py
+++ b/src/sqlfluff/core/linter/__init__.py
@@ -1,6 +1,11 @@
 """Linter class and helper classes."""
 
-from sqlfluff.core.linter.common import RuleTuple, ParsedString, NoQaDirective
+from sqlfluff.core.linter.common import (
+    RuleTuple,
+    ParsedString,
+    NoQaDirective,
+    RenderedFile,
+)
 from sqlfluff.core.linter.linted_file import LintedFile
 from sqlfluff.core.linter.linting_result import LintingResult
 from sqlfluff.core.linter.linter import Linter
@@ -12,4 +17,5 @@ __all__ = (
     "LintedFile",
     "LintingResult",
     "Linter",
+    "RenderedFile",
 )
diff --git a/src/sqlfluff/core/linter/common.py b/src/sqlfluff/core/linter/common.py
index 6ec3be5..2ec8d47 100644
--- a/src/sqlfluff/core/linter/common.py
+++ b/src/sqlfluff/core/linter/common.py
@@ -18,7 +18,10 @@ class RuleTuple(NamedTuple):
     """Rule Tuple object for describing rules."""
 
     code: str
+    name: str
     description: str
+    groups: Tuple[str, ...]
+    aliases: Tuple[str, ...]
 
 
 class NoQaDirective(NamedTuple):
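A sketch of the extended RuleTuple above, with illustrative values; in 2.x each rule carries a human-readable name, its groups, and any legacy aliases:

from typing import NamedTuple, Tuple

class RuleTuple(NamedTuple):
    code: str
    name: str
    description: str
    groups: Tuple[str, ...]
    aliases: Tuple[str, ...]

rt = RuleTuple(
    "LT01", "layout.spacing", "Inappropriate spacing.",
    groups=("all", "layout"), aliases=("L001",),
)
print(rt.name, rt.aliases)  # layout.spacing ('L001',)
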
diff --git a/src/sqlfluff/core/linter/linted_file.py b/src/sqlfluff/core/linter/linted_file.py
index 6acfdb9..e7accd2 100644
--- a/src/sqlfluff/core/linter/linted_file.py
+++ b/src/sqlfluff/core/linter/linted_file.py
@@ -10,6 +10,8 @@ import logging
 import shutil
 import stat
 import tempfile
+from collections import defaultdict
+from dataclasses import dataclass
 from typing import (
     Any,
     Iterable,
@@ -20,6 +22,7 @@ from typing import (
     Union,
     cast,
     Type,
+    Dict,
 )
 
 from sqlfluff.core.errors import (
@@ -27,7 +30,6 @@ from sqlfluff.core.errors import (
     SQLLintError,
     CheckTuple,
 )
-from sqlfluff.core.string_helpers import findall
 from sqlfluff.core.templaters import TemplatedFile, RawFileSlice
 
 # Classes needed only for type checking
@@ -39,12 +41,36 @@ from sqlfluff.core.linter.common import NoQaDirective
 linter_logger: logging.Logger = logging.getLogger("sqlfluff.linter")
 
 
+@dataclass
+class FileTimings:
+    """A dataclass for holding the timings information for a file."""
+
+    step_timings: Dict[str, float]
+    # NOTE: Because rules may run more than once for any
+    # given file we record each run and then we can post
+    # process this as we wish later.
+    rule_timings: List[Tuple[str, str, float]]
+
+    def get_rule_timing_dict(self) -> Dict[str, float]:
+        """Generate a summary to total time in each rule.
+
+        This is primarily for csv export.
+        """
+        total_times: Dict[str, float] = defaultdict(float)
+
+        for code, _, time in self.rule_timings:
+            total_times[code] += time
+
+        # Return as plain dict
+        return dict(total_times.items())
+
+
 class LintedFile(NamedTuple):
     """A class to store the idea of a linted file."""
 
     path: str
     violations: List[SQLBaseError]
-    time_dict: dict
+    timings: Optional[FileTimings]
     tree: Optional[BaseSegment]
     ignore_mask: List[NoQaDirective]
     templated_file: TemplatedFile
@@ -88,7 +114,7 @@ class LintedFile(NamedTuple):
                 new_violations.append(v)
                 dedupe_buffer.add(signature)
             else:
-                linter_logger.debug("Removing duplicate source violation: %s", v)
+                linter_logger.debug("Removing duplicate source violation: %r", v)
         return new_violations
 
     def get_violations(
@@ -123,8 +149,9 @@ class LintedFile(NamedTuple):
             violations = [v for v in violations if v.rule_code() in rules]
         # Filter fixable
         if fixable is not None:
-            # Assume that fixable is true or false if not None
-            violations = [v for v in violations if v.fixable is fixable]
+            # Assume that fixable is true or false if not None.
+            # Fatal errors should always come through, regardless.
+            violations = [v for v in violations if v.fixable is fixable or v.fatal]
         # Filter ignorable violations
         if filter_ignore:
             violations = [v for v in violations if not v.ignore]
@@ -298,11 +325,12 @@ class LintedFile(NamedTuple):
         # Generate patches from the fixed tree. In the process we sort
         # and deduplicate them so that the resultant list is in the
         # the right order for the source file without any duplicates.
-        # TODO: Requires a mechanism for generating patches for source only
-        # fixes.
         filtered_source_patches = self._generate_source_patches(
             self.tree, self.templated_file
         )
+        linter_logger.debug("Filtered source patches:")
+        for idx, patch in enumerate(filtered_source_patches):
+            linter_logger.debug("    %s: %s", idx, patch)
 
         # Any Template tags in the source file are off limits, unless
         # we're explicitly fixing the source file.
@@ -311,7 +339,6 @@ class LintedFile(NamedTuple):
 
         # We now slice up the file using the patches and any source only slices.
         # This gives us regions to apply changes to.
-        # TODO: This is the last hurdle for source only fixes.
         slice_buff = self._slice_source_file_using_patches(
             filtered_source_patches, source_only_slices, self.templated_file.source_str
         )
@@ -395,40 +422,15 @@ class LintedFile(NamedTuple):
                 dedupe_buffer.append(patch.dedupe_tuple())
             else:  # pragma: no cover
                 # We've got a situation where the ends of our patch need to be
-                # more carefully mapped. Likely because we're greedily including
-                # a section of source templating with our fix and we need to work
-                # around it gracefully.
-
-                # Identify all the places the string appears in the source content.
-                positions = list(findall(patch.templated_str, patch.source_str))
-                if len(positions) != 1:
-                    # NOTE: This section is not covered in tests. While we
-                    # don't have an example of it's use (we should), the
-                    # code after this relies on there being only one
-                    # instance found - so the safety check remains.
-                    linter_logger.debug(  # pragma: no cover
-                        "        - Skipping edit patch on non-unique templated "
-                        "content: %s",
-                        patch,
-                    )
-                    continue  # pragma: no cover
-
-                # We have a single occurrence of the thing we want to patch. This
-                # means we can use its position to place our patch.
-                new_source_slice = slice(
-                    patch.source_slice.start + positions[0],
-                    patch.source_slice.start + positions[0] + len(patch.templated_str),
+                # more carefully mapped. This used to happen with greedy template
+                # element matching, but should now never happen. In the event that
+                # it does, we'll warn but carry on.
+                linter_logger.warning(
+                    "Skipping edit patch on uncertain templated section [%s], "
+                    "Please report this warning on GitHub along with the query "
+                    "that produced it.",
+                    (patch.patch_category, patch.source_slice),
                 )
-                linter_logger.debug(
-                    "      * Keeping Tricky Case. Positions: %s, New Slice: %s, "
-                    "Patch: %s",
-                    positions,
-                    new_source_slice,
-                    patch,
-                )
-                patch.source_slice = new_source_slice
-                filtered_source_patches.append(patch)
-                dedupe_buffer.append(patch.dedupe_tuple())
                 continue
 
         # Sort the patches before building up the file.
@@ -493,7 +495,10 @@ class LintedFile(NamedTuple):
                 slice_buff.append(slice(source_idx, patch.source_slice.start))
 
             # Is this patch covering an area we've already covered?
-            if patch.source_slice.start < source_idx:
+            if patch.source_slice.start < source_idx:  # pragma: no cover
+                # NOTE: With more detailed templating this shouldn't happen,
+                # but in the off-chance that it does, this code path remains.
                 linter_logger.info(
                     "Skipping overlapping patch at Index %s, Patch: %s",
                     source_idx,
@@ -585,6 +590,7 @@ class LintedFile(NamedTuple):
         with tempfile.NamedTemporaryFile(
             mode="w",
             encoding=encoding,
+            newline="",  # NOTE: No newline conversion. Write as read.
             prefix=basename,
             dir=dirname,
             suffix=os.path.splitext(output_path)[1],
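The FileTimings.get_rule_timing_dict method above sums per-run timings by rule code, since a rule may run several times on one file; the core of it in isolation, with made-up timings:

from collections import defaultdict

rule_timings = [
    ("LT01", "layout.spacing", 0.25),
    ("LT01", "layout.spacing", 0.5),
    ("CP01", "capitalisation.keywords", 0.125),
]
total_times = defaultdict(float)
for code, _, time in rule_timings:
    total_times[code] += time
print(dict(total_times))  # {'LT01': 0.75, 'CP01': 0.125}
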
diff --git a/src/sqlfluff/core/linter/linter.py b/src/sqlfluff/core/linter/linter.py
index 5174234..66bc9c1 100644
--- a/src/sqlfluff/core/linter/linter.py
+++ b/src/sqlfluff/core/linter/linter.py
@@ -14,6 +14,7 @@ from typing import (
     Set,
     Tuple,
     Type,
+    Dict,
     cast,
 )
 
@@ -33,14 +34,13 @@ from sqlfluff.core.parser import Lexer, Parser, RegexLexer
 from sqlfluff.core.file_helpers import get_encoding
 from sqlfluff.core.templaters import TemplatedFile
 from sqlfluff.core.rules import get_ruleset
-from sqlfluff.core.rules.doc_decorators import is_fix_compatible
 from sqlfluff.core.config import FluffConfig, ConfigLoader, progress_bar_configuration
 
 # Classes needed only for type checking
 from sqlfluff.core.parser.segments.base import BaseSegment, SourceFix
 from sqlfluff.core.parser.segments.meta import MetaSegment
 from sqlfluff.core.parser.segments.raw import RawSegment
-from sqlfluff.core.rules import BaseRule
+from sqlfluff.core.rules import BaseRule, RulePack
 
 from sqlfluff.core.linter.common import (
     RuleTuple,
@@ -48,12 +48,13 @@ from sqlfluff.core.linter.common import (
     NoQaDirective,
     RenderedFile,
 )
-from sqlfluff.core.linter.linted_file import LintedFile
+from sqlfluff.core.linter.linted_file import LintedFile, FileTimings
 from sqlfluff.core.linter.linted_dir import LintedDir
 from sqlfluff.core.linter.linting_result import LintingResult
 
 
 WalkableType = Iterable[Tuple[str, Optional[List[str]], List[str]]]
+RuleTimingsType = List[Tuple[str, str, float]]
 
 # Instantiate the linter logger
 linter_logger: logging.Logger = logging.getLogger("sqlfluff.linter")
@@ -93,19 +94,22 @@ class Linter:
         # Store references to user rule classes
         self.user_rules = user_rules or []
 
-    def get_ruleset(self, config: Optional[FluffConfig] = None) -> List[BaseRule]:
+    def get_rulepack(self, config: Optional[FluffConfig] = None) -> RulePack:
         """Get hold of a set of rules."""
         rs = get_ruleset()
         # Register any user rules
         for rule in self.user_rules:
             rs.register(rule)
         cfg = config or self.config
-        return rs.get_rulelist(config=cfg)
+        return rs.get_rulepack(config=cfg)
 
     def rule_tuples(self) -> List[RuleTuple]:
         """A simple pass through to access the rule tuples of the rule set."""
-        rs = self.get_ruleset()
-        return [RuleTuple(rule.code, rule.description) for rule in rs]
+        rs = self.get_rulepack()
+        return [
+            RuleTuple(rule.code, rule.name, rule.description, rule.groups, rule.aliases)
+            for rule in rs.rules
+        ]
 
     # #### Static methods
     # These are the building blocks of the linting process.
@@ -258,13 +262,13 @@ class Linter:
     def parse_noqa(
         comment: str,
         line_no: int,
-        rule_codes: List[str],
+        reference_map: Dict[str, Set[str]],
     ):
         """Extract ignore mask entries from a comment string."""
         # Also trim any whitespace afterward
 
         # Comment lines can also have noqa e.g.
-        # --dafhsdkfwdiruweksdkjdaffldfsdlfjksd -- noqa: L016
+        # --dafhsdkfwdiruweksdkjdaffldfsdlfjksd -- noqa: LT05
         # Therefore extract last possible inline ignore.
         comment = [c.strip() for c in comment.split("--")][-1]
 
@@ -306,22 +310,25 @@ class Linter:
                         unexpanded_rules = tuple(
                             r.strip() for r in rule_part.split(",")
                         )
-                        expanded_rules = []
+                        # We use a set to do natural deduplication.
+                        expanded_rules: Set[str] = set()
                         for r in unexpanded_rules:
-                            expanded_rule = [
-                                x
-                                for x in fnmatch.filter(rule_codes, r)
-                                if x not in expanded_rules
-                            ]
-                            if expanded_rule:
-                                expanded_rules.extend(expanded_rule)
-                            elif r not in expanded_rules:
+                            matched = False
+                            for expanded in (
+                                reference_map[x]
+                                for x in fnmatch.filter(reference_map.keys(), r)
+                            ):
+                                expanded_rules |= expanded
+                                matched = True
+
+                            if not matched:
                                 # We were unable to expand the glob.
                                 # Therefore assume the user is referencing
                                 # a special error type (e.g. PRS, LXR, or TMP)
                                 # and add this to the list of rules to ignore.
-                                expanded_rules.append(r)
-                        rules = tuple(expanded_rules)
+                                expanded_rules.add(r)
+                        # Sort for consistency
+                        rules = tuple(sorted(expanded_rules))
                     else:
                         rules = None
                     return NoQaDirective(line_no, rules, action)
@@ -416,13 +423,13 @@ class Linter:
     def extract_ignore_from_comment(
         cls,
         comment: RawSegment,
-        rule_codes: List[str],
+        reference_map: Dict[str, Set[str]],
     ):
         """Extract ignore mask entries from a comment segment."""
         # Also trim any whitespace afterward
         comment_content = comment.raw_trimmed().strip()
         comment_line, _ = comment.pos_marker.source_position()
-        result = cls.parse_noqa(comment_content, comment_line, rule_codes)
+        result = cls.parse_noqa(comment_content, comment_line, reference_map)
         if isinstance(result, SQLParseError):
             result.segment = comment
         return result
@@ -431,14 +438,14 @@ class Linter:
     def extract_ignore_mask_tree(
         cls,
         tree: BaseSegment,
-        rule_codes: List[str],
+        reference_map: Dict[str, Set[str]],
     ) -> Tuple[List[NoQaDirective], List[SQLBaseError]]:
         """Look for inline ignore comments and return NoQaDirectives."""
         ignore_buff: List[NoQaDirective] = []
         violations: List[SQLBaseError] = []
         for comment in tree.recursive_crawl("comment"):
             if comment.is_type("inline_comment"):
-                ignore_entry = cls.extract_ignore_from_comment(comment, rule_codes)
+                ignore_entry = cls.extract_ignore_from_comment(comment, reference_map)
                 if isinstance(ignore_entry, SQLParseError):
                     violations.append(ignore_entry)
                 elif ignore_entry:
@@ -452,7 +459,7 @@ class Linter:
         cls,
         source: str,
         inline_comment_regex: RegexLexer,
-        rule_codes: List[str],
+        reference_map: Dict[str, Set[str]],
     ) -> Tuple[List[NoQaDirective], List[SQLBaseError]]:
         """Look for inline ignore comments and return NoQaDirectives.
 
@@ -465,7 +472,7 @@ class Linter:
             match = inline_comment_regex.search(line) if line else None
             if match:
                 ignore_entry = cls.parse_noqa(
-                    line[match[0] : match[1]], idx + 1, rule_codes
+                    line[match[0] : match[1]], idx + 1, reference_map
                 )
                 if isinstance(ignore_entry, SQLParseError):
                     violations.append(ignore_entry)  # pragma: no cover
@@ -480,12 +487,12 @@ class Linter:
         cls,
         tree: BaseSegment,
         config: FluffConfig,
-        rule_set: List[BaseRule],
+        rule_pack: RulePack,
         fix: bool = False,
         fname: Optional[str] = None,
         templated_file: Optional[TemplatedFile] = None,
         formatter: Any = None,
-    ) -> Tuple[BaseSegment, List[SQLBaseError], List[NoQaDirective]]:
+    ) -> Tuple[BaseSegment, List[SQLBaseError], List[NoQaDirective], RuleTimingsType]:
         """Lint and optionally fix a tree object."""
         # Keep track of the linting errors on the very first linter pass. The
         # list of issues output by "lint" and "fix" only includes issues present
@@ -496,6 +503,8 @@ class Linter:
         last_fixes = None
         # Keep a set of previous versions to catch infinite loops.
         previous_versions: Set[Tuple[str, Tuple[SourceFix, ...]]] = {(tree.raw, ())}
+        # Keep a buffer for recording rule timings.
+        rule_timings: RuleTimingsType = []
 
         # If we are fixing then we want to loop up to the runaway_limit, otherwise just
         # once for linting.
@@ -503,12 +512,13 @@ class Linter:
 
         # Dispatch the output for the lint header
         if formatter:
-            formatter.dispatch_lint_header(fname)
+            formatter.dispatch_lint_header(fname, sorted(rule_pack.codes()))
 
         # Look for comment segments which might indicate lines to ignore.
         if not config.get("disable_noqa"):
-            rule_codes = [r.code for r in rule_set]
-            ignore_buff, ivs = cls.extract_ignore_mask_tree(tree, rule_codes)
+            ignore_buff, ivs = cls.extract_ignore_mask_tree(
+                tree, rule_pack.reference_map
+            )
             initial_linting_errors += ivs
         else:
             ignore_buff = []
@@ -528,10 +538,10 @@ class Linter:
         for phase in phases:
             if len(phases) > 1:
                 rules_this_phase = [
-                    rule for rule in rule_set if rule.lint_phase == phase
+                    rule for rule in rule_pack.rules if rule.lint_phase == phase
                 ]
             else:
-                rules_this_phase = rule_set
+                rules_this_phase = rule_pack.rules
             for loop in range(loop_limit if phase == "main" else 2):
 
                 def is_first_linter_pass():
@@ -547,7 +557,7 @@ class Linter:
                 if is_first_linter_pass():
                     # In order to compute initial_linting_errors correctly, need
                     # to run all rules on the first loop of the main phase.
-                    rules_this_phase = rule_set
+                    rules_this_phase = rule_pack.rules
                 progress_bar_crawler = tqdm(
                     rules_this_phase,
                     desc="lint by rules",
@@ -564,11 +574,12 @@ class Linter:
                     if (
                         fix
                         and not is_first_linter_pass()
-                        and not is_fix_compatible(crawler)
+                        and not crawler.is_fix_compatible
                     ):
                         continue
 
                     progress_bar_crawler.set_description(f"rule {crawler.code}")
+                    t0 = time.monotonic()
 
                     # fixes should be a dict {} with keys edit, delete, create
                     # delete is just a list of segments to delete
@@ -598,8 +609,13 @@ class Linter:
                                 f"Rule {crawler.code} returned conflicting "
                                 "fixes with the same anchor. This is only "
                                 "supported for create_before+create_after, so "
-                                f"the fixes will not be applied. {fixes!r}"
+                                "the fixes will not be applied. "
                             )
+                            for uuid, info in anchor_info.items():
+                                if not info.is_valid:
+                                    message += f"\n{uuid}:"
+                                    for fix in info.fixes:
+                                        message += f"\n    {fix}"
                             cls._report_conflicting_fixes_same_anchor(message)
                             for lint_result in linting_errors:
                                 lint_result.fixes = []
@@ -636,6 +652,11 @@ class Linter:
                                 # we want to stop.
                                 cls._warn_unfixable(crawler.code)
 
+                    # Record rule timing
+                    rule_timings.append(
+                        (crawler.code, crawler.name, time.monotonic() - t0)
+                    )
+
                 if fix and not changed:
                     # We did not change the file. Either the file is clean (no
                     # fixes), or any fixes which are present will take us back
@@ -671,18 +692,21 @@ class Linter:
                     # Reason: When the linter hits the loop limit, the file is often
                     # messy, e.g. some of the fixes were applied repeatedly, possibly
                     # other weird things. We don't want the user to see this junk!
-                    return save_tree, initial_linting_errors, ignore_buff
+                    return save_tree, initial_linting_errors, ignore_buff, rule_timings
 
         if config.get("ignore_templated_areas", default=True):
             initial_linting_errors = cls.remove_templated_errors(initial_linting_errors)
 
-        return tree, initial_linting_errors, ignore_buff
+        linter_logger.info("\n###\n#\n# {}\n#\n###".format("Fixed Tree:"))
+        linter_logger.info("\n" + tree.stringify())
+
+        return tree, initial_linting_errors, ignore_buff, rule_timings
 
     @classmethod
     def lint_parsed(
         cls,
         parsed: ParsedString,
-        rule_set: List[BaseRule],
+        rule_pack: RulePack,
         fix: bool = False,
         formatter: Any = None,
         encoding: str = "utf8",
@@ -694,10 +718,15 @@ class Linter:
         if parsed.tree:
             t0 = time.monotonic()
             linter_logger.info("LINTING (%s)", parsed.fname)
-            tree, initial_linting_errors, ignore_buff = cls.lint_fix_parsed(
+            (
+                tree,
+                initial_linting_errors,
+                ignore_buff,
+                rule_timings,
+            ) = cls.lint_fix_parsed(
                 parsed.tree,
                 config=parsed.config,
-                rule_set=rule_set,
+                rule_pack=rule_pack,
                 fix=fix,
                 fname=parsed.fname,
                 templated_file=parsed.templated_file,
@@ -713,6 +742,7 @@ class Linter:
             # If no parsed tree, set to None
             tree = None
             ignore_buff = []
+            rule_timings = []
             if not parsed.config.get("disable_noqa"):
                 # Templating and/or parsing have failed. Look for "noqa"
                 # comments (the normal path for identifying these comments
@@ -725,7 +755,7 @@ class Linter:
                         for lm in parsed.config.get("dialect_obj").lexer_matchers
                         if lm.name == "inline_comment"
                     ][0],
-                    [r.code for r in rule_set],
+                    rule_pack.reference_map,
                 )
                 violations += ignore_violations
 
@@ -738,7 +768,7 @@ class Linter:
             parsed.fname,
             # Deduplicate violations
             LintedFile.deduplicate_in_source_space(violations),
-            time_dict,
+            FileTimings(time_dict, rule_timings),
             tree,
             ignore_mask=ignore_buff,
             templated_file=parsed.templated_file,
@@ -764,7 +794,7 @@ class Linter:
     def lint_rendered(
         cls,
         rendered: RenderedFile,
-        rule_set: List[BaseRule],
+        rule_pack: RulePack,
         fix: bool = False,
         formatter: Any = None,
     ) -> LintedFile:
@@ -772,7 +802,7 @@ class Linter:
         parsed = cls.parse_rendered(rendered)
         return cls.lint_parsed(
             parsed,
-            rule_set=rule_set,
+            rule_pack=rule_pack,
             fix=fix,
             formatter=formatter,
             encoding=rendered.encoding,
@@ -882,11 +912,11 @@ class Linter:
     ) -> Tuple[BaseSegment, List[SQLBaseError]]:
         """Return the fixed tree and violations from lintfix when we're fixing."""
         config = config or self.config
-        rule_set = self.get_ruleset(config=config)
-        fixed_tree, violations, _ = self.lint_fix_parsed(
+        rule_pack = self.get_rulepack(config=config)
+        fixed_tree, violations, _, _ = self.lint_fix_parsed(
             tree,
             config,
-            rule_set,
+            rule_pack,
             fix=True,
             fname=fname,
             templated_file=templated_file,
@@ -903,11 +933,11 @@ class Linter:
     ) -> List[SQLBaseError]:
         """Return just the violations from lintfix when we're only linting."""
         config = config or self.config
-        rule_set = self.get_ruleset(config=config)
-        _, violations, _ = self.lint_fix_parsed(
+        rule_pack = self.get_rulepack(config=config)
+        _, violations, _, _ = self.lint_fix_parsed(
             tree,
             config,
-            rule_set,
+            rule_pack,
             fix=False,
             fname=fname,
             templated_file=templated_file,
@@ -938,11 +968,11 @@ class Linter:
             config=config,
         )
         # Get rules as appropriate
-        rule_set = self.get_ruleset(config=config)
+        rule_pack = self.get_rulepack(config=config)
         # Lint the file and return the LintedFile
         return self.lint_parsed(
             parsed,
-            rule_set,
+            rule_pack,
             fix=fix,
             formatter=self.formatter,
             encoding=encoding,
@@ -1046,7 +1076,12 @@ class Linter:
             for ignore_base, ignore_spec in ignores.items():
                 abs_ignore_base = os.path.abspath(ignore_base)
                 if abs_fpath.startswith(
-                    abs_ignore_base + os.sep
+                    abs_ignore_base
+                    + (
+                        ""
+                        if os.path.dirname(abs_ignore_base) == abs_ignore_base
+                        else os.sep
+                    )
                 ) and ignore_spec.match_file(
                     os.path.relpath(abs_fpath, abs_ignore_base)
                 ):
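Rule references in noqa comments now resolve through reference_map (reference -> set of codes), so names, codes and globs all expand uniformly; a sketch with an assumed map:

import fnmatch

reference_map = {
    "LT01": {"LT01"},
    "LT02": {"LT02"},
    "layout.spacing": {"LT01"},
}
expanded = set()
for key in fnmatch.filter(reference_map.keys(), "LT0*"):
    expanded |= reference_map[key]
print(sorted(expanded))  # ['LT01', 'LT02']
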
diff --git a/src/sqlfluff/core/linter/linting_result.py b/src/sqlfluff/core/linter/linting_result.py
index e1f7dc7..79b0540 100644
--- a/src/sqlfluff/core/linter/linting_result.py
+++ b/src/sqlfluff/core/linter/linting_result.py
@@ -10,6 +10,7 @@ from typing import (
     overload,
     Tuple,
     Union,
+    Set,
 )
 from typing_extensions import Literal
 
@@ -22,7 +23,7 @@ from sqlfluff.core.errors import (
     SQLTemplaterError,
 )
 
-from sqlfluff.core.timing import TimingSummary
+from sqlfluff.core.timing import TimingSummary, RuleTimingSummary
 
 # Classes needed only for type checking
 from sqlfluff.core.parser.segments.base import BaseSegment
@@ -142,16 +143,19 @@ class LintingResult:
         all_stats["status"] = "FAIL" if all_stats["violations"] > 0 else "PASS"
         return all_stats
 
-    def timing_summary(self) -> Dict[str, Dict[str, float]]:
+    def timing_summary(self) -> Dict[str, Dict[str, Any]]:
         """Return a timing summary."""
         timing = TimingSummary()
+        rules_timing = RuleTimingSummary()
         for dir in self.paths:
             for file in dir.files:
-                timing.add(file.time_dict)
-        return timing.summary()
+                if file.timings:
+                    timing.add(file.timings.step_timings)
+                    rules_timing.add(file.timings.rule_timings)
+        return {**timing.summary(), **rules_timing.summary()}
 
-    def persist_timing_records(self, filename):
-        """Persist the timing records as a csv to external analysis."""
+    def persist_timing_records(self, filename: str) -> None:
+        """Persist the timing records as a csv for external analysis."""
         meta_fields = [
             "path",
             "source_chars",
@@ -160,13 +164,29 @@ class LintingResult:
             "raw_segments",
         ]
         timing_fields = ["templating", "lexing", "parsing", "linting"]
+
+        # Iterate through all the files to get rule timing information so
+        # we know what headings we're going to need.
+        rule_codes: Set[str] = set()
+        file_timing_dicts: Dict[str, dict] = {}
+        for dir in self.paths:
+            for file in dir.files:
+                if not file.timings:  # pragma: no cover
+                    continue
+                file_timing_dicts[file.path] = file.timings.get_rule_timing_dict()
+                rule_codes.update(file_timing_dicts[file.path].keys())
+
         with open(filename, "w", newline="") as f:
-            writer = csv.DictWriter(f, fieldnames=meta_fields + timing_fields)
+            writer = csv.DictWriter(
+                f, fieldnames=meta_fields + timing_fields + sorted(rule_codes)
+            )
 
             writer.writeheader()
 
             for dir in self.paths:
                 for file in dir.files:
+                    if not file.timings:  # pragma: no cover
+                        continue
                     writer.writerow(
                         {
                             "path": file.path,
@@ -190,7 +210,8 @@ class LintingResult:
                                 if file.tree
                                 else ""
                             ),
-                            **file.time_dict,
+                            **file.timings.step_timings,
+                            **file_timing_dicts[file.path],
                         }
                     )
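persist_timing_records now widens the CSV header with whichever rule codes were actually timed; the dynamic-fieldname construction in isolation, with assumed codes:

import csv
import io

meta_fields = ["path"]
timing_fields = ["templating", "lexing", "parsing", "linting"]
rule_codes = {"LT01", "CP01"}

buf = io.StringIO()
writer = csv.DictWriter(buf, fieldnames=meta_fields + timing_fields + sorted(rule_codes))
writer.writeheader()
print(buf.getvalue().strip())  # path,templating,lexing,parsing,linting,CP01,LT01
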
 
diff --git a/src/sqlfluff/core/linter/runner.py b/src/sqlfluff/core/linter/runner.py
index 02e7d4a..b4808f5 100644
--- a/src/sqlfluff/core/linter/runner.py
+++ b/src/sqlfluff/core/linter/runner.py
@@ -19,7 +19,7 @@ from typing import Callable, List, Tuple, Iterator
 
 from sqlfluff.core import FluffConfig, Linter
 from sqlfluff.core.errors import SQLFluffSkipFile
-from sqlfluff.core.linter import LintedFile
+from sqlfluff.core.linter import LintedFile, RenderedFile
 
 linter_logger: logging.Logger = logging.getLogger("sqlfluff.linter")
 
@@ -37,7 +37,7 @@ class BaseRunner(ABC):
 
     pass_formatter = True
 
-    def iter_rendered(self, fnames: List[str]) -> Iterator[Tuple]:
+    def iter_rendered(self, fnames: List[str]) -> Iterator[Tuple[str, RenderedFile]]:
         """Iterate through rendered files ready for linting."""
         for fname in self.linter.templater.sequence_files(
             fnames, config=self.config, formatter=self.linter.formatter
@@ -58,13 +58,13 @@ class BaseRunner(ABC):
         """
         for fname, rendered in self.iter_rendered(fnames):
             # Generate a fresh ruleset
-            rule_set = self.linter.get_ruleset(config=rendered.config)
+            rule_pack = self.linter.get_rulepack(config=rendered.config)
             yield (
                 fname,
                 functools.partial(
                     self.linter.lint_rendered,
                     rendered,
-                    rule_set,
+                    rule_pack,
                     fix,
                     # Formatters may or may not be passed. They don't pickle
                     # nicely so aren't appropriate in a multiprocessing world.
diff --git a/src/sqlfluff/core/parser/__init__.py b/src/sqlfluff/core/parser/__init__.py
index 36e3627..8a88ceb 100644
--- a/src/sqlfluff/core/parser/__init__.py
+++ b/src/sqlfluff/core/parser/__init__.py
@@ -13,6 +13,7 @@ from sqlfluff.core.parser.segments import (
     SymbolSegment,
     Indent,
     Dedent,
+    ImplicitIndent,
     SegmentGenerator,
     IdentitySet,
 )
@@ -55,6 +56,7 @@ __all__ = (
     "SymbolSegment",
     "Indent",
     "Dedent",
+    "ImplicitIndent",
     "SegmentGenerator",
     "Sequence",
     "GreedyUntil",
diff --git a/src/sqlfluff/core/parser/context.py b/src/sqlfluff/core/parser/context.py
index deb46d3..dfce545 100644
--- a/src/sqlfluff/core/parser/context.py
+++ b/src/sqlfluff/core/parser/context.py
@@ -10,10 +10,12 @@ and match depth of the current operation.
 
 import logging
 import uuid
+from typing import Optional, TYPE_CHECKING, Dict
 
-# Get the parser logger
-from typing import Dict
+if TYPE_CHECKING:  # pragma: no cover
+    from sqlfluff.core.parser.match_result import MatchResult
 
+# Get the parser logger
 parser_logger = logging.getLogger("sqlfluff.parser")
 
 
@@ -42,6 +44,9 @@ class RootParseContext:
         self.logger = parser_logger
         # A uuid for this parse context to enable cache invalidation
         self.uuid = uuid.uuid4()
+        # A dict for parse caching. This is reset for each file,
+        # but persists for the duration of an individual file parse.
+        self._parse_cache = {}
 
     @classmethod
     def from_config(cls, config, **overrides: Dict[str, bool]) -> "RootParseContext":
@@ -172,6 +177,19 @@ class ParseContext:
         ctx.match_segment = name
         return ctx
 
+    def check_parse_cache(
+        self, loc_key: tuple, matcher_key: str
+    ) -> Optional["MatchResult"]:
+        """Check against the parse cache for a pre-existing match.
+
+        If no match is found in the cache, this returns None.
+        """
+        return self._root_ctx._parse_cache.get((loc_key, matcher_key))
+
+    def put_parse_cache(self, loc_key: tuple, matcher_key: str, match: "MatchResult"):
+        """Store a match in the cache for later retrieval."""
+        self._root_ctx._parse_cache[(loc_key, matcher_key)] = match
+
 
 class ParseDenylist:
     """Acts as a cache to stop unnecessary matching."""
diff --git a/src/sqlfluff/core/parser/grammar/anyof.py b/src/sqlfluff/core/parser/grammar/anyof.py
index 0ab8bfc..0fe9412 100644
--- a/src/sqlfluff/core/parser/grammar/anyof.py
+++ b/src/sqlfluff/core/parser/grammar/anyof.py
@@ -1,6 +1,6 @@
 """AnyNumberOf and OneOf."""
 
-from typing import List, Optional, Tuple
+from typing import List, Optional, Tuple, Set
 
 from sqlfluff.core.parser.context import ParseContext
 from sqlfluff.core.parser.grammar.base import (
@@ -25,12 +25,14 @@ class AnyNumberOf(BaseGrammar):
         self.max_times_per_element = kwargs.pop("max_times_per_element", None)
         # Any patterns to _prevent_ a match.
         self.exclude = kwargs.pop("exclude", None)
+        # The intent here is that if we match something, and then the _next_
+        # item is one of these, we can safely conclude it's a "total" match.
+        # In those cases, we return early without considering more options.
+        self.terminators = kwargs.pop("terminators", None)
         super().__init__(*args, **kwargs)
 
     @cached_method_for_parse_context
-    def simple(
-        self, parse_context: ParseContext, crumbs: Optional[List[str]] = None
-    ) -> Optional[List[str]]:
+    def simple(self, parse_context: ParseContext, crumbs: Optional[List[str]] = None):
         """Does this matcher support a uppercase hash matching route?
 
         AnyNumberOf does provide this, as long as *all* the elements *also* do.
@@ -41,8 +43,13 @@ class AnyNumberOf(BaseGrammar):
         ]
         if any(elem is None for elem in simple_buff):
             return None
-        # Flatten the list
-        return [inner for outer in simple_buff for inner in outer]
+        # Combine the lists
+        simple_raws = [simple[0] for simple in simple_buff if simple[0]]
+        simple_types = [simple[1] for simple in simple_buff if simple[1]]
+        return (
+            frozenset.union(*simple_raws) if simple_raws else frozenset(),
+            frozenset.union(*simple_types) if simple_types else frozenset(),
+        )
 
     def is_optional(self) -> bool:
         """Return whether this element is optional.
@@ -53,19 +60,21 @@ class AnyNumberOf(BaseGrammar):
         return self.optional or self.min_times == 0
 
     @staticmethod
-    def _first_non_whitespace(segments) -> Optional[str]:
+    def _first_non_whitespace(segments) -> Optional[Tuple[str, Set[str]]]:
         """Return the upper first non-whitespace segment in the iterable."""
         for segment in segments:
             if segment.first_non_whitespace_segment_raw_upper:
-                return segment.first_non_whitespace_segment_raw_upper
+                return (
+                    segment.first_non_whitespace_segment_raw_upper,
+                    segment.class_types,
+                )
         return None
 
     def _prune_options(
         self, segments: Tuple[BaseSegment, ...], parse_context: ParseContext
-    ) -> Tuple[List[MatchableType], List[str]]:
+    ) -> List[MatchableType]:
         """Use the simple matchers to prune which options to match on."""
         available_options = []
-        simple_opts = []
         prune_buff = []
         non_simple = 0
         pruned_simple = 0
@@ -73,6 +82,11 @@ class AnyNumberOf(BaseGrammar):
 
         # Find the first code element to match against.
         first_elem = self._first_non_whitespace(segments)
+        # If we don't have an appropriate option to match against,
+        # then we should just return immediately. Nothing will match.
+        if not first_elem:
+            return self._elements
+        first_raw, first_types = first_elem
 
         for opt in self._elements:
             simple = opt.simple(parse_context=parse_context)
@@ -82,27 +96,31 @@ class AnyNumberOf(BaseGrammar):
                 available_options.append(opt)
                 non_simple += 1
                 continue
+
             # Otherwise we have a simple option, so let's use
             # it for pruning.
-            for simple_opt in simple:
-                # Check it's not a whitespace option
-                if not simple_opt.strip():  # pragma: no cover
-                    raise NotImplementedError(
-                        "_prune_options not supported for whitespace matching."
-                    )
-                # We want to know if the first meaningful element of the str_buff
-                # matches the option.
+            simple_raws, simple_types = simple
+            matched = False
 
-                # match the FIRST non-whitespace element of the list.
-                if first_elem != simple_opt:
-                    # No match, carry on.
-                    continue
+            # We want to know if the first meaningful element of the segments
+            # matches the option, based on either simple _raw_ matching or
+            # simple _type_ matching.
+
+            # Match Raws
+            if simple_raws and first_raw in simple_raws:
                 # If we get here, it's matched the FIRST element of the string buffer.
                 available_options.append(opt)
-                simple_opts.append(simple_opt)
                 matched_simple += 1
-                break
-            else:
+                matched = True
+
+            # Match Types
+            if simple_types and not matched and first_types.intersection(simple_types):
+                # If we get here, it's matched the FIRST element of the string buffer.
+                available_options.append(opt)
+                matched_simple += 1
+                matched = True
+
+            if not matched:
                 # Ditch this option, the simple match has failed
                 prune_buff.append(opt)
                 pruned_simple += 1
@@ -121,7 +139,7 @@ class AnyNumberOf(BaseGrammar):
             opts=available_options or "ALL",
         )
 
-        return available_options, simple_opts
+        return available_options
 
     def _match_once(
         self, segments: Tuple[BaseSegment, ...], parse_context: ParseContext
@@ -136,13 +154,11 @@ class AnyNumberOf(BaseGrammar):
         # to return earlier if we can.
         # `segments` may already be nested so we need to break out
         # the raw segments within it.
-        available_options, _ = self._prune_options(
-            segments, parse_context=parse_context
-        )
+        available_options = self._prune_options(segments, parse_context=parse_context)
 
         # If we've pruned all the options, return unmatched (with some logging).
         if not available_options:
-            return MatchResult.from_unmatched(segments)
+            return MatchResult.from_unmatched(segments), None
 
         with parse_context.deeper_match() as ctx:
             match, matched_option = self._longest_trimmed_match(
@@ -150,6 +166,7 @@ class AnyNumberOf(BaseGrammar):
                 available_options,
                 parse_context=ctx,
                 trim_noncode=False,
+                terminators=self.terminators,
             )
 
         return match, matched_option
@@ -177,10 +194,7 @@ class AnyNumberOf(BaseGrammar):
         n_matches = 0
 
         # Keep track of the number of times each option has been matched.
-        available_options, _ = self._prune_options(
-            segments, parse_context=parse_context
-        )
-        available_option_counter = {str(o): 0 for o in available_options}
+        option_counter = {elem.cache_key(): 0 for elem in self._elements}
 
         while True:
             if self.max_times and n_matches >= self.max_times:
@@ -213,17 +227,18 @@ class AnyNumberOf(BaseGrammar):
             )
 
             # Increment counter for matched option.
-            if matched_option and (str(matched_option) in available_option_counter):
-                available_option_counter[str(matched_option)] += 1
-                # Check if we have matched an option too many times.
-                if (
-                    self.max_times_per_element
-                    and available_option_counter[str(matched_option)]
-                    > self.max_times_per_element
-                ):
-                    return MatchResult(
-                        matched_segments.matched_segments, unmatched_segments
-                    )
+            if matched_option:
+                matched_key = matched_option.cache_key()
+                if matched_option.cache_key() in option_counter:
+                    option_counter[matched_key] += 1
+                    # Check if we have matched an option too many times.
+                    if (
+                        self.max_times_per_element
+                        and option_counter[matched_key] > self.max_times_per_element
+                    ):
+                        return MatchResult(
+                            matched_segments.matched_segments, unmatched_segments
+                        )
 
             if match:
                 matched_segments += pre_seg + match.matched_segments
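
The per-option counting above now keys on `cache_key()` rather than `str(option)`, so options which happen to render identically no longer share a counter. A minimal sketch of that bookkeeping, with a hypothetical `Option` stand-in:

    from uuid import uuid4

    class Option:
        """Stand-in for a grammar element with a per-instance cache key."""

        def __init__(self, name: str):
            self.name = name
            self._cache_key = uuid4().hex

        def cache_key(self) -> str:
            return self._cache_key

    options = [Option("a"), Option("b")]
    option_counter = {opt.cache_key(): 0 for opt in options}
    max_times_per_element = 2

    # A third consecutive match of the same option trips the limit.
    for matched_option in [options[0]] * 3:
        key = matched_option.cache_key()
        option_counter[key] += 1
        if option_counter[key] > max_times_per_element:
            print(f"option {matched_option.name!r} hit the per-element limit")
            break
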
diff --git a/src/sqlfluff/core/parser/grammar/base.py b/src/sqlfluff/core/parser/grammar/base.py
index 9a67eff..1ba20d9 100644
--- a/src/sqlfluff/core/parser/grammar/base.py
+++ b/src/sqlfluff/core/parser/grammar/base.py
@@ -2,13 +2,14 @@
 
 import copy
 from dataclasses import dataclass
-from typing import TYPE_CHECKING, List, Optional, Union, Type, Tuple, Any
+from typing import TYPE_CHECKING, List, Optional, Union, Type, Tuple, Any, cast
+from uuid import uuid4
 
 from sqlfluff.core.errors import SQLParseError
 from sqlfluff.core.string_helpers import curtail_string
 
 from sqlfluff.core.parser.segments import BaseSegment, BracketedSegment, allow_ephemeral
-from sqlfluff.core.parser.helpers import trim_non_code_segments, iter_indices
+from sqlfluff.core.parser.helpers import trim_non_code_segments
 from sqlfluff.core.parser.match_result import MatchResult
 from sqlfluff.core.parser.match_logging import (
     parse_match_logging,
@@ -26,6 +27,28 @@ if TYPE_CHECKING:
     from sqlfluff.core.dialects.base import Dialect  # pragma: no cover
 
 
+def first_trimmed_raw(seg):
+    """Trim whitespace off a whole element raw.
+
+    Used as a helper function in BaseGrammar._look_ahead_match.
+
+    For existing compound segments, we should assume that within
+    that segment things are internally consistent. That means that
+    rather than enumerating all the individual segments of a longer
+    one, we just dump out the whole segment, splitting off the first
+    whitespace-separated element. This is a) faster and b) prevents
+    some really horrible bugs with bracket matching.
+    See https://github.com/sqlfluff/sqlfluff/issues/433
+
+    This fetches the _whole_ raw of a potentially compound segment
+    to match against, trimming off any whitespace. This is the
+    most efficient way to get at the first element of a potentially
+    longer segment.
+    """
+    s = seg.raw_upper.split(maxsplit=1)
+    return s[0] if s else ""
+
+
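
Since `first_trimmed_raw` is self-contained, its behaviour can be checked in isolation. `FakeSeg` below is a hypothetical stand-in exposing the one attribute the helper reads:

    class FakeSeg:
        """Stand-in exposing the one attribute the helper reads."""

        def __init__(self, raw: str):
            self.raw_upper = raw.upper()

    def first_trimmed_raw(seg) -> str:
        s = seg.raw_upper.split(maxsplit=1)
        return s[0] if s else ""

    assert first_trimmed_raw(FakeSeg("select  *  from tbl")) == "SELECT"
    assert first_trimmed_raw(FakeSeg("   ")) == ""  # whitespace-only -> empty
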
 @dataclass
 class BracketInfo:
     """BracketInfo tuple for keeping track of brackets during matching.
@@ -163,6 +186,15 @@ class BaseGrammar(Matchable):
         # If this is the case, the actual segment construction happens in the
         # match_wrapper.
         self.ephemeral_name = ephemeral_name
+        # Generate a cache key
+        self._cache_key = uuid4().hex
+
+    def cache_key(self) -> str:
+        """Get the cache key for this grammar.
+
+        For grammars these are unique per-instance.
+        """
+        return self._cache_key
 
     def is_optional(self):
         """Return whether this segment is optional.
@@ -185,7 +217,7 @@ class BaseGrammar(Matchable):
         )  # pragma: no cover
 
     @cached_method_for_parse_context
-    def simple(self, parse_context: ParseContext, crumbs=None) -> Optional[List[str]]:
+    def simple(self, parse_context: ParseContext, crumbs=None):
         """Does this matcher support a lowercase hash matching route?"""
         return None
 
@@ -218,13 +250,44 @@ class BaseGrammar(Matchable):
         if trim_noncode:
             pre_nc, segments, post_nc = trim_non_code_segments(segments)
 
+        # At parse time we should be able to count on there being a location.
+        assert segments[0].pos_marker
+
+        # Characterise this location.
+        # Initial segment raw, loc, type and length of segment series.
+        loc_key = (
+            segments[0].raw,
+            segments[0].pos_marker.working_loc,
+            segments[0].get_type(),
+            len(segments),
+        )
+
         best_match_length = 0
         # iterate at this position across all the matchers
         for matcher in matchers:
-            # MyPy seems to require a type hint here. Not quite sure why.
-            res_match: MatchResult = matcher.match(
-                segments, parse_context=parse_context
+            # Check parse cache.
+            matcher_key = matcher.cache_key()
+            res_match: Optional[MatchResult] = parse_context.check_parse_cache(
+                loc_key, matcher_key
             )
+            if res_match:
+                parse_match_logging(
+                    cls.__name__,
+                    "_look_ahead_match",
+                    "HIT",
+                    parse_context=parse_context,
+                    cache_hit=matcher.__class__.__name__,
+                    cache_key=matcher_key,
+                )
+            else:
+                # Match fresh if no cache hit
+                res_match = matcher.match(segments, parse_context=parse_context)
+                # Cache it for later to improve performance.
+                parse_context.put_parse_cache(loc_key, matcher_key, res_match)
+
+            # By here we know that it's a MatchResult
+            res_match = cast(MatchResult, res_match)
+
             if res_match.is_complete():
                 # Just return it! (WITH THE RIGHT OTHER STUFF)
                 if trim_noncode:
@@ -242,6 +305,10 @@ class BaseGrammar(Matchable):
                     best_match = res_match, matcher
                     best_match_length = res_match.trimmed_matched_length
 
+                    # If we've got a terminator next, it's an opportunity to
+                    # end earlier, and claim an effectively "complete" match.
+                    # NOTE: This means that by specifying terminators, we can
+                    # significantly increase performance.
                     if terminators:
                         _, segs, _ = trim_non_code_segments(
                             best_match[0].unmatched_segments
@@ -316,163 +383,84 @@ class BaseGrammar(Matchable):
         # Here we enable a performance optimisation. Most of the time in this cycle
         # happens in loops looking for simple matchers which we should
         # be able to find a shortcut for.
-        # First: Assess the matchers passed in, if any are
-        # "simple", then we effectively use a hash lookup across the
-        # content of segments to quickly evaluate if the segment is present.
-        # Matchers which aren't "simple" still take a slower route.
-        _matchers = [
-            (matcher, matcher.simple(parse_context=parse_context))
-            for matcher in matchers
-        ]
-        simple_matchers = [matcher for matcher in _matchers if matcher[1]]
-        non_simple_matchers = [matcher[0] for matcher in _matchers if not matcher[1]]
-        best_simple_match = None
-        if simple_matchers:
-            # If they're all simple we can use a hash match to identify the first one.
-            # Build a buffer of all the upper case raw segments ahead of us.
-            str_buff = []
-            # For existing compound segments, we should assume that within
-            # that segment, things are internally consistent, that means
-            # rather than enumerating all the individual segments of a longer
-            # one we just dump out the whole segment, but splitting off the
-            # first element separated by whitespace. This is a) faster and
-            # also b) prevents some really horrible bugs with bracket matching.
-            # See https://github.com/sqlfluff/sqlfluff/issues/433
-
-            def _trim_elem(seg):
-                s = seg.raw_upper.split(maxsplit=1)
-                return s[0] if s else ""
-
-            str_buff = [_trim_elem(seg) for seg in segments]
-            match_queue = []
-
-            for matcher, simple in simple_matchers:
-                # Simple will be a tuple of options
-                assert simple
-                for simple_option in simple:
-                    # NOTE: We use iter_indices to make sure we capture
-                    # all instances of potential matches if there are many.
-                    # This is important for bracket counting.
-                    for buff_pos in iter_indices(str_buff, simple_option):
-                        match_queue.append((matcher, buff_pos, simple_option))
-
-            # Sort the match queue. First to process AT THE END.
-            # That means we pop from the end.
-            match_queue = sorted(match_queue, key=lambda x: x[1])
-
-            parse_match_logging(
-                cls.__name__,
-                "_look_ahead_match",
-                "SI",
-                parse_context=parse_context,
-                v_level=4,
-                mq=match_queue,
-                sb=str_buff,
-            )
-
-            while match_queue:
-                # We've managed to match. We can shortcut home.
-                # NB: We may still need to deal with whitespace.
-                queued_matcher, queued_buff_pos, queued_option = match_queue.pop()
-                # Here we do the actual transform to the new segment.
-                match = queued_matcher.match(segments[queued_buff_pos:], parse_context)
-                if not match:
-                    # We've had something match in simple matching, but then later
-                    # excluded. Log but then move on to the next item on the list.
-                    parse_match_logging(
-                        cls.__name__,
-                        "_look_ahead_match",
-                        "NM",
-                        parse_context=parse_context,
-                        v_level=4,
-                        _so=queued_option,
-                    )
-                    continue
-                # Ok we have a match. Because we sorted the list, we'll take it!
-                best_simple_match = (segments[:queued_buff_pos], match, queued_matcher)
-
-        if not non_simple_matchers:
-            # There are no other matchers, we can just shortcut now.
-
-            parse_match_logging(
-                cls.__name__,
-                "_look_ahead_match",
-                "SC",
-                parse_context=parse_context,
-                v_level=4,
-                bsm=None
-                if not best_simple_match
-                else (
-                    len(best_simple_match[0]),
-                    len(best_simple_match[1]),
-                    best_simple_match[2],
-                ),
-            )
 
-            if best_simple_match:
-                return best_simple_match
-            else:
-                return ((), MatchResult.from_unmatched(segments), None)
-
-        # Make some buffers
-        seg_buff = segments
-        pre_seg_buff: Tuple[BaseSegment, ...] = ()
-
-        # Loop
-        while True:
-            # Do we have anything left to match on?
-            if seg_buff:
-                # Great, carry on.
-                pass
-            else:
-                # We've got to the end without a match, return empty
-                return ((), MatchResult.from_unmatched(segments), None)
-
-            # We only check the NON-simple ones here for brevity.
-            mat, m = cls._longest_trimmed_match(
-                seg_buff,
-                non_simple_matchers,
-                parse_context=parse_context,
-                trim_noncode=False,
-            )
+        parse_match_logging(
+            cls.__name__,
+            "_look_ahead_match",
+            "SI",
+            parse_context=parse_context,
+            v_level=4,
+        )
 
-            if mat and not best_simple_match:
-                return (pre_seg_buff, mat, m)
-            elif mat:
-                # Given we have mat - we should always have these two.
-                assert m
-                assert best_simple_match
-                # It will be earlier than the simple one if we've even checked,
-                # but there's a chance that this might be *longer*, or just FIRST.
-                pre_lengths = (len(pre_seg_buff), len(best_simple_match[0]))
-                mat_lengths = (len(mat), len(best_simple_match[1]))
-                mat_indexes = (matchers.index(m), matchers.index(best_simple_match[2]))
-                if (
-                    (pre_lengths[0] < pre_lengths[1])
-                    or (
-                        pre_lengths[0] == pre_lengths[1]
-                        and mat_lengths[0] > mat_lengths[1]
+        best_simple_match = None
+        simple_match = None
+        for idx, seg in enumerate(segments):
+            for matcher in matchers:
+                simple = matcher.simple(parse_context=parse_context)
+                if not simple:  # pragma: no cover
+                    # NOTE: For all bundled dialects this branch is never
+                    # taken, but until the RegexMatcher is completely
+                    # deprecated (at which point `.simple()` must always
+                    # provide a result), it is still _possible_ to end
+                    # up here.
+                    raise NotImplementedError(
+                        "All matchers passed to `._look_ahead_match()` are "
+                        "assumed to have a functioning `.simple()` option. "
+                        "In a future release it will be compulsory for _all_ "
+                        "matchables to implement `.simple()`. Please report "
+                        "this as a bug on GitHub along with your current query "
+                        f"and dialect.\nProblematic matcher: {matcher}"
                     )
-                    or (
-                        pre_lengths[0] == pre_lengths[1]
-                        and mat_lengths[0] == mat_lengths[1]
-                        and mat_indexes[0] < mat_indexes[1]
+                simple_raws, simple_types = simple
+
+                assert simple_raws or simple_types
+                if simple_raws:
+                    trimmed_seg = first_trimmed_raw(seg)
+                    if trimmed_seg in simple_raws:
+                        simple_match = matcher
+                        break
+                if simple_types and not simple_match:
+                    intersection = simple_types.intersection(seg.class_types)
+                    if intersection:
+                        simple_match = matcher
+                        break
+
+            # We've managed to match. We can shortcut home.
+            # NB: We may still need to deal with whitespace.
+            if simple_match:
+                # If we have a _simple_ match, now we should call the
+                # full match method to actually produce the result.
+                match = simple_match.match(segments[idx:], parse_context)
+                if match:
+                    best_simple_match = (
+                        segments[:idx],
+                        match,
+                        simple_match,
                     )
-                ):
-                    return (pre_seg_buff, mat, m)
+                    break
                 else:
-                    # TODO: Make a test case to cover this.
-                    return best_simple_match  # pragma: no cover
-            else:
-                # If there aren't any matches, then advance the buffer and try again.
-                # Two improvements:
-                # 1) if we get as far as the first simple match, then return that.
-                # 2) be eager in consuming non-code segments if allowed
-                if best_simple_match and len(pre_seg_buff) >= len(best_simple_match[0]):
-                    return best_simple_match
+                    simple_match = None
 
-                pre_seg_buff += (seg_buff[0],)
-                seg_buff = seg_buff[1:]
+        # There are no other matchers, we can just shortcut now. Either with
+        # no match, or the best one we found (if we found one).
+        parse_match_logging(
+            cls.__name__,
+            "_look_ahead_match",
+            "SC",
+            parse_context=parse_context,
+            v_level=4,
+            bsm=None
+            if not best_simple_match
+            else (
+                len(best_simple_match[0]),
+                len(best_simple_match[1]),
+                best_simple_match[2],
+            ),
+        )
+
+        if best_simple_match:
+            return best_simple_match
+        else:
+            return ((), MatchResult.from_unmatched(segments), None)
 
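
The rewritten `_look_ahead_match` above replaces the old queue-and-merge logic with a single left-to-right scan: for each segment, take the first matcher whose simple raws or types hit. A stand-alone sketch with plain data in place of segments and matchers (all names here are illustrative):

    segments = ["select", "x", "from", "tbl"]
    matchers = [
        ("from_keyword", frozenset({"FROM"}), frozenset()),
        ("comma", frozenset(), frozenset({"comma"})),
    ]

    best = None
    for idx, seg in enumerate(segments):
        seg_types = frozenset()  # real segments expose `.class_types`
        for name, simple_raws, simple_types in matchers:
            if simple_raws and seg.upper() in simple_raws:
                best = (idx, name)
                break
            if simple_types and seg_types.intersection(simple_types):
                best = (idx, name)
                break
        if best:
            break
    print(best)  # (2, 'from_keyword') -- the first hit, scanning left to right
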
     @classmethod
     def _bracket_sensitive_look_ahead_match(
@@ -815,9 +803,7 @@ class Ref(BaseGrammar):
         super().__init__(*args, **kwargs)
 
     @cached_method_for_parse_context
-    def simple(
-        self, parse_context: ParseContext, crumbs: Optional[Tuple[str]] = None
-    ) -> Optional[List[str]]:
+    def simple(self, parse_context: ParseContext, crumbs: Optional[Tuple[str]] = None):
         """Does this matcher support a uppercase hash matching route?
 
         A ref is simple, if the thing it references is simple.
diff --git a/src/sqlfluff/core/parser/grammar/greedy.py b/src/sqlfluff/core/parser/grammar/greedy.py
index f9e016b..7b1b0a1 100644
--- a/src/sqlfluff/core/parser/grammar/greedy.py
+++ b/src/sqlfluff/core/parser/grammar/greedy.py
@@ -1,7 +1,5 @@
 """GreedyUntil and StartsWith Grammars."""
 
-from typing import Optional, List
-
 from sqlfluff.core.parser.helpers import trim_non_code_segments
 from sqlfluff.core.parser.match_result import MatchResult
 from sqlfluff.core.parser.match_wrapper import match_wrapper
@@ -170,7 +168,7 @@ class StartsWith(GreedyUntil):
         super().__init__(*args, **kwargs)
 
     @cached_method_for_parse_context
-    def simple(self, parse_context: ParseContext, crumbs=None) -> Optional[List[str]]:
+    def simple(self, parse_context: ParseContext, crumbs=None):
         """Does this matcher support a uppercase hash matching route?
 
         `StartsWith` is simple, if the thing it starts with is also simple.
diff --git a/src/sqlfluff/core/parser/grammar/noncode.py b/src/sqlfluff/core/parser/grammar/noncode.py
index dbd2efe..0926294 100644
--- a/src/sqlfluff/core/parser/grammar/noncode.py
+++ b/src/sqlfluff/core/parser/grammar/noncode.py
@@ -4,8 +4,6 @@ This is a stub of a grammar, intended for use entirely as a
 terminator or similar alongside other matchers.
 """
 
-from typing import Optional, List
-
 from sqlfluff.core.parser.match_wrapper import match_wrapper
 from sqlfluff.core.parser.match_result import MatchResult
 from sqlfluff.core.parser.matchable import Matchable
@@ -15,7 +13,7 @@ from sqlfluff.core.parser.context import ParseContext
 class NonCodeMatcher(Matchable):
     """An object which behaves like a matcher to match non-code."""
 
-    def simple(self, parse_context: ParseContext, crumbs=None) -> Optional[List[str]]:
+    def simple(self, parse_context: ParseContext, crumbs=None):
         """This element doesn't work with simple."""
         return None
 
@@ -23,6 +21,15 @@ class NonCodeMatcher(Matchable):
         """Not optional."""
         return False
 
+    def cache_key(self) -> str:
+        """Get the cache key for the matcher.
+
+        NOTE: In this case, this class is a bit of a singleton
+        and so we don't need a unique UUID in the same way as
+        other classes.
+        """
+        return "non-code-matcher"
+
     @match_wrapper(v_level=4)
     def match(self, segments, parse_context):
         """Match any starting non-code segments."""
diff --git a/src/sqlfluff/core/parser/grammar/sequence.py b/src/sqlfluff/core/parser/grammar/sequence.py
index a729287..1e7f0fb 100644
--- a/src/sqlfluff/core/parser/grammar/sequence.py
+++ b/src/sqlfluff/core/parser/grammar/sequence.py
@@ -1,6 +1,8 @@
 """Sequence and Bracketed Grammars."""
 
-from typing import Optional, List, Tuple, cast
+# NOTE: We rename the typing.Sequence here so it doesn't collide
+# with the grammar class that we're defining.
+from typing import Optional, Tuple, cast, Type, Sequence as _Sequence
 
 from sqlfluff.core.errors import SQLParseError
 
@@ -15,39 +17,80 @@ from sqlfluff.core.parser.segments import (
 from sqlfluff.core.parser.helpers import trim_non_code_segments, check_still_complete
 from sqlfluff.core.parser.match_result import MatchResult
 from sqlfluff.core.parser.match_wrapper import match_wrapper
+from sqlfluff.core.parser.matchable import Matchable
 from sqlfluff.core.parser.context import ParseContext
 from sqlfluff.core.parser.grammar.base import (
     BaseGrammar,
+    MatchableType,
     cached_method_for_parse_context,
 )
 from sqlfluff.core.parser.grammar.conditional import Conditional
 from os import getenv
 
 
+def _all_remaining_metas(
+    remaining_elements: _Sequence[MatchableType], parse_context: ParseContext
+) -> Optional[Tuple[BaseSegment, ...]]:
+    """Check the remaining elements, instantiate them if they're metas.
+
+    Helper function in `Sequence.match()`.
+    """
+    # Are all the remaining elements metas?
+    if not all(
+        e.is_optional()
+        or isinstance(e, Conditional)
+        or (not isinstance(e, Matchable) and e.is_meta)
+        for e in remaining_elements
+    ):
+        # No? Return Nothing.
+        return None
+
+    # Yes, so we shortcut back early because we don't want
+    # to claim any more whitespace.
+    return_segments: Tuple[BaseSegment, ...] = tuple()
+    # Instantiate all the metas
+    for e in remaining_elements:
+        # If it's meta, instantiate it.
+        if e.is_optional():
+            continue
+        elif isinstance(e, Conditional):
+            if e.is_enabled(parse_context):
+                meta_match = e.match(tuple(), parse_context)
+                if meta_match:
+                    return_segments += meta_match.matched_segments
+            continue
+        elif not isinstance(e, Matchable) and e.is_meta:
+            indent_seg = cast(Type[MetaSegment], e)
+            return_segments += (indent_seg(),)
+    return return_segments
+
+
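
`_all_remaining_metas` answers the question "can the rest of the sequence be satisfied with no more input?". A reduced model of that check, ignoring the `Conditional` branch and using hypothetical stand-in classes:

    class Elem:
        def __init__(self, optional: bool = False, meta: bool = False):
            self.optional = optional
            self.is_meta = meta

        def is_optional(self) -> bool:
            return self.optional

    def all_remaining_skippable(elements) -> bool:
        # Optional elements can be skipped; metas consume no input.
        return all(e.is_optional() or e.is_meta for e in elements)

    print(all_remaining_skippable([Elem(optional=True), Elem(meta=True)]))  # True
    print(all_remaining_skippable([Elem(), Elem(meta=True)]))               # False
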
 class Sequence(BaseGrammar):
     """Match a specific sequence of elements."""
 
     test_env = getenv("SQLFLUFF_TESTENV", "")
 
     @cached_method_for_parse_context
-    def simple(self, parse_context: ParseContext, crumbs=None) -> Optional[List[str]]:
+    def simple(self, parse_context: ParseContext, crumbs=None):
         """Does this matcher support a uppercase hash matching route?
 
         Sequence does provide this, as long as the *first* non-optional
         element does, *AND* and optional elements which preceded it also do.
         """
-        simple_buff = []
+        simple_raws = set()
+        simple_types = set()
         for opt in self._elements:
             simple = opt.simple(parse_context=parse_context, crumbs=crumbs)
             if not simple:
                 return None
-            simple_buff += simple
+            simple_raws.update(simple[0])
+            simple_types.update(simple[1])
 
             if not opt.is_optional():
                 # We found our first non-optional element!
-                return simple_buff
+                return frozenset(simple_raws), frozenset(simple_types)
         # If *all* elements are optional AND simple, I guess it's also simple.
-        return simple_buff
+        return frozenset(simple_raws), frozenset(simple_types)
 
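
The rule in `Sequence.simple()` above (union hints from leading optional elements, stop at the first required one) in stand-alone form. `sequence_simple` is a hypothetical name; hints use the (raws, types) shape introduced earlier in this diff:

    from typing import FrozenSet, List, Optional, Tuple

    Hint = Tuple[FrozenSet[str], FrozenSet[str]]

    def sequence_simple(
        element_hints: List[Tuple[Optional[Hint], bool]],
    ) -> Optional[Hint]:
        raws: set = set()
        types: set = set()
        for hint, is_optional in element_hints:
            if not hint:
                return None  # one non-simple element blocks the whole hint
            raws.update(hint[0])
            types.update(hint[1])
            if not is_optional:
                break  # stop at the first required element
        return frozenset(raws), frozenset(types)

    hint = sequence_simple([
        ((frozenset({"OR"}), frozenset()), True),        # optional prefix
        ((frozenset({"REPLACE"}), frozenset()), False),  # first required element
        ((frozenset({"INTO"}), frozenset()), True),      # never inspected
    ])
    print(sorted(hint[0]))  # ['OR', 'REPLACE']
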
     @match_wrapper()
     @allow_ephemeral
@@ -70,101 +113,115 @@ class Sequence(BaseGrammar):
                 break
 
             while True:
-                # Consume non-code if appropriate
-                if self.allow_gaps:
-                    pre_nc, mid_seg, post_nc = trim_non_code_segments(
-                        unmatched_segments
+                # Is there anything left to match on?
+                if len(unmatched_segments) == 0:
+                    # There isn't, but we still have elements left to match.
+                    # Do only optional or meta elements remain?
+                    remaining_metas = _all_remaining_metas(
+                        self._elements[idx:], parse_context
                     )
-                else:
-                    pre_nc = ()
-                    mid_seg = unmatched_segments
-                    post_nc = ()
+                    if remaining_metas is not None:
+                        # We're safe. Claim them and return.
+                        meta_post_nc += remaining_metas
+                        early_break = True
+                        break
+                    else:
+                        # No, there's more left to match.
+                        # That means we haven't matched the whole
+                        # sequence.
+                        return MatchResult.from_unmatched(segments)
 
-                # Is it an indent or dedent?
+                # Then handle any metas mid-sequence.
+                new_metas = ()
+                # Is it a raw meta?
                 if elem.is_meta:
+                    new_metas = (elem(),)
+                elif isinstance(elem, Conditional):
+                    if not elem.is_enabled(parse_context):
+                        # If it's not active, skip it.
+                        break
+                    # Then if it _is_ active. Match against it.
+                    with parse_context.deeper_match() as ctx:
+                        meta_match = elem.match(unmatched_segments, parse_context=ctx)
+                    # Did it match and leave the unmatched portion the same?
+                    if (
+                        meta_match
+                        and meta_match.unmatched_segments == unmatched_segments
+                    ):
+                        # If it did, it's just returned a new meta, keep it.
+                        new_metas = meta_match.matched_segments
+
+                # Do we have a new meta?
+                if new_metas:
                     # Elements with a negative indent value come AFTER
                     # the whitespace. Positive or neutral come BEFORE.
-                    if elem.indent_val < 0:
-                        meta_post_nc += (elem(),)
+                    # HOWEVER: If one is already there, we must preserve
+                    # the order. This forced ordering is fine if there's
+                    # a positive followed by a negative in the sequence,
+                    # but if by design a positive arrives *after* a
+                    # negative then we should insert it after the positive
+                    # instead.
+                    # https://github.com/sqlfluff/sqlfluff/issues/3836
+                    if all(e.indent_val >= 0 for e in new_metas) and not any(
+                        seg.indent_val < 1 for seg in meta_post_nc
+                    ):
+                        meta_pre_nc += new_metas
                     else:
-                        meta_pre_nc += (elem(),)
+                        meta_post_nc += new_metas
                     break
 
-                # Is it a conditional? If so is it active
-                if isinstance(elem, Conditional) and not elem.is_enabled(parse_context):
-                    # If it's not active, skip it.
-                    break
+                # NOTE: If we get this far, we know:
+                # - there are segments left to match on.
+                # - the next elements aren't metas (including metas in conditionals)
 
-                if len(pre_nc + mid_seg + post_nc) == 0:
-                    # We've run our of sequence without matching everything.
-                    # Do only optional or meta elements remain?
-                    if all(
-                        e.is_optional() or e.is_meta or isinstance(e, Conditional)
-                        for e in self._elements[idx:]
-                    ):
-                        # then it's ok, and we can return what we've got so far.
-                        # No need to deal with anything left over because we're at the
-                        # end, unless it's a meta segment.
-
-                        # We'll add those meta segments after any existing ones. So
-                        # the go on the meta_post_nc stack.
-                        for e in self._elements[idx:]:
-                            # If it's meta, instantiate it.
-                            if e.is_meta:
-                                meta_post_nc += (e(),)  # pragma: no cover TODO?
-                            # If it's conditional and it's enabled, match it.
-                            if isinstance(e, Conditional) and e.is_enabled(
-                                parse_context
-                            ):
-                                meta_match = e.match(tuple(), parse_context)
-                                if meta_match:
-                                    meta_post_nc += meta_match.matched_segments
-
-                        # Early break to exit via the happy match path.
-                        early_break = True
-                        break
-                    else:
-                        # we've got to the end of the sequence without matching all
-                        # required elements.
-                        return MatchResult.from_unmatched(segments)
+                # Split off any non-code before continuing to match.
+                if self.allow_gaps:
+                    pre_nc, mid_seg, post_nc = trim_non_code_segments(
+                        unmatched_segments
+                    )
                 else:
-                    # We've already dealt with potential whitespace above, so carry on
-                    # to matching
-                    with parse_context.deeper_match() as ctx:
-                        elem_match = elem.match(mid_seg, parse_context=ctx)
-
-                    if elem_match.has_match():
-                        # We're expecting mostly partial matches here, but complete
-                        # matches are possible. Don't be greedy with whitespace!
-                        matched_segments += (
-                            meta_pre_nc
-                            + pre_nc
-                            + meta_post_nc
-                            + elem_match.matched_segments
-                        )
-                        meta_pre_nc = ()
-                        meta_post_nc = ()
-                        unmatched_segments = elem_match.unmatched_segments + post_nc
-                        # Each time we do this, we do a sense check to make sure we
-                        # haven't dropped anything. (Because it's happened before!).
-                        if self.test_env:
-                            check_still_complete(
-                                segments,
-                                matched_segments.matched_segments,
-                                unmatched_segments,
-                            )
-                        # Break out of the while loop and move to the next element.
+                    pre_nc = ()
+                    mid_seg = unmatched_segments
+                    post_nc = ()
+
+                # We've already dealt with potential whitespace above, so carry on
+                # to matching
+                with parse_context.deeper_match() as ctx:
+                    elem_match = elem.match(mid_seg, parse_context=ctx)
+
+                if not elem_match.has_match():
+                    # If we can't match an element, we should ascertain whether it's
+                    # optional. If so then fine, move on, but otherwise we should
+                    # crash out without a match. We have not matched the sequence.
+                    if elem.is_optional():
+                        # This will crash us out of the while loop and move us
+                        # onto the next matching element
                         break
                     else:
-                        # If we can't match an element, we should ascertain whether it's
-                        # required. If so then fine, move on, but otherwise we should
-                        # crash out without a match. We have not matched the sequence.
-                        if elem.is_optional():
-                            # This will crash us out of the while loop and move us
-                            # onto the next matching element
-                            break
-                        else:
-                            return MatchResult.from_unmatched(segments)
+                        return MatchResult.from_unmatched(segments)
+
+                # Otherwise we _do_ have a match.
+
+                # We're expecting mostly partial matches here, but complete
+                # matches are possible. Don't be greedy with whitespace!
+                matched_segments += (
+                    meta_pre_nc + pre_nc + meta_post_nc + elem_match.matched_segments
+                )
+                meta_pre_nc = ()
+                meta_post_nc = ()
+                unmatched_segments = elem_match.unmatched_segments + post_nc
+                # Each time we do this, we do a sense check to make sure we
+                # haven't dropped anything. (Because it's happened before!).
+                if self.test_env:
+                    check_still_complete(
+                        segments,
+                        matched_segments.matched_segments,
+                        unmatched_segments,
+                    )
+                # Break out of the while loop. If there are more segments, we'll
+                # begin again with the next one. Otherwise we'll fall out to the
+                # closing return below.
+                break
 
         # If we get to here, we've matched all of the elements (or skipped them)
         # but still have some segments left (or perhaps have precisely zero left).
@@ -208,7 +265,7 @@ class Bracketed(Sequence):
         super().__init__(*args, **kwargs)
 
     @cached_method_for_parse_context
-    def simple(self, parse_context: ParseContext, crumbs=None) -> Optional[List[str]]:
+    def simple(self, parse_context: ParseContext, crumbs=None):
         """Does this matcher support a uppercase hash matching route?
 
         Bracketed does this easily, we just look for the bracket.
diff --git a/src/sqlfluff/core/parser/lexer.py b/src/sqlfluff/core/parser/lexer.py
index 475ca86..890b105 100644
--- a/src/sqlfluff/core/parser/lexer.py
+++ b/src/sqlfluff/core/parser/lexer.py
@@ -1,7 +1,7 @@
 """The code for the Lexer."""
 
 import logging
-from typing import Optional, List, Tuple, Union, NamedTuple
+from typing import Iterator, Optional, List, Tuple, Union, NamedTuple, Dict
 from uuid import UUID, uuid4
 import regex
 
@@ -19,12 +19,64 @@ from sqlfluff.core.parser.markers import PositionMarker
 from sqlfluff.core.errors import SQLLexError
 from sqlfluff.core.templaters import TemplatedFile
 from sqlfluff.core.config import FluffConfig
-from sqlfluff.core.templaters.base import RawFileSlice
+from sqlfluff.core.templaters.base import TemplatedFileSlice
+from sqlfluff.core.slice_helpers import is_zero_slice, offset_slice, to_tuple
 
 # Instantiate the lexer logger
 lexer_logger = logging.getLogger("sqlfluff.lexer")
 
 
+class BlockTracker:
+    """This is an object for keeping track of templating blocks.
+
+    Using the .enter() and .exit() methods on opening and closing
+    blocks, we can match up tags of the same level so that later
+    it's easier to treat them the same way in the linting engine.
+
+    In case looping means that we encounter the same block more
+    than once, we use cache uuids against their source location
+    so that if we try to re-enter the block again, it will get
+    the same uuid on the second pass.
+    """
+
+    _stack: List[UUID] = []
+    _map: Dict[Tuple[int, int], UUID] = {}
+
+    def enter(self, src_slice: slice) -> None:
+        """Add a block to the stack."""
+        key = to_tuple(src_slice)
+        uuid = self._map.get(key, None)
+
+        if not uuid:
+            uuid = uuid4()
+            self._map[key] = uuid
+            lexer_logger.debug(
+                "        Entering block stack @ %s: %s (fresh)",
+                src_slice,
+                uuid,
+            )
+        else:
+            lexer_logger.debug(
+                "        Entering block stack @ %s: %s (cached)",
+                src_slice,
+                uuid,
+            )
+
+        self._stack.append(uuid)
+
+    def exit(self) -> None:
+        """Pop a block from the stack."""
+        uuid = self._stack.pop()
+        lexer_logger.debug(
+            "        Exiting block stack: %s",
+            uuid,
+        )
+
+    def top(self) -> UUID:
+        """Get the uuid on top of the stack."""
+        return self._stack[-1]
+
+
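
Hypothetical usage of `BlockTracker`: re-entering the same source slice (as a template loop does) must yield the same uuid. The cut-down version below keeps its state on the instance rather than the class, purely to stay self-contained; otherwise it mirrors the logic above:

    from typing import Dict, List, Tuple
    from uuid import UUID, uuid4

    class MiniBlockTracker:
        def __init__(self) -> None:
            self._stack: List[UUID] = []
            self._map: Dict[Tuple[int, int], UUID] = {}

        def enter(self, src_slice: slice) -> None:
            key = (src_slice.start, src_slice.stop)
            self._stack.append(self._map.setdefault(key, uuid4()))

        def exit(self) -> None:
            self._stack.pop()

        def top(self) -> UUID:
            return self._stack[-1]

    tracker = MiniBlockTracker()
    tracker.enter(slice(10, 25))        # first pass through a template block
    first_uuid = tracker.top()
    tracker.exit()
    tracker.enter(slice(10, 25))        # a loop re-enters the same source slice
    assert tracker.top() == first_uuid  # the cached uuid is reused
    tracker.exit()
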
 class LexedElement(NamedTuple):
     """An element matched during lexing."""
 
@@ -46,9 +98,11 @@ class TemplateElement(NamedTuple):
             raw=element.raw, template_slice=template_slice, matcher=element.matcher
         )
 
-    def to_segment(self, pos_marker):
+    def to_segment(self, pos_marker, subslice=None):
         """Create a segment from this lexed element."""
-        return self.matcher.construct_segment(self.raw, pos_marker=pos_marker)
+        return self.matcher.construct_segment(
+            self.raw[subslice] if subslice else self.raw, pos_marker=pos_marker
+        )
 
 
 class LexMatch(NamedTuple):
@@ -256,245 +310,384 @@ class RegexLexer(StringLexer):
         return None
 
 
-def _generate_template_loop_segments(
-    source_slice: slice,
-    last_source_slice: slice,
-    templated_idx: int,
-    templated_file: TemplatedFile,
-    block_uuid: UUID,
-) -> List[RawSegment]:
-    """Detect when we've gone backward in the source.
-
-    NOTE: If it's the _same_ slice then don't insert a marker
-    because we're probably just within a single templated
-    section.
-    """
-    if (
-        last_source_slice
-        and last_source_slice.stop > source_slice.start
-        and last_source_slice.stop != source_slice.stop
-        and last_source_slice != source_slice
-    ):
-        # If we have, insert a loop marker to reflect that.
-        lexer_logger.debug("      Backward jump detected. Inserting Loop Marker")
-        # TemplateLoops should have a dedent before and an indent after.
-        # The position maker is the same for all of them.
-        pos_marker = PositionMarker.from_point(
-            last_source_slice.stop,
-            templated_idx,
-            templated_file,
-        )
-        return [
-            Dedent(
-                is_template=True,
-                pos_marker=pos_marker,
-            ),
-            TemplateLoop(pos_marker=pos_marker, block_uuid=block_uuid),
-            Indent(
-                is_template=True,
-                pos_marker=pos_marker,
-            ),
-        ]
-    else:
-        return []
-
-
-def _generate_placeholder_segments(
-    source_slice: slice,
-    # NOTE: no last_source_slice implies start of file.
-    last_source_slice: Optional[slice],
-    template_slice: slice,
-    source_only_slices: List[RawFileSlice],
+def _handle_zero_length_slice(
+    tfs: TemplatedFileSlice,
+    next_tfs: Optional[TemplatedFileSlice],
+    block_stack: BlockTracker,
     templated_file: TemplatedFile,
     add_indents: bool,
-    block_uuid_stack: List[UUID],
-) -> Tuple[List[RawSegment], slice, List[UUID]]:
-    """Generate any template placeholder segments.
-
-    The input source_slice, will potentially include not just
-    elements from the templated file, but also sections of
-    template code which no longer appear in the rendered file.
-
-    This code extracts them, adjusts the source_slice and generates
-    appropriate template segments to insert.
-
-    The block stack is to provide a consistent reference between template tags
-    of the same expression (i.e. link an {% if .. %} and an
-    {% endif %}. This is useful metadata for any downstream
-    edits we might want to make and keep their position in line.
-
-    NOTE: For reasons which aren't well documented, any literal
-    elements will come at the end of the source_slice, and any
-    source only elements will usually come at the start. This
-    code takes advantage of that.
+):
+    """Generate placeholders and loop segments from a zero length slice.
+
+    This method checks for:
+    1. Backward jumps (inserting :obj:`TemplateLoop`).
+    2. Forward jumps (inserting :obj:`TemplateSegment`).
+    3. Blocks (inserting :obj:`TemplateSegment`).
+    4. Unrendered template elements (inserting :obj:`TemplateSegment`).
+
+    For blocks and loops, :obj:`Indent` and :obj:`Dedent` segments are
+    yielded around them as appropriate.
+
+    NOTE: block_stack is _mutated_ by this method.
     """
-    so_slices = []
-    block_stack = block_uuid_stack.copy()
-    # First check whether we've got any relevant source only slices for
-    # this position in the file.
-    if last_source_slice != source_slice:
-        for source_only_slice in source_only_slices:
-            # If it's later in the source, stop looking. Any later
-            # ones *also* won't match.
-            if source_only_slice.source_idx >= source_slice.stop:
-                break
-            elif source_only_slice.source_idx >= source_slice.start:
-                so_slices.append(source_only_slice)
-
-    # No relevant source only slices in this instance. Return
-    if not so_slices:
-        return [], source_slice, block_stack
-
-    lexer_logger.debug("    Collected Source Only Slices")
-    for so_slice in so_slices:
-        lexer_logger.debug("       %s", so_slice)
-
-    # Calculate some things which will be useful
-    templ_str = templated_file.templated_str[template_slice]
-    source_str = templated_file.source_str[source_slice]
-
-    # For reasons which aren't entirely clear right now, if there is
-    # an included literal, it will always be at the end. Let's see if it's
-    # there.
-    if source_str.endswith(templ_str):
-        existing_len = len(templ_str)
-    else:
-        existing_len = 0
-
-    # Calculate slices, without any existing literal.
-    placeholder_slice = slice(source_slice.start, source_slice.stop - existing_len)
-    placeholder_str = source_str[:-existing_len]
-    source_slice = slice(source_slice.stop - existing_len, source_slice.stop)
-
-    # If it doesn't manage to extract a placeholder string from the source
-    # just concatenate the source only strings. There is almost always
-    # only one of them.
-    if not placeholder_str:
-        placeholder_str = "".join(s.raw for s in so_slices)
-
-    # The Jinja templater sometimes returns source-only slices with
-    # gaps between. For example, in this section:
-    #
-    #   {% else %}
-    #   JOIN
-    #       {{action}}_raw_effect_sizes
-    #   USING
-    #       ({{ states }})
-    #   {% endif %}
-    #
-    # we might get {% else %} and {% endif %} slices, without the
-    # 4 lines between. This indicates those lines were not executed
-    # In this case, generate a placeholder where the skipped code is
-    # omitted but noted with a brief string, e.g.:
-    #
-    # "{% else %}... [103 unused template characters] ...{% endif %}".
-    #
-    # This is more readable -- it would be REALLY confusing for a
-    # placeholder to include code that wasn't even executed!!
-    if len(so_slices) >= 2:
-        has_gap = False
-        gap_placeholder_parts = []
-        last_slice = None
-        # For each slice...
-        for so_slice in so_slices:
-            # If it's not the first slice, was there a gap?
-            if last_slice:
-                end_last = last_slice.source_idx + len(last_slice.raw)
-                chars_skipped = so_slice.source_idx - end_last
-                if chars_skipped:
-                    # Yes, gap between last_slice and so_slice.
-                    has_gap = True
-
-                    # Generate a string documenting the gap.
-                    if chars_skipped >= 10:
-                        gap_placeholder_parts.append(
-                            f"... [{chars_skipped} unused template " "characters] ..."
-                        )
-                    else:
-                        gap_placeholder_parts.append("...")
-            # Now add the slice's source.
-            gap_placeholder_parts.append(so_slice.raw)
-            last_slice = so_slice
-        if has_gap:
-            placeholder_str = "".join(gap_placeholder_parts)
-    lexer_logger.debug(
-        "    Overlap Length: %s. PS: %s, LS: %s, p_str: %r, templ_str: %r",
-        existing_len,
-        placeholder_slice,
-        source_slice,
-        placeholder_str,
-        templ_str,
-    )
+    assert is_zero_slice(tfs.templated_slice)
+    # First check for jumps. Backward initially, because in the backward
+    # case we don't render the element we find first.
+    # That requires being able to look past to the next element.
+    if tfs.slice_type.startswith("block") and next_tfs:
+        # Look for potential backward jump
+        if next_tfs.source_slice.start < tfs.source_slice.start:
+            lexer_logger.debug("      Backward jump detected. Inserting Loop Marker")
+            # If we're here remember we're on the tfs which is the block end
+            # i.e. not the thing we want to render.
+            pos_marker = PositionMarker.from_point(
+                tfs.source_slice.start,
+                tfs.templated_slice.start,
+                templated_file,
+            )
+            if add_indents:
+                yield Dedent(
+                    is_template=True,
+                    pos_marker=pos_marker,
+                )
 
-    # Calculate potential indent/dedent
-    segment_buffer: List[RawSegment] = []
-    block_slices = sum(s.slice_type.startswith("block_") for s in so_slices)
-    lead_dedent = so_slices[0].slice_type in ("block_end", "block_mid")
-    trail_indent = so_slices[-1].slice_type in ("block_start", "block_mid")
-    lexer_logger.debug(
-        "    Block Slices: %s. Lead: %s, Trail: %s, Add: %s",
-        block_slices,
-        lead_dedent,
-        trail_indent,
-        add_indents,
-    )
+            yield TemplateLoop(pos_marker=pos_marker, block_uuid=block_stack.top())
 
-    # Update block stack
-    block_uuid = None
-    for so_slice in so_slices:
-        if so_slice.slice_type == "block_end":
-            block_uuid = block_stack.pop()
-        elif so_slice.slice_type == "block_start":
-            block_uuid = uuid4()
-            block_stack.append(block_uuid)
-
-    # Add a dedent if appropriate.
-    if lead_dedent and add_indents:
-        lexer_logger.debug("      DEDENT")
-        segment_buffer.append(
-            Dedent(
+            if add_indents:
+                yield Indent(
+                    is_template=True,
+                    pos_marker=pos_marker,
+                )
+            # Move on to the next templated slice. Don't render this directly.
+            return
+
+    # Then handle blocks (which aren't jumps backward)
+    if tfs.slice_type.startswith("block"):
+        # It's a block. Yield a placeholder with potential indents.
+
+        # Update block stack or add indents
+        if tfs.slice_type == "block_start":
+            block_stack.enter(tfs.source_slice)
+        elif add_indents and tfs.slice_type in ("block_end", "block_mid"):
+            yield Dedent(
                 is_template=True,
                 pos_marker=PositionMarker.from_point(
-                    placeholder_slice.start,
-                    template_slice.start,
+                    tfs.source_slice.start,
+                    tfs.templated_slice.start,
                     templated_file,
                 ),
+                # NOTE: We mark the dedent with the block uuid too.
+                block_uuid=block_stack.top(),
             )
-        )
 
-    # Always add a placeholder
-    segment_buffer.append(
-        TemplateSegment(
-            pos_marker=PositionMarker(
-                placeholder_slice,
-                slice(
-                    template_slice.start,
-                    template_slice.start,
-                ),
-                templated_file,
-            ),
-            source_str=placeholder_str,
-            block_type=so_slices[0].slice_type if len(so_slices) == 1 else "compound",
-            block_uuid=block_uuid,
+        yield TemplateSegment.from_slice(
+            tfs.source_slice,
+            tfs.templated_slice,
+            block_type=tfs.slice_type,
+            templated_file=templated_file,
+            block_uuid=block_stack.top(),
         )
-    )
-    lexer_logger.debug("      Placeholder: %s, %r", segment_buffer[-1], placeholder_str)
 
-    # Add an indent if appropriate.
-    if trail_indent and add_indents:
-        lexer_logger.debug("      INDENT")
-        segment_buffer.append(
-            Indent(
+        # Update block stack or add indents
+        if tfs.slice_type == "block_end":
+            block_stack.exit()
+        elif add_indents and tfs.slice_type in ("block_start", "block_mid"):
+            yield Indent(
                 is_template=True,
                 pos_marker=PositionMarker.from_point(
-                    placeholder_slice.stop,
-                    template_slice.start,
+                    tfs.source_slice.stop,
+                    tfs.templated_slice.stop,
                     templated_file,
                 ),
+                # NOTE: We mark the indent with the block uuid too.
+                block_uuid=block_stack.top(),
             )
-        )
 
-    return segment_buffer, source_slice, block_stack
+        # Before we move on, check for a _forward_ jump to the next
+        # element. That element can handle itself, but if any source
+        # was skipped over, we add a placeholder for it here first.
+        if next_tfs:
+            # Identify whether we have a skip.
+            skipped_chars = next_tfs.source_slice.start - tfs.source_slice.stop
+            placeholder_str = ""
+            if skipped_chars >= 10:
+                placeholder_str = (
+                    f"... [{skipped_chars} unused template characters] ..."
+                )
+            elif skipped_chars:
+                placeholder_str = "..."
+
+            # Handle it if we do.
+            if placeholder_str:
+                lexer_logger.debug("      Forward jump detected. Inserting placeholder")
+                yield TemplateSegment(
+                    pos_marker=PositionMarker(
+                        slice(tfs.source_slice.stop, next_tfs.source_slice.start),
+                        # Zero slice in the template.
+                        tfs.templated_slice,
+                        templated_file,
+                    ),
+                    source_str=placeholder_str,
+                    block_type="skipped_source",
+                )
+
+        # Move on
+        return
+
+    # Always return the slice, even if the source slice was also zero length.  Some
+    # templaters might want to pass through totally zero length slices as a way of
+    # marking locations in the middle of templated output.
+    yield TemplateSegment.from_slice(
+        tfs.source_slice,
+        tfs.templated_slice,
+        tfs.slice_type,
+        templated_file,
+    )
+
+
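
The forward-jump placeholder rule used in `_handle_zero_length_slice` above, extracted for illustration (`placeholder_for_skip` is a hypothetical name): short skips collapse to "...", longer ones record the character count.

    def placeholder_for_skip(skipped_chars: int) -> str:
        if skipped_chars >= 10:
            return f"... [{skipped_chars} unused template characters] ..."
        elif skipped_chars:
            return "..."
        return ""  # no gap, no placeholder

    print(placeholder_for_skip(0))    # ''
    print(placeholder_for_skip(4))    # '...'
    print(placeholder_for_skip(103))  # '... [103 unused template characters] ...'
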
+def _iter_segments(
+    lexed_elements: List[TemplateElement],
+    templated_file_slices: List[TemplatedFileSlice],
+    templated_file: TemplatedFile,
+    add_indents: bool = True,
+) -> Iterator[RawSegment]:
+    # An index to track where we've got to in the templated file.
+    tfs_idx = 0
+    # We keep a map of previous block locations in case they re-occur.
+    block_stack = BlockTracker()
+
+    # Now work out source slices, and add in template placeholders.
+    for idx, element in enumerate(lexed_elements):
+        # We're working through elements in the rendered file.
+        # When they enter this code they don't have a position in the source.
+        # We already have a map of how templated elements relate to the
+        # source file, so we walk through it to establish where each element
+        # came from. In theory we can step through the two lists in lock step.
+
+        # i.e. we work through the lexed elements, but check off the
+        # templated file slices as we go.
+
+        # Output the slice as we lex.
+        lexer_logger.debug("  %s: %s. [tfs_idx = %s]", idx, element, tfs_idx)
+
+        # All lexed elements, by definition, have a position in the templated
+        # file. That means we've potentially got zero-length elements we also
+        # need to consider. We certainly need to consider the templated
+        # slice at tfs_idx, but there may be several more after it which
+        # are also relevant to this element.
+
+        # A lexed element is either a literal in the raw file or the result
+        # (or part of the result) of a template placeholder. We don't make
+        # placeholders for any variables which return a non-zero length of
+        # code. We do add placeholders for others.
+
+        # The amount of the current element which has already been consumed.
+        consumed_element_length = 0
+        # The position in the source which we still need to yield from.
+        stashed_source_idx = None
+
+        for tfs_idx, tfs in enumerate(templated_file_slices[tfs_idx:], tfs_idx):
+            lexer_logger.debug("      %s: %s", tfs_idx, tfs)
+
+            # Is it a zero slice?
+            if is_zero_slice(tfs.templated_slice):
+                next_tfs = (
+                    templated_file_slices[tfs_idx + 1]
+                    if tfs_idx + 1 < len(templated_file_slices)
+                    else None
+                )
+                yield from _handle_zero_length_slice(
+                    tfs, next_tfs, block_stack, templated_file, add_indents
+                )
+                continue
+
+            if tfs.slice_type == "literal":
+                # There's a literal to deal with here. Yield as much as we can.
+
+                # Can we cover this whole lexed element with the current templated
+                # slice without moving on?
+                tfs_offset = tfs.source_slice.start - tfs.templated_slice.start
+                # NOTE: `<=` rather than `<`, to include the case of the
+                # lengths matching exactly.
+                if element.template_slice.stop <= tfs.templated_slice.stop:
+                    lexer_logger.debug(
+                        "     Consuming whole from literal. Existing Consumed: %s",
+                        consumed_element_length,
+                    )
+                    # If we have a stashed start use that. Otherwise infer start.
+                    if stashed_source_idx is not None:
+                        slice_start = stashed_source_idx
+                    else:
+                        slice_start = (
+                            element.template_slice.start
+                            + consumed_element_length
+                            + tfs_offset
+                        )
+                    yield element.to_segment(
+                        pos_marker=PositionMarker(
+                            slice(
+                                slice_start,
+                                element.template_slice.stop + tfs_offset,
+                            ),
+                            element.template_slice,
+                            templated_file,
+                        ),
+                        subslice=slice(consumed_element_length, None),
+                    )
+
+                    # If it was an exact match, consume the templated element too.
+                    if element.template_slice.stop == tfs.templated_slice.stop:
+                        tfs_idx += 1
+                    # In any case, we're done with this element. Move on
+                    break
+                elif element.template_slice.start == tfs.templated_slice.stop:
+                    # Did we forget to move on from the last tfs and there's
+                    # overlap?
+                    # NOTE: If the rest of the logic works, this should never
+                    # happen.
+                    lexer_logger.debug("     NOTE: Missed Skip")  # pragma: no cover
+                    continue  # pragma: no cover
+                else:
+                    # This means that the current lexed element spans across
+                    # multiple templated file slices.
+                    lexer_logger.debug("     Consuming whole spanning literal")
+                    # This almost certainly means there's a templated element
+                    # in the middle of a whole lexed element.
+
+                    # What we do here depends on whether we're allowed to split
+                    # lexed elements. This is basically only true if it's whitespace.
+                    # NOTE: We should probably make this configurable on the
+                    # matcher object, but for now we're going to look for the
+                    # name of the lexer.
+                    if element.matcher.name == "whitespace":
+                        # We *can* split it!
+                        # Consume what we can from this slice and move on.
+                        lexer_logger.debug(
+                            "     Consuming split whitespace from literal. "
+                            "Existing Consumed: %s",
+                            consumed_element_length,
+                        )
+                        if stashed_source_idx is not None:
+                            raise NotImplementedError(  # pragma: no cover
+                                "Found literal whitespace with stashed idx!"
+                            )
+                        incremental_length = (
+                            tfs.templated_slice.stop - element.template_slice.start
+                        )
+                        yield element.to_segment(
+                            pos_marker=PositionMarker(
+                                slice(
+                                    element.template_slice.start
+                                    + consumed_element_length
+                                    + tfs_offset,
+                                    tfs.templated_slice.stop + tfs_offset,
+                                ),
+                                element.template_slice,
+                                templated_file,
+                            ),
+                            # Subdivide the existing segment.
+                            subslice=offset_slice(
+                                consumed_element_length,
+                                incremental_length,
+                            ),
+                        )
+                        consumed_element_length += incremental_length
+                        continue
+                    else:
+                        # We can't split it. We're going to end up yielding a segment
+                        # which spans multiple slices. Stash the type, and if we haven't
+                        # set the start yet, stash it too.
+                        lexer_logger.debug("     Spilling over literal slice.")
+                        if stashed_source_idx is None:
+                            stashed_source_idx = (
+                                element.template_slice.start + tfs_offset
+                            )
+                            lexer_logger.debug(
+                                "     Stashing a source start. %s", stashed_source_idx
+                            )
+                        continue
+
+            elif tfs.slice_type in ("templated", "block_start"):
+                # Found a templated slice. Does it have length in the templated file?
+                # If it doesn't, then we'll pick it up next.
+                if not is_zero_slice(tfs.templated_slice):
+                    # If it's a block_start, append to the block stack.
+                    # NOTE: This is rare, but call blocks do occasionally
+                    # have length (and so don't get picked up by
+                    # _handle_zero_length_slice)
+                    if tfs.slice_type == "block_start":
+                        block_stack.enter(tfs.source_slice)
+
+                    # Is our current element totally contained in this slice?
+                    if element.template_slice.stop <= tfs.templated_slice.stop:
+                        lexer_logger.debug("     Contained templated slice.")
+                        # Yes it is. Add the lexed element, with its source
+                        # slice spanning the whole source slice of this file slice.
+                        # If we've got an existing stashed source start, use that
+                        # as the start of the source slice.
+                        if stashed_source_idx is not None:
+                            slice_start = stashed_source_idx
+                        else:
+                            slice_start = (
+                                tfs.source_slice.start + consumed_element_length
+                            )
+                        yield element.to_segment(
+                            pos_marker=PositionMarker(
+                                slice(
+                                    slice_start,
+                                    # The end in the source is the end of this
+                                    # templated file slice. We can't subdivide
+                                    # any better.
+                                    tfs.source_slice.stop,
+                                ),
+                                element.template_slice,
+                                templated_file,
+                            ),
+                            subslice=slice(consumed_element_length, None),
+                        )
+
+                        # If it was an exact match, consume the templated element too.
+                        if element.template_slice.stop == tfs.templated_slice.stop:
+                            tfs_idx += 1
+                        # Carry on to the next lexed element
+                        break
+                    # We've got an element which extends beyond this templated slice.
+                    # This means that a _single_ lexed element claims both some
+                    # templated elements and some non-templated elements. That could
+                    # include all kinds of things (and from here we don't know what
+                    # else is yet to come, comments, blocks, literals etc...).
+
+                    # In the `literal` version of this code we would consider
+                    # splitting the literal element here, but on the templated
+                    # side we don't. That's because the way templated tokens
+                    # are lexed means they should arrive "pre-split".
+                    else:
+                        # Stash the source idx for later when we do make a segment.
+                        lexer_logger.debug("     Spilling over templated slice.")
+                        if stashed_source_idx is None:
+                            stashed_source_idx = tfs.source_slice.start
+                            lexer_logger.debug(
+                                "     Stashing a source start as lexed element spans "
+                                "over the end of a template slice. %s",
+                                stashed_source_idx,
+                            )
+                        # Move on to the next template slice
+                        continue
+
+            raise NotImplementedError(
+                f"Unable to process slice: {tfs}"
+            )  # pragma: no cover
+
+    # If any templated file slices are left over, yield them.
+    # If we get here, they must all be zero length.
+    for tfs_idx, tfs in enumerate(templated_file_slices[tfs_idx:], tfs_idx):
+        next_tfs = (
+            templated_file_slices[tfs_idx + 1]
+            if tfs_idx + 1 < len(templated_file_slices)
+            else None
+        )
+        yield from _handle_zero_length_slice(
+            tfs, next_tfs, block_stack, templated_file, add_indents
+        )
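
The lock-step walk in `_iter_segments` relies on re-enumerating the slice
list from `tfs_idx` so that progress through the slices persists across
outer-loop iterations. A toy sketch of that idiom (all values hypothetical):

    slices = ["a", "b", "c", "d"]
    idx = 0
    consumed = []
    for element in ["x", "y"]:
        # Resume the inner loop from wherever the previous element left off.
        for idx, s in enumerate(slices[idx:], idx):
            consumed.append((element, idx, s))
            if idx % 2 == 1:  # pretend this slice completes the element
                idx += 1
                break
    assert consumed == [
        ("x", 0, "a"), ("x", 1, "b"), ("y", 2, "c"), ("y", 3, "d"),
    ]
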
 
 
 class Lexer:
@@ -513,7 +706,7 @@ class Lexer:
 
         self.last_resort_lexer = last_resort_lexer or RegexLexer(
             "<unlexable>",
-            r"[^\t\n\,\.\ \-\+\*\\\/\'\"\;\:\[\]\(\)\|]*",
+            r"[^\t\n\ ]*",
             UnlexableSegment,
         )
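
The relaxed fallback pattern above only stops at whitespace, so an unlexable
run is captured as one token rather than fragmenting on punctuation. A quick
sketch of the difference, using the `regex` package the codebase already
imports (the input string is made up):

    import regex

    old = regex.match(r"[^\t\n\,\.\ \-\+\*\\\/\'\"\;\:\[\]\(\)\|]*", "foo$bar,baz")
    new = regex.match(r"[^\t\n\ ]*", "foo$bar,baz")
    assert old.group(0) == "foo$bar"      # stops at the comma
    assert new.group(0) == "foo$bar,baz"  # only whitespace terminates
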
 
@@ -545,7 +738,7 @@ class Lexer:
                 if not resort_res:  # pragma: no cover
                     # If we STILL can't match, then just panic out.
                     raise SQLLexError(
-                        f"Fatal. Unable to lex characters: {0!r}".format(
+                        "Fatal. Unable to lex characters: {0!r}".format(
                             res.forward_string[:10] + "..."
                             if len(res.forward_string) > 9
                             else res.forward_string
@@ -574,85 +767,14 @@ class Lexer:
         self, elements: List[TemplateElement], templated_file: TemplatedFile
     ) -> Tuple[RawSegment, ...]:
         """Convert a tuple of lexed elements into a tuple of segments."""
-        # Working buffer to build up segments
-        segment_buffer: List[RawSegment] = []
-        block_stack: List[UUID] = []
-
-        add_indents = self.config.get("template_blocks_indent", "indentation")
-
         lexer_logger.info("Elements to Segments.")
-        # Get the templated slices to re-insert tokens for them
-        source_only_slices = templated_file.source_only_slices()
-        lexer_logger.info("Source-only slices: %s", source_only_slices)
-        stash_source_slice, last_source_slice = None, None
-
-        # Now work out source slices, and add in template placeholders.
-        for idx, element in enumerate(elements):
-            # Calculate Source Slice
-            if idx != 0:
-                last_source_slice = stash_source_slice
-            source_slice = templated_file.templated_slice_to_source_slice(
-                element.template_slice
-            )
-            stash_source_slice = source_slice
-            # Output the slice as we lex.
-            lexer_logger.debug(
-                "  %s, %s, %s, %r",
-                idx,
-                element,
-                source_slice,
-                templated_file.templated_str[element.template_slice],
-            )
-
-            # Detect template loops
-            if last_source_slice:
-                segment_buffer.extend(
-                    _generate_template_loop_segments(
-                        source_slice,
-                        last_source_slice,
-                        element.template_slice.start,
-                        templated_file,
-                        block_uuid=block_stack[-1] if block_stack else None,
-                    )
-                )
-
-            # Generate template segments and adjust source slice accordingly
-            placeholders, source_slice, block_stack = _generate_placeholder_segments(
-                source_slice,
-                last_source_slice,
-                element.template_slice,
-                source_only_slices,
-                templated_file,
-                add_indents,
-                block_stack,
-            )
-            segment_buffer.extend(placeholders)
-
-            # Add the actual segment
-            segment_buffer.append(
-                element.to_segment(
-                    pos_marker=PositionMarker(
-                        source_slice,
-                        element.template_slice,
-                        templated_file,
-                    ),
-                )
+        add_indents = self.config.get("template_blocks_indent", "indentation")
+        # Delegate to _iter_segments
+        segment_buffer: List[RawSegment] = list(
+            _iter_segments(
+                elements, templated_file.sliced_file, templated_file, add_indents
             )
-
-            # Generate placeholders for any source-only slices that *follow*
-            # the last element. This happens, for example, if a Jinja templated
-            # file ends with "{% endif %}", and there's no trailing newline.
-            if last_source_slice and idx == len(elements) - 1:
-                placeholders, _, _ = _generate_placeholder_segments(
-                    slice(source_slice.stop, len(templated_file.source_str)),
-                    last_source_slice,
-                    slice(element.template_slice.stop, element.template_slice.stop),
-                    source_only_slices,
-                    templated_file,
-                    add_indents,
-                    block_stack,
-                )
-                segment_buffer.extend(placeholders)
+        )
 
         # Add an end of file marker
         segment_buffer.append(
@@ -662,7 +784,6 @@ class Lexer:
                 else PositionMarker.from_point(0, 0, templated_file)
             )
         )
-
         # Convert to tuple before return
         return tuple(segment_buffer)
 
@@ -717,7 +838,7 @@ class Lexer:
         idx = 0
         templated_buff: List[TemplateElement] = []
         for element in elements:
-            template_slice = slice(idx, idx + len(element.raw))
+            template_slice = offset_slice(idx, len(element.raw))
             idx += len(element.raw)
             templated_buff.append(TemplateElement.from_element(element, template_slice))
             if (
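
`offset_slice` (imported from `sqlfluff.core.slice_helpers`, judging by its
other uses in this diff) presumably reduces to a one-liner like this sketch:

    def offset_slice(start: int, length: int) -> slice:
        # A slice of `length` elements beginning at `start`.
        return slice(start, start + length)

    assert offset_slice(5, 3) == slice(5, 8)
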
diff --git a/src/sqlfluff/core/parser/markers.py b/src/sqlfluff/core/parser/markers.py
index 470ffeb..aec10f1 100644
--- a/src/sqlfluff/core/parser/markers.py
+++ b/src/sqlfluff/core/parser/markers.py
@@ -6,6 +6,8 @@ This class is a construct to keep track of positions within a file.
 from dataclasses import dataclass
 from typing import Tuple, TYPE_CHECKING
 
+from sqlfluff.core.slice_helpers import zero_slice
+
 if TYPE_CHECKING:
     from sqlfluff.core.templaters import TemplatedFile  # pragma: no cover
 
@@ -83,12 +85,38 @@ class PositionMarker:
     ):
         """Convenience method for creating point markers."""
         return cls(
-            slice(source_point, source_point),
-            slice(templated_point, templated_point),
+            zero_slice(source_point),
+            zero_slice(templated_point),
             templated_file,
             **kwargs,
         )
 
+    @classmethod
+    def from_points(
+        cls,
+        start_point_marker: "PositionMarker",
+        end_point_marker: "PositionMarker",
+    ):
+        """Construct a position marker from the section between two points."""
+        return cls(
+            slice(
+                start_point_marker.source_slice.start,
+                end_point_marker.source_slice.stop,
+            ),
+            slice(
+                start_point_marker.templated_slice.start,
+                end_point_marker.templated_slice.stop,
+            ),
+            # The templated file references from the point markers
+            # should be the same, so we're just going to pick one.
+            # TODO: An assertion that they match actually fails in practice,
+            # but preliminary debugging did not reveal why.
+            start_point_marker.templated_file,
+            # Line position should be of the _start_ of the section.
+            start_point_marker.working_line_no,
+            start_point_marker.working_line_pos,
+        )
+
     @classmethod
     def from_child_markers(cls, *markers):
         """Create a parent marker from it's children."""
@@ -202,7 +230,7 @@ class PositionMarker:
         This value is used for:
         - Ignoring linting errors in templated sections.
         - Whether `iter_patches` can return without recursing.
-        - Whether certain rules (such as L046) are triggered.
+        - Whether certain rules (such as JJ01) are triggered.
         """
         return self.templated_file.is_source_slice_literal(self.source_slice)
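
`zero_slice`, newly imported here, presumably mirrors `offset_slice` for the
degenerate case; a sketch under that assumption:

    def zero_slice(point: int) -> slice:
        # A zero-length slice marking a single point.
        return slice(point, point)

    assert zero_slice(4) == slice(4, 4)

The new `from_points` constructor then simply spans from the start of one
point marker's slices to the stop of another's, as shown above.
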
 
diff --git a/src/sqlfluff/core/parser/matchable.py b/src/sqlfluff/core/parser/matchable.py
index 10eed00..f34e145 100644
--- a/src/sqlfluff/core/parser/matchable.py
+++ b/src/sqlfluff/core/parser/matchable.py
@@ -2,7 +2,7 @@
 
 import copy
 from abc import ABC, abstractmethod
-from typing import List, Optional, Tuple, TYPE_CHECKING
+from typing import Optional, Tuple, TYPE_CHECKING, FrozenSet
 
 
 if TYPE_CHECKING:  # pragma: no cover
@@ -20,9 +20,15 @@ class Matchable(ABC):
     @abstractmethod
     def simple(
         self, parse_context: "ParseContext", crumbs: Optional[Tuple[str, ...]] = None
-    ) -> Optional[List[str]]:
+    ) -> Optional[Tuple[FrozenSet[str], FrozenSet[str]]]:
         """Try to obtain a simple response from the matcher.
 
+        Returns:
+            None - if not simple.
+            Tuple of two sets of strings if simple. The first is a set of
+                uppercase raw strings which would match. The second is a set
+                of segment types that would match.
+
         NOTE: the crumbs kwarg is designed to be used by Ref to
         detect recursion.
         """
@@ -34,3 +40,12 @@ class Matchable(ABC):
     def copy(self, **kwargs) -> "Matchable":  # pragma: no cover TODO?
         """Copy this Matchable."""
         return copy.copy(self)
+
+    @abstractmethod
+    def cache_key(self) -> str:
+        """A string to use for cache keying.
+
+        This string should be unique at the parsing stage such that
+        if there has already been a match against this key for a set
+        of segments, that we can reuse that match.
+        """
diff --git a/src/sqlfluff/core/parser/parsers.py b/src/sqlfluff/core/parser/parsers.py
index 646056c..b1a7494 100644
--- a/src/sqlfluff/core/parser/parsers.py
+++ b/src/sqlfluff/core/parser/parsers.py
@@ -4,8 +4,9 @@ Matchable objects which return individual segments.
 """
 
 from abc import abstractmethod
+from uuid import uuid4
 import regex
-from typing import Collection, Type, Optional, List, Tuple, Union
+from typing import Collection, Type, Optional, Tuple, Union
 
 from sqlfluff.core.parser.context import ParseContext
 from sqlfluff.core.parser.matchable import Matchable
@@ -32,6 +33,15 @@ class BaseParser(Matchable):
         self.type = type
         self.optional = optional
         self.segment_kwargs = segment_kwargs or {}
+        # Generate a cache key
+        self._cache_key = uuid4().hex
+
+    def cache_key(self) -> str:
+        """Get the cache key for this parser.
+
+        For parsers, they're unique per-instance.
+        """
+        return self._cache_key
 
     def is_optional(self) -> bool:
         """Return whether this element is optional."""
@@ -113,16 +123,13 @@ class TypedParser(BaseParser):
             **segment_kwargs,
         )
 
-    def simple(cls, parse_context: ParseContext, crumbs=None) -> Optional[List[str]]:
+    def simple(cls, parse_context: ParseContext, crumbs=None):
         """Does this matcher support a uppercase hash matching route?
 
-        TypedParser segment does NOT for now. We might need to later for efficiency.
-
-        There is a way that this *could* be enabled, by allowing *another*
-        shortcut route, to look ahead at the types of upcoming segments,
-        rather than their content.
+        TypedParser segment doesn't support matching against raw strings,
+        but it does support it against types.
         """
-        return None
+        return frozenset(), frozenset((cls.template,))
 
     def _is_first_match(self, segment: BaseSegment):
         """Return true if the type matches the target type."""
@@ -142,7 +149,7 @@ class StringParser(BaseParser):
     ):
         self.template = template.upper()
         # Create list version upfront to avoid recreating it multiple times.
-        self._simple = [self.template]
+        self._simple = frozenset((self.template,))
         super().__init__(
             raw_class=raw_class,
             type=type,
@@ -150,13 +157,13 @@ class StringParser(BaseParser):
             **segment_kwargs,
         )
 
-    def simple(self, parse_context: "ParseContext", crumbs=None) -> Optional[List[str]]:
+    def simple(self, parse_context: "ParseContext", crumbs=None):
         """Return simple options for this matcher.
 
         Because string matchers are not case sensitive we can
         just return the template here.
         """
-        return self._simple
+        return self._simple, frozenset()
 
     def _is_first_match(self, segment: BaseSegment):
         """Does the segment provided match according to the current rules."""
@@ -180,7 +187,7 @@ class MultiStringParser(BaseParser):
     ):
         self.templates = {template.upper() for template in templates}
         # Create list version upfront to avoid recreating it multiple times.
-        self._simple = list(self.templates)
+        self._simple = frozenset(self.templates)
         super().__init__(
             raw_class=raw_class,
             type=type,
@@ -188,13 +195,13 @@ class MultiStringParser(BaseParser):
             **segment_kwargs,
         )
 
-    def simple(self, parse_context: "ParseContext", crumbs=None) -> Optional[List[str]]:
+    def simple(self, parse_context: "ParseContext", crumbs=None):
         """Return simple options for this matcher.
 
         Because string matchers are not case sensitive we can
         just return the templates here.
         """
-        return self._simple
+        return self._simple, frozenset()
 
     def _is_first_match(self, segment: BaseSegment):
         """Does the segment provided match according to the current rules."""
@@ -230,7 +237,7 @@ class RegexParser(BaseParser):
             **segment_kwargs,
         )
 
-    def simple(cls, parse_context: ParseContext, crumbs=None) -> Optional[List[str]]:
+    def simple(cls, parse_context: ParseContext, crumbs=None):
         """Does this matcher support a uppercase hash matching route?
 
         Regex segment does NOT for now. We might need to later for efficiency.
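
With `simple()` now returning a pair of frozensets, callers can cheaply union
hints across several matchers; a sketch of that consumption pattern (values
hypothetical):

    matcher_hints = [
        (frozenset({"SELECT"}), frozenset()),      # like a StringParser
        (frozenset(), frozenset({"comparison"})),  # like a TypedParser
    ]
    raws: frozenset = frozenset()
    types: frozenset = frozenset()
    for simple_raws, simple_types in matcher_hints:
        raws |= simple_raws
        types |= simple_types
    assert raws == {"SELECT"} and types == {"comparison"}
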
diff --git a/src/sqlfluff/core/parser/segments/__init__.py b/src/sqlfluff/core/parser/segments/__init__.py
index 88ff673..1dab65c 100644
--- a/src/sqlfluff/core/parser/segments/__init__.py
+++ b/src/sqlfluff/core/parser/segments/__init__.py
@@ -25,6 +25,7 @@ from sqlfluff.core.parser.segments.meta import (
     MetaSegment,
     Indent,
     Dedent,
+    ImplicitIndent,
     TemplateSegment,
     EndOfFile,
     TemplateLoop,
@@ -49,6 +50,7 @@ __all__ = (
     "MetaSegment",
     "Indent",
     "Dedent",
+    "ImplicitIndent",
     "TemplateSegment",
     "EndOfFile",
     "TemplateLoop",
diff --git a/src/sqlfluff/core/parser/segments/base.py b/src/sqlfluff/core/parser/segments/base.py
index fecdea3..275215b 100644
--- a/src/sqlfluff/core/parser/segments/base.py
+++ b/src/sqlfluff/core/parser/segments/base.py
@@ -13,7 +13,7 @@ from collections.abc import MutableSet
 from copy import deepcopy, copy
 from dataclasses import dataclass, field, replace
 from io import StringIO
-from itertools import takewhile, chain
+from itertools import chain
 from typing import (
     Any,
     Callable,
@@ -86,11 +86,13 @@ class PathStep:
         segment (:obj:`BaseSegment`): The segment in the chain.
         idx (int): The index of the target within its `segment`.
         len (int): The number of children `segment` has.
+        code_idxs (:obj:`tuple` of int): The indices which contain code.
     """
 
     segment: "BaseSegment"
     idx: int
     len: int
+    code_idxs: Tuple[int, ...]
 
 
 @dataclass
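
The new `code_idxs` field is just the indices of the code children at each
step; a toy sketch of how it is computed, mirroring the comprehension used
later in this diff (the `Seg` class is illustrative only):

    class Seg:
        def __init__(self, is_code: bool) -> None:
            self.is_code = is_code

    children = [Seg(False), Seg(True), Seg(True), Seg(False)]
    code_idxs = tuple(i for i, s in enumerate(children) if s.is_code)
    assert code_idxs == (1, 2)
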
@@ -111,35 +113,6 @@ class FixPatch:
         """Generate a tuple of this fix for deduping."""
         return (self.source_slice, self.fixed_raw)
 
-    @classmethod
-    def infer_from_template(
-        cls,
-        templated_slice: slice,
-        fixed_raw: str,
-        patch_category: str,
-        templated_file: TemplatedFile,
-    ):
-        """Infer source position from just templated position.
-
-        In cases where we expect it to be uncontroversial it
-        is sometimes more straightforward to just leverage
-        the existing mapping functions to auto-generate the
-        source position rather than calculating it explicitly.
-        """
-        # NOTE: There used to be error handling here to catch ValueErrors.
-        # Removed in July 2022 because untestable.
-        source_slice = templated_file.templated_slice_to_source_slice(
-            templated_slice,
-        )
-        return cls(
-            source_slice=source_slice,
-            templated_slice=templated_slice,
-            patch_category=patch_category,
-            fixed_raw=fixed_raw,
-            templated_str=templated_file.templated_str[templated_slice],
-            source_str=templated_file.source_str[source_slice],
-        )
-
 
 @dataclass
 class AnchorEditInfo:
@@ -160,6 +133,10 @@ class AnchorEditInfo:
         We also allow potentially multiple source fixes on the same
         anchor by condensing them together here.
         """
+        if fix in self.fixes:
+            # Deduplicate fixes in case it's already in there.
+            return
+
         if fix.is_just_source_edit():
             assert fix.edit
             # is_just_source_edit confirms there will be a list
@@ -232,6 +209,10 @@ class SegmentMetaclass(type):
         here saves calculating it at runtime for each
         instance of the class.
         """
+        # Create a cache uuid on definition.
+        # We do it here so every _definition_ of a segment
+        # gets a unique UUID regardless of dialect.
+        class_dict["_cache_key"] = uuid4().hex
         class_obj = super().__new__(mcs, name, bases, class_dict)
         added_type = class_dict.get("type", None)
         class_types = {added_type} if added_type else set()
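
A stripped-down sketch of the metaclass trick: every class *definition*,
including each subclass, receives its own key before the class object is
built (`KeyedMeta` is illustrative, not the library's class):

    from uuid import uuid4

    class KeyedMeta(type):
        def __new__(mcs, name, bases, class_dict):
            # Assigned per definition, so subclasses don't inherit it.
            class_dict["_cache_key"] = uuid4().hex
            return super().__new__(mcs, name, bases, class_dict)

    class A(metaclass=KeyedMeta):
        pass

    class B(A):
        pass

    assert A._cache_key != B._cache_key
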
@@ -278,6 +259,10 @@ class BaseSegment(metaclass=SegmentMetaclass):
     # What other kwargs need to be copied when applying fixes.
     additional_kwargs: List[str] = []
     pos_marker: Optional[PositionMarker]
+    # NOTE: Cache key is generated by the SegmentMetaclass
+    _cache_key: str
+    # _preface_modifier used in ._preface()
+    _preface_modifier: str = ""
 
     def __init__(
         self,
@@ -475,9 +460,10 @@ class BaseSegment(metaclass=SegmentMetaclass):
     ) -> List[Tuple["RawSegment", List[PathStep]]]:
         """Returns a list of raw segments in this segment with the ancestors."""
         buffer = []
+        code_idxs = tuple(idx for idx, seg in enumerate(self.segments) if seg.is_code)
         for idx, seg in enumerate(self.segments):
             # If it's a raw, yield it with this segment as the parent
-            new_step = [PathStep(self, idx, len(self.segments))]
+            new_step = [PathStep(self, idx, len(self.segments), code_idxs)]
             if seg.is_type("raw"):
                 buffer.append((seg, new_step))
             # If it's not, recurse - prepending self to the ancestor stack
@@ -518,12 +504,12 @@ class BaseSegment(metaclass=SegmentMetaclass):
         # * Source slice not empty: If it's empty, this means it doesn't appear
         #   in the source, e.g. because it is new code generated by a lint fix.
         #   Return False for these.
-        # * Source string doesn't match raw segment contents. This can only
-        #   happen if templating is involved.
+        # * It's not a literal slice. If it's a literal and has size then it's
+        #   not templated.
         assert self.pos_marker
         return (
             self.pos_marker.source_slice.start != self.pos_marker.source_slice.stop
-            and self.raw != self.pos_marker.source_str()
+            and not self.pos_marker.is_literal()
         )
 
     # ################ STATIC METHODS
@@ -533,8 +519,7 @@ class BaseSegment(metaclass=SegmentMetaclass):
         """Return a tuple structure from an iterable of segments."""
         return tuple(seg.to_tuple(**kwargs) for seg in segs)
 
-    @staticmethod
-    def _suffix():
+    def _suffix(self) -> str:
         """Return any extra output required at the end when logging.
 
         NB Override this for specific subclasses if we want extra output.
@@ -640,30 +625,47 @@ class BaseSegment(metaclass=SegmentMetaclass):
         # and backward.
         segment_buffer: Tuple["BaseSegment", ...] = ()
         for idx, segment in enumerate(segments):
-            repositioned_seg = copy(segment)
+            repositioned_seg = segment.copy()
             # Fill any that don't have a position.
             if not repositioned_seg.pos_marker:
                 # Can we get a position from the previous?
+                start_point = None
                 if idx > 0:
                     prev_seg = segment_buffer[idx - 1]
                     # Given we're going back in the buffer we should
                     # have set the position marker for everything already
                     # in there. This is mostly a hint to mypy.
                     assert prev_seg.pos_marker
-                    repositioned_seg.pos_marker = prev_seg.pos_marker.end_point_marker()
+                    start_point = prev_seg.pos_marker.end_point_marker()
                 # Can we get it from the parent?
                 elif parent_pos:
-                    repositioned_seg.pos_marker = parent_pos.start_point_marker()
-                # Search forward for a following one, if we have to?
-                else:
-                    for fwd_seg in segments[idx + 1 :]:
-                        if fwd_seg.pos_marker:
-                            repositioned_seg.pos_marker = (
-                                fwd_seg.pos_marker.start_point_marker()
-                            )
-                            break
-                    else:  # pragma: no cover
-                        raise ValueError("Unable to position new segment")
+                    start_point = parent_pos.start_point_marker()
+
+                # Search forward for the end point.
+                end_point = None
+                for fwd_seg in segments[idx + 1 :]:
+                    if fwd_seg.pos_marker:
+                        # NOTE: Use raw segments because it's more reliable.
+                        end_point = fwd_seg.raw_segments[
+                            0
+                        ].pos_marker.start_point_marker()
+                        break
+
+                if start_point and end_point and start_point != end_point:
+                    # We should construct a wider position marker.
+                    repositioned_seg.pos_marker = PositionMarker.from_points(
+                        start_point,
+                        end_point,
+                    )
+                # If we have start point (or if they were equal above),
+                # just apply start point.
+                elif start_point:
+                    repositioned_seg.pos_marker = start_point
+                # Do we have an end?
+                elif end_point:
+                    repositioned_seg.pos_marker = end_point
+                else:  # pragma: no cover
+                    raise ValueError("Unable to position new segment")
 
             assert repositioned_seg.pos_marker  # hint for mypy
             # Update the working position.
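
The repositioning logic above reduces to a three-way choice; a sketch with
integers standing in for point markers (function and values hypothetical):

    def choose_position(start_point, end_point):
        if start_point and end_point and start_point != end_point:
            return ("span", start_point, end_point)  # from_points(...)
        elif start_point:
            return ("point", start_point)
        elif end_point:
            return ("point", end_point)
        raise ValueError("Unable to position new segment")

    assert choose_position(2, 7) == ("span", 2, 7)
    assert choose_position(5, 5) == ("point", 5)
    assert choose_position(None, 7) == ("point", 7)
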
@@ -690,7 +692,7 @@ class BaseSegment(metaclass=SegmentMetaclass):
     # ################ CLASS METHODS
 
     @classmethod
-    def simple(cls, parse_context: ParseContext, crumbs=None) -> Optional[List[str]]:
+    def simple(cls, parse_context: ParseContext, crumbs=None):
         """Does this matcher support an uppercase hash matching route?
 
         This should be true if the MATCH grammar is simple. Most more
@@ -704,6 +706,14 @@ class BaseSegment(metaclass=SegmentMetaclass):
             # simple.
             return None
 
+    @classmethod
+    def cache_key(cls) -> str:
+        """Return the cache key for this segment definition.
+
+        NOTE: The key itself is generated on _definition_ by the metaclass.
+        """
+        return cls._cache_key
+
     @classmethod
     def is_optional(cls):
         """Return True if this segment is optional.
@@ -845,7 +855,7 @@ class BaseSegment(metaclass=SegmentMetaclass):
         """Returns the preamble to any logging."""
         padded_type = "{padding}{modifier}{type}".format(
             padding=" " * (ident * tabsize),
-            modifier="[META] " if self.is_meta else "",
+            modifier=self._preface_modifier,
             type=self.get_type() + ":",
         )
         preface = "{pos:20}|{padded_type:60}  {suffix}".format(
@@ -887,7 +897,7 @@ class BaseSegment(metaclass=SegmentMetaclass):
 
         self._recalculate_caches()
 
-    def get_start_point_marker(self):
+    def get_start_point_marker(self):  # pragma: no cover
         """Get a point marker at the start of this segment."""
         return self.pos_marker.start_point_marker()
 
@@ -982,6 +992,10 @@ class BaseSegment(metaclass=SegmentMetaclass):
     def copy(self):
         """Copy the segment recursively, with appropriate copying of references."""
         new_seg = copy(self)
+        # Position markers are immutable, and it's important that we keep
+        # a reference to the same TemplatedFile, so keep the same position
+        # marker.
+        new_seg.pos_marker = self.pos_marker
         if self.segments:
             new_seg.segments = tuple(seg.copy() for seg in self.segments)
         return new_seg
@@ -1006,7 +1020,7 @@ class BaseSegment(metaclass=SegmentMetaclass):
         return [item for s in self.segments for item in s.raw_segments]
 
     def iter_segments(self, expanding=None, pass_through=False):
-        """Iterate raw segments, optionally expanding some children."""
+        """Iterate segments, optionally expanding some children."""
         for s in self.segments:
             if expanding and s.is_type(*expanding):
                 yield from s.iter_segments(
@@ -1110,7 +1124,7 @@ class BaseSegment(metaclass=SegmentMetaclass):
                         no_recursive_seg_type=no_recursive_seg_type,
                     )
 
-    def path_to(self, other) -> List[PathStep]:
+    def path_to(self, other: "BaseSegment") -> List[PathStep]:
         """Given a segment which is assumed within self, get the intermediate segments.
 
         Returns:
@@ -1131,7 +1145,7 @@ class BaseSegment(metaclass=SegmentMetaclass):
             return []  # pragma: no cover
 
         # Are we in the right ballpark?
-        # NB: Comparisons have a higher precedence than `not`.
+        # NOTE: Comparisons have a higher precedence than `not`.
         if not self.get_start_loc() <= other.get_start_loc() <= self.get_end_loc():
             return []
 
@@ -1139,9 +1153,11 @@ class BaseSegment(metaclass=SegmentMetaclass):
         if not self.segments:
             return []
 
+        # Check code idxs
+        code_idxs = tuple(idx for idx, seg in enumerate(self.segments) if seg.is_code)
         # Check through each of the child segments
         for idx, seg in enumerate(self.segments):
-            step = PathStep(self, idx, len(self.segments))
+            step = PathStep(self, idx, len(self.segments), code_idxs)
             # Have we found the target?
             if seg is other:
                 return [step]
@@ -1361,11 +1377,22 @@ class BaseSegment(metaclass=SegmentMetaclass):
 
                                 # We're doing a replacement (it could be a single
                                 # segment or an iterable)
+                                consumed_pos = False
                                 if isinstance(f.edit, BaseSegment):
                                     seg_buffer.append(f.edit)  # pragma: no cover TODO?
                                 else:
                                     for s in f.edit:
                                         seg_buffer.append(s)
+                                        # If one of them has the same raw representation
+                                        # then the first that matches gets to take the
+                                        # original position marker.
+                                        if (
+                                            f.edit_type == "replace"
+                                            and s.raw == seg.raw
+                                            and not consumed_pos
+                                        ):
+                                            seg_buffer[-1].pos_marker = seg.pos_marker
+                                            consumed_pos = True
 
                                 if f.edit_type == "create_before":
                                     # in the case of a creation before, also add this
@@ -1407,46 +1434,19 @@ class BaseSegment(metaclass=SegmentMetaclass):
                 seg_buffer.append(s)
                 seg_buffer.extend(after)
 
-            before = []
-            after = []
-            # If there's a parse grammar and this segment is not allowed to
-            # start or end with non-code, check for (and fix) misplaced
-            # segments. The reason for the parse grammar check is autofix if and
-            # only if parse() would've complained, and it has the same parse
-            # grammar check prior to checking can_start_end_non_code.
-            if r.parse_grammar and not r.can_start_end_non_code:
-                idx_non_code = self._find_start_or_end_non_code(seg_buffer)
-                # Are there misplaced segments from a fix?
-                if idx_non_code is not None:
-                    # Yes. Fix the misplaced segments: Do not include them
-                    # in the new segment's children. Instead, return them to the
-                    # caller, which will place them *adjacent to* the new
-                    # segment, in effect, bubbling them up to the tree to a
-                    # valid location.
-                    save_seg_buffer = list(seg_buffer)
-                    before.extend(
-                        takewhile(
-                            lambda seg: not self._is_code_or_meta(seg), seg_buffer
-                        )
-                    )
-                    seg_buffer = seg_buffer[len(before) :]
-                    after.extend(
-                        takewhile(
-                            lambda seg: not self._is_code_or_meta(seg),
-                            reversed(seg_buffer),
-                        )
-                    )
-                    after.reverse()
-                    seg_buffer = seg_buffer[: len(seg_buffer) - len(after)]
-                    assert before + seg_buffer + after == save_seg_buffer
-                    linter_logger.debug(
-                        "After applying fixes, segment %s violated "
-                        "'can_start_end_non_code=False' constraint. Autofixing, "
-                        "before=%s, after=%s",
-                        self,
-                        before,
-                        after,
-                    )
+            # After fixing we should be able to rely on whitespace being
+            # inserted in appropriate places. That logic now lives in
+            # `BaseRule._choose_anchor_segment()`, rather than here.
+
+            # Rather than fix that here, we simply assert that it has been
+            # done. This will raise issues in testing, but shouldn't in use.
+            if r.parse_grammar and not r.can_start_end_non_code and seg_buffer:
+                assert not self._find_start_or_end_non_code(seg_buffer), (
+                    "Found inappropriate fix application: whitespace "
+                    "in a disallowed position. Post `_choose_anchor_segment`. "
+                    "Please report this issue on GitHub with your SQL query."
+                )
+
             # Reform into a new segment
             r = r.__class__(
                 # Realign the segments within
@@ -1560,11 +1560,15 @@ class BaseSegment(metaclass=SegmentMetaclass):
             # First yield any source fixes
             yield from self._iter_source_fix_patches(templated_file)
             # Then yield the position in the source file and the patch
-            yield FixPatch.infer_from_template(
-                self.pos_marker.templated_slice,
-                self.raw,
+            yield FixPatch(
+                source_slice=self.pos_marker.source_slice,
+                templated_slice=self.pos_marker.templated_slice,
                 patch_category="literal",
-                templated_file=templated_file,
+                fixed_raw=self.raw,
+                templated_str=templated_file.templated_str[
+                    self.pos_marker.templated_slice
+                ],
+                source_str=templated_file.source_str[self.pos_marker.source_slice],
             )
         # Can we go deeper?
         elif not self.segments:
@@ -1603,6 +1607,7 @@ class BaseSegment(metaclass=SegmentMetaclass):
 
                 # If we get here, then we know it's an original. Check for deletions at
                 # the point before this segment (vs the TEMPLATED).
+                # Deletions in this sense could also mean source consumption.
                 start_diff = segment.pos_marker.templated_slice.start - templated_idx
 
                 # Check to see whether there's a discontinuity before the current
@@ -1610,15 +1615,29 @@ class BaseSegment(metaclass=SegmentMetaclass):
                 if start_diff > 0 or insert_buff:
                     # If we have an insert buffer, then it's an edit, otherwise a
                     # deletion.
-                    yield FixPatch.infer_from_template(
-                        slice(
-                            segment.pos_marker.templated_slice.start
-                            - max(start_diff, 0),
-                            segment.pos_marker.templated_slice.start,
+
+                    # For the start of the next segment, we need the position of the
+                    # first raw, not the pos marker of the whole thing. That accounts
+                    # better for loops.
+                    first_segment_pos = segment.raw_segments[0].pos_marker
+                    yield FixPatch(
+                        # Whether the source slice is zero depends on the start_diff.
+                        # A non-zero start diff implies a deletion, or more likely
+                        # a consumed element of the source. We can use the tracking
+                        # markers from the last segment to recreate where this element
+                        # should be inserted in both source and template.
+                        source_slice=slice(
+                            source_idx,
+                            first_segment_pos.source_slice.start,
+                        ),
+                        templated_slice=slice(
+                            templated_idx,
+                            first_segment_pos.templated_slice.start,
                         ),
-                        insert_buff,
                         patch_category="mid_point",
-                        templated_file=templated_file,
+                        fixed_raw=insert_buff,
+                        templated_str="",
+                        source_str="",
                     )
 
                     insert_buff = ""
@@ -1691,7 +1710,7 @@ class BracketedSegment(BaseSegment):
         super().__init__(*args, **kwargs)
 
     @classmethod
-    def simple(cls, parse_context: ParseContext, crumbs=None) -> Optional[List[str]]:
+    def simple(cls, parse_context: ParseContext, crumbs=None):
         """Simple methods for bracketed and the persistent brackets."""
         start_brackets = [
             start_bracket
@@ -1700,12 +1719,17 @@ class BracketedSegment(BaseSegment):
             )
             if persistent
         ]
-        start_simple = []
+        simple_raws = set()
         for ref in start_brackets:
-            start_simple += parse_context.dialect.ref(ref).simple(
+            bracket_simple = parse_context.dialect.ref(ref).simple(
                 parse_context, crumbs=crumbs
             )
-        return start_simple
+            assert bracket_simple, "All bracket segments must support simple."
+            assert bracket_simple[0], "All bracket segments must support raw simple."
+            # NOTE: By making this assumption we don't have to handle the "typed"
+            # simple here.
+            simple_raws.update(bracket_simple[0])
+        return frozenset(simple_raws), frozenset()
 
     @classmethod
     def match(
diff --git a/src/sqlfluff/core/parser/segments/meta.py b/src/sqlfluff/core/parser/segments/meta.py
index 238325c..7bfb0f0 100644
--- a/src/sqlfluff/core/parser/segments/meta.py
+++ b/src/sqlfluff/core/parser/segments/meta.py
@@ -8,6 +8,8 @@ from sqlfluff.core.parser.segments.raw import RawSegment, SourceFix
 from sqlfluff.core.parser.context import ParseContext
 from typing import Optional, List
 
+from sqlfluff.core.templaters.base import TemplatedFile
+
 
 class MetaSegment(RawSegment):
     """A segment which is empty but indicates where something should be."""
@@ -16,7 +18,11 @@ class MetaSegment(RawSegment):
     _is_code = False
     _template = "<unset>"
     indent_val = 0
+    # Implicit indents are to be considered _taken_ unless
+    # closed on the same line.
+    is_implicit = False
     is_meta = True
+    _preface_modifier = "[META] "
 
     def __init__(
         self,
@@ -40,8 +46,7 @@ class MetaSegment(RawSegment):
         self.is_template = is_template
         self.block_uuid = block_uuid
 
-    @staticmethod
-    def _suffix():
+    def _suffix(self):
         """Return any extra output required at the end when logging.
 
         Meta classes have not much to say here so just stay blank.
@@ -59,7 +64,7 @@ class MetaSegment(RawSegment):
         )
 
     @classmethod
-    def simple(cls, parse_context: ParseContext, crumbs=None) -> Optional[List[str]]:
+    def simple(cls, parse_context: ParseContext, crumbs=None):
         """Does this matcher support an uppercase hash matching route?
 
         This should be true if the MATCH grammar is simple. Most more
@@ -103,6 +108,27 @@ class Indent(MetaSegment):
     type = "indent"
     indent_val = 1
 
+    def _suffix(self) -> str:
+        """If present, output the block uuid."""
+        return f"[Block: {self.block_uuid.hex[:6]!r}]" if self.block_uuid else ""
+
+
+class ImplicitIndent(Indent):
+    """A variant on the indent, that is considered *taken* unless closed in line.
+
+    This is primarily for facilitating constructions which behave a little
+    like hanging indents, without the complicated indentation spacing.
+
+    .. code-block:: sql
+
+        SELECT *
+        FROM foo
+        WHERE a  -- The theoretical indent between WHERE and "a" is implicit.
+            AND b
+    """
+
+    _preface_modifier = "[META] (implicit) "
+    is_implicit = True
+
 
 class Dedent(Indent):
     """A segment which is empty but indicates where an dedent should be.
@@ -144,7 +170,8 @@ class TemplateSegment(MetaSegment):
         block_uuid: Optional[UUID] = None,
     ):
         """Initialise a placeholder with the source code embedded."""
-        if not source_str:  # pragma: no cover
+        # NOTE: Empty string is ok, None is not.
+        if source_str is None:  # pragma: no cover
             raise ValueError("Cannot instantiate TemplateSegment without a source_str.")
         self.source_str = source_str
         self.block_type = block_type
@@ -153,9 +180,35 @@ class TemplateSegment(MetaSegment):
             pos_marker=pos_marker, source_fixes=source_fixes, block_uuid=block_uuid
         )
 
-    def _suffix(self):
+    def _suffix(self) -> str:
         """Also output what it's a placeholder for."""
-        return f"[Type: {self.block_type!r}, Raw: {self.source_str!r}]"
+        return (
+            f"[Type: {self.block_type!r}, Raw: {self.source_str!r}"
+            + (f", Block: {self.block_uuid.hex[:6]!r}" if self.block_uuid else "")
+            + "]"
+        )
+
+    @classmethod
+    def from_slice(
+        cls,
+        source_slice: slice,
+        templated_slice: slice,
+        block_type: str,
+        templated_file: TemplatedFile,
+        block_uuid: Optional[UUID] = None,
+    ):
+        """Construct template segment from slice of a source file."""
+        pos_marker = PositionMarker(
+            source_slice,
+            templated_slice,
+            templated_file,
+        )
+        return cls(
+            pos_marker=pos_marker,
+            source_str=templated_file.source_str[source_slice],
+            block_type=block_type,
+            block_uuid=block_uuid,
+        )
 
     def to_tuple(self, code_only=False, show_raw=False, include_meta=False):
         """Return a tuple structure from this segment.
@@ -170,7 +223,10 @@ class TemplateSegment(MetaSegment):
             return (self.get_type(), self.raw)
 
     def edit(
-        self, raw: Optional[str] = None, source_fixes: Optional[List[SourceFix]] = None
+        self,
+        raw: Optional[str] = None,
+        source_fixes: Optional[List[SourceFix]] = None,
+        source_str: Optional[str] = None,
     ):
         """Create a new segment, with exactly the same position but different content.
 
@@ -186,10 +242,17 @@ class TemplateSegment(MetaSegment):
             raise ValueError(
                 "Cannot set raw of a template placeholder!"
             )  # pragma: no cover
+
+        if source_fixes or self.source_fixes:
+            sf = (source_fixes or []) + (self.source_fixes or [])
+        else:  # pragma: no cover
+            # There's _usually_ a source fix if we're editing a templated
+            # segment - but it's not guaranteed.
+            sf = None
         return self.__class__(
             pos_marker=self.pos_marker,
-            source_str=self.source_str,
+            source_str=source_str if source_str is not None else self.source_str,
             block_type=self.block_type,
-            source_fixes=source_fixes or self.source_fixes,
+            source_fixes=sf,
             block_uuid=self.block_uuid,
         )
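
The source-fix merge in `edit()` defaults either side before concatenating,
since both may be `None`; a sketch of those semantics (the function name is
hypothetical):

    def merge_fixes(new_fixes, existing_fixes):
        if new_fixes or existing_fixes:
            return (new_fixes or []) + (existing_fixes or [])
        # No fixes on either side.
        return None

    assert merge_fixes(["a"], None) == ["a"]
    assert merge_fixes(None, ["b"]) == ["b"]
    assert merge_fixes(None, None) is None
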
diff --git a/src/sqlfluff/core/parser/segments/raw.py b/src/sqlfluff/core/parser/segments/raw.py
index c8f3a4b..b71fc50 100644
--- a/src/sqlfluff/core/parser/segments/raw.py
+++ b/src/sqlfluff/core/parser/segments/raw.py
@@ -58,12 +58,16 @@ class RawSegment(BaseSegment):
         self._source_fixes = source_fixes
         # UUID for matching
         self.uuid = uuid or uuid4()
-
-    def __repr__(self):
-        return "<{}: ({}) {!r}>".format(
+        self.representation = "<{}: ({}) {!r}>".format(
             self.__class__.__name__, self.pos_marker, self.raw
         )
 
+    def __repr__(self):
+        # Calculated at __init__: all elements are immutable, and
+        # recomputing the pos marker on every call previously proved
+        # very expensive.
+        return self.representation
+
     def __setattr__(self, key, value):
         """Overwrite BaseSegment's __setattr__ with BaseSegment's superclass."""
         super(BaseSegment, self).__setattr__(key, value)
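
A toy illustration of the repr-caching pattern introduced above: compute the
string once at construction (safe because the segment is effectively
immutable) and return the stored value thereafter (class hypothetical):

    class Cached:
        def __init__(self, raw: str) -> None:
            self.raw = raw
            # Precompute: __repr__ may be called very frequently.
            self.representation = f"<{type(self).__name__}: {raw!r}>"

        def __repr__(self) -> str:
            return self.representation

    assert repr(Cached("SELECT")) == "<Cached: 'SELECT'>"
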
diff --git a/src/sqlfluff/core/plugin/hookspecs.py b/src/sqlfluff/core/plugin/hookspecs.py
index 98ada24..e3cc278 100644
--- a/src/sqlfluff/core/plugin/hookspecs.py
+++ b/src/sqlfluff/core/plugin/hookspecs.py
@@ -15,6 +15,7 @@ class PluginSpec:
     def get_rules(self):
         """Get plugin rules."""
 
+    @hookspec
     @abstractmethod
     def load_default_config(self) -> dict:
         """Loads the default configuration for the plugin."""
diff --git a/src/sqlfluff/core/plugin/lib.py b/src/sqlfluff/core/plugin/lib.py
index eec2f85..61ec280 100644
--- a/src/sqlfluff/core/plugin/lib.py
+++ b/src/sqlfluff/core/plugin/lib.py
@@ -1,21 +1,30 @@
 """Base implementation for the plugin."""
 
 import os.path
+
+from typing import List, Type
+
 from sqlfluff.core.config import ConfigLoader
 from sqlfluff.core.plugin import hookimpl
 from sqlfluff.core.rules.config_info import STANDARD_CONFIG_INFO_DICT
 from sqlfluff.core.rules.loader import get_rules_from_path
-from sqlfluff.core.templaters import core_templaters
+from sqlfluff.core.rules import BaseRule
+from sqlfluff.core.templaters import core_templaters, RawTemplater
 
 
 @hookimpl
-def get_rules():
-    """Get plugin rules."""
+def get_rules() -> List[Type[BaseRule]]:
+    """Get plugin rules.
+
+    NOTE: All standard rules will eventually be loaded as plugins.
+    Once all legacy plugin definitions are migrated (before 2.0.0),
+    this function will be amended to return no rules.
+    """
     return get_rules_from_path()
 
 
 @hookimpl
-def get_templaters():
+def get_templaters() -> List[Type[RawTemplater]]:
     """Get templaters."""
     return core_templaters()
 
diff --git a/src/sqlfluff/core/rules/__init__.py b/src/sqlfluff/core/rules/__init__.py
index c4ac887..8927fb8 100644
--- a/src/sqlfluff/core/rules/__init__.py
+++ b/src/sqlfluff/core/rules/__init__.py
@@ -2,6 +2,7 @@
 
 from sqlfluff.core.rules.base import (
     RuleSet,
+    RulePack,
     BaseRule,
     LintResult,
     LintFix,
@@ -40,6 +41,7 @@ def get_ruleset(name: str = "standard") -> RuleSet:
 __all__ = (
     "get_ruleset",
     "RuleSet",
+    "RulePack",
     "BaseRule",
     "LintResult",
     "LintFix",
diff --git a/src/sqlfluff/core/rules/base.py b/src/sqlfluff/core/rules/base.py
index 90cba7b..ed6e093 100644
--- a/src/sqlfluff/core/rules/base.py
+++ b/src/sqlfluff/core/rules/base.py
@@ -16,10 +16,13 @@ missing.
 
 import bdb
 import copy
+from dataclasses import dataclass
 import fnmatch
+from itertools import chain
 import logging
 import pathlib
 import regex
+import re
 from typing import (
     cast,
     Iterable,
@@ -29,22 +32,27 @@ from typing import (
     Tuple,
     Union,
     Any,
+    Dict,
+    Type,
+    DefaultDict,
+    Iterator,
 )
-from collections import namedtuple
+from collections import namedtuple, defaultdict
 
-from sqlfluff.core.cached_property import cached_property
-from sqlfluff.core.config import FluffConfig
+from sqlfluff.core.config import FluffConfig, split_comma_separated_string
 
 from sqlfluff.core.linter import LintedFile, NoQaDirective
 from sqlfluff.core.parser import BaseSegment, PositionMarker, RawSegment
 from sqlfluff.core.dialects import Dialect
-from sqlfluff.core.errors import SQLLintError
+from sqlfluff.core.errors import SQLLintError, SQLFluffUserError
+from sqlfluff.core.parser.segments.base import SourceFix
 from sqlfluff.core.rules.context import RuleContext
 from sqlfluff.core.rules.crawlers import BaseCrawler
+from sqlfluff.core.rules.config_info import get_config_info
 from sqlfluff.core.templaters.base import RawFileSlice, TemplatedFile
 
 # The ghost of a rule (mostly used for testing)
-RuleGhost = namedtuple("RuleGhost", ["code", "description"])
+RuleGhost = namedtuple("RuleGhost", ["code", "name", "description"])
 
 # Instantiate the rules logger
 rules_logger = logging.getLogger("sqlfluff.rules")
@@ -78,6 +86,9 @@ class LintResult:
             identified as part of this result. This will override the
             description of the rule as what gets reported to the user
             with the problem if provided.
+        source (:obj:`str`, optional): A string identifier for what
+            generated the result. Within larger libraries like reflow this
+            can be useful for tracking where a result came from.
 
     """
 
@@ -86,7 +97,8 @@ class LintResult:
         anchor: Optional[BaseSegment] = None,
         fixes: Optional[List["LintFix"]] = None,
         memory=None,
-        description=None,
+        description: Optional[str] = None,
+        source: Optional[str] = None,
     ):
         # An anchor of none, means no issue
         self.anchor = anchor
@@ -98,6 +110,8 @@ class LintResult:
         self.memory = memory
         # store a description_override for later
         self.description = description
+        # Optional code for where the result came from
+        self.source: str = source or ""
 
     def __repr__(self):
         if not self.anchor:
@@ -105,6 +119,11 @@ class LintResult:
         # The "F" at the end is short for "fixes", to indicate how many there are.
         fix_coda = f"+{len(self.fixes)}F" if self.fixes else ""
         if self.description:
+            if self.source:
+                return (
+                    f"LintResult({self.description} [{self.source}]"
+                    f": {self.anchor}{fix_coda})"
+                )
             return f"LintResult({self.description}: {self.anchor}{fix_coda})"
         return f"LintResult({self.anchor}{fix_coda})"
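
A standalone analogue of the repr behaviour above, showing how the optional `source` tag only surfaces when a description is also present (a simplified stand-in, not the real LintResult):

    from typing import Optional

    class MiniResult:
        """Simplified stand-in for LintResult's repr logic."""

        def __init__(
            self,
            anchor: str,
            description: Optional[str] = None,
            source: Optional[str] = None,
        ) -> None:
            self.anchor = anchor
            self.description = description
            self.source = source or ""

        def __repr__(self) -> str:
            if self.description:
                if self.source:
                    return (
                        f"MiniResult({self.description} "
                        f"[{self.source}]: {self.anchor})"
                    )
                return f"MiniResult({self.description}: {self.anchor})"
            return f"MiniResult({self.anchor})"

    print(MiniResult("seg", "trailing space", "reflow.space"))
    # MiniResult(trailing space [reflow.space]: seg)
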
 
@@ -142,7 +161,6 @@ class LintFix:
             `create` fixes, this holds iterable of segments that provided
             code. IMPORTANT: The linter uses this to prevent copying material
             from templated areas.
-
     """
 
     def __init__(
@@ -250,11 +268,28 @@ class LintFix:
         """
         if not self.edit_type == other.edit_type:
             return False
-        if not self.anchor == other.anchor:
+        # For checking anchor equality, first check types.
+        if not self.anchor.class_types == other.anchor.class_types:
             return False
-        if not self.edit == other.edit:
+        # If types match, check uuids to see if they're the same original segment.
+        if self.anchor.uuid != other.anchor.uuid:
             return False
-        return True  # pragma: no cover TODO?
+        # Then compare edits, here we only need to check the raw and source
+        # fixes (positions are meaningless).
+        # Only do this if we have edits.
+        if self.edit:
+            # 1. Check lengths
+            if len(self.edit) != len(other.edit):
+                return False  # pragma: no cover
+            # 2. Zip and compare
+            for a, b in zip(self.edit, other.edit):
+                # Check raws
+                if a.raw != b.raw:
+                    return False
+                # Check source fixes
+                if a.source_fixes != b.source_fixes:
+                    return False
+        return True
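
The revised comparison keys on segment class types and uuid rather than full positional equality, so two fixes targeting the same original segment compare equal even after a re-parse moves it. A simplified standalone analogue of that idea:

    import uuid
    from dataclasses import dataclass, field

    @dataclass
    class MiniSeg:
        raw: str
        # The uuid identifies the *original* segment and survives re-parses.
        uuid: uuid.UUID = field(default_factory=uuid.uuid4)

    def anchors_equal(a: MiniSeg, b: MiniSeg) -> bool:
        # Positions are deliberately ignored; identity is the uuid.
        return a.uuid == b.uuid

    seg = MiniSeg("SELECT")
    assert anchors_equal(seg, seg)
    assert not anchors_equal(seg, MiniSeg("SELECT"))
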
 
     @classmethod
     def delete(cls, anchor_segment: BaseSegment) -> "LintFix":
@@ -279,7 +314,12 @@ class LintFix:
         source: Optional[Iterable[BaseSegment]] = None,
     ) -> "LintFix":
         """Create edit segments before the supplied anchor segment."""
-        return cls("create_before", anchor_segment, edit_segments, source)
+        return cls(
+            "create_before",
+            anchor_segment,
+            edit_segments,
+            source,
+        )
 
     @classmethod
     def create_after(
@@ -289,7 +329,12 @@ class LintFix:
         source: Optional[Iterable[BaseSegment]] = None,
     ) -> "LintFix":
         """Create edit segments after the supplied anchor segment."""
-        return cls("create_after", anchor_segment, edit_segments, source)
+        return cls(
+            "create_after",
+            anchor_segment,
+            edit_segments,
+            source,
+        )
 
     def get_fix_slices(
         self, templated_file: TemplatedFile, within_only: bool
@@ -332,6 +377,41 @@ class LintFix:
             # We return an empty set because this edit doesn't touch anything
             # in the source.
             return set()
+        elif (
+            self.edit_type == "replace"
+            and all(edit.is_type("raw") for edit in cast(List[RawSegment], self.edit))
+            and all(edit._source_fixes for edit in cast(List[RawSegment], self.edit))
+        ):
+            # As an exception to the general rule about "replace" fixes (where
+            # they're only safe if they don't touch a templated section at all),
+            # source-only fixes are different. This clause handles that exception.
+
+            # So long as the fix is *purely* source-only we can assume that the
+            # rule has done the relevant due diligence on what it's editing in
+            # the source and just yield the source slices directly.
+
+            # More complicated fixes that are a blend of source and templated
+            # fixes are currently not supported (mostly because they've
+            # not arisen yet!), so further work would be required to support
+            # them elegantly.
+            rules_logger.debug("Source only fix.")
+            source_edit_slices = [
+                fix.source_slice
+                # We can assume they're all raw and all have source fixes, because we
+                # check that above.
+                for fix in chain.from_iterable(
+                    cast(List[SourceFix], edit._source_fixes)
+                    for edit in cast(List[RawSegment], self.edit)
+                )
+            ]
+
+            if len(source_edit_slices) > 1:  # pragma: no cover
+                raise NotImplementedError(
+                    "Unable to handle multiple source only slices."
+                )
+            return set(
+                templated_file.raw_slices_spanning_source_slice(source_edit_slices[0])
+            )
 
         # TRICKY: For creations at the end of the file, there won't be an
         # existing slice. In this case, the function adds file_end_slice to the
@@ -400,7 +480,171 @@ class LintFix:
 EvalResultType = Union[LintResult, List[LintResult], None]
 
 
-class BaseRule:
+class RuleMetaclass(type):
+    """The metaclass for rules.
+
+    This metaclass provides auto-enrichment of the
+    rule docstring so that examples, groups, aliases and
+    names are added.
+
+    The reason we enrich the docstring is so that it can be
+    picked up by autodoc and displayed in full in the sqlfluff
+    docs.
+    """
+
+    # Precompile the regular expressions
+    _doc_search_regex = re.compile(
+        "(\\s{4}\\*\\*Anti-pattern\\*\\*|\\s{4}\\.\\. note::|"
+        "\\s\\s{4}\\*\\*Configuration\\*\\*)",
+        flags=re.MULTILINE,
+    )
+    _valid_classname_regex = regex.compile(r"Rule_?([A-Z]{1}[a-zA-Z]+)?_([A-Z0-9]{4})")
+    _valid_rule_name_regex = regex.compile(r"[a-z][a-z\.\_]+")
+
+    def _populate_code_and_description(mcs, name, class_dict):
+        """Extract and validate the rule code & description.
+
+        We expect that rules are defined as classes with the name `Rule_XXXX`
+        where `XXXX` is of the form `LLNN`: a two letter prefix followed
+        by a two digit number. For backward compatibility we also still
+        support the legacy format of `LNNN`, i.e. a single letter and a
+        three digit number.
+
+        The two letters should be indicative of the grouping and focus of
+        the rule. e.g. capitalisation rules have the code CP for CaPitalisation.
+
+        If this receives classes by any other name, then it will raise a
+        :exc:`ValueError`.
+        """
+        rule_name_match = mcs._valid_classname_regex.match(name)
+        # Validate the name
+        if not rule_name_match:  # pragma: no cover
+            raise SQLFluffUserError(
+                f"Tried to define rule class with "
+                f"unexpected format: {name}. Format should be: "
+                "'Rule_PluginName_LL23' (for plugins) or "
+                "`Rule_LL23` (for core rules)."
+            )
+
+        plugin_name, code = rule_name_match.groups()
+        # If the docstring is multiline, then we extract just summary.
+        description = class_dict["__doc__"].replace("``", "'").split("\n")[0]
+        if plugin_name:
+            code = f"{plugin_name}_{code}"
+
+        class_dict["code"] = code
+        class_dict["description"] = description
+
+        return class_dict
+
+    def _populate_docstring(mcs, name, class_dict):
+        """Enrich the docstring in the class_dict.
+
+        This takes the various defined values in the BaseRule class
+        and uses them to populate documentation in the final class
+        docstring so that it can be displayed in the sphinx docs.
+        """
+        # Ensure that there _is_ a docstring.
+        assert (
+            "__doc__" in class_dict
+        ), f"Tried to define rule {name!r} without docstring."
+
+        # Build up a buffer of entries to add to the docstring.
+        fix_docs = (
+            "    This rule is ``sqlfluff fix`` compatible.\n\n"
+            if class_dict.get("is_fix_compatible", False)
+            else ""
+        )
+        name_docs = (
+            f"    **Name**: ``{class_dict['name']}``\n\n"
+            if class_dict.get("name", "")
+            else ""
+        )
+        alias_docs = (
+            ("    **Aliases**: ``" + "``, ``".join(class_dict["aliases"]) + "``\n\n")
+            if class_dict.get("aliases", [])
+            else ""
+        )
+        groups_docs = (
+            ("    **Groups**: ``" + "``, ``".join(class_dict["groups"]) + "``\n\n")
+            if class_dict.get("groups", [])
+            else ""
+        )
+
+        config_docs = ""
+        if class_dict.get("config_keywords", []):
+            config_docs = "\n    **Configuration**\n"
+            config_info = get_config_info()
+            for keyword in sorted(class_dict["config_keywords"]):
+                try:
+                    info_dict = config_info[keyword]
+                except KeyError:  # pragma: no cover
+                    raise KeyError(
+                        "Config value {!r} for rule {} is not configured in "
+                        "`config_info`.".format(keyword, name)
+                    )
+                config_docs += "\n    * ``{}``: {}".format(
+                    keyword, info_dict["definition"]
+                )
+                if (
+                    config_docs[-1] != "."
+                    and config_docs[-1] != "?"
+                    and config_docs[-1] != "\n"
+                ):
+                    config_docs += "."
+                if "validation" in info_dict:
+                    config_docs += " Must be one of ``{}``.".format(
+                        info_dict["validation"]
+                    )
+            config_docs += "\n"
+
+        all_docs = fix_docs + name_docs + alias_docs + groups_docs + config_docs
+        # Modify the docstring using the search regex.
+        class_dict["__doc__"] = mcs._doc_search_regex.sub(
+            f"\n\n{all_docs}\n\n\\1", class_dict["__doc__"], count=1
+        )
+        # If the inserted string is not now in the docstring - append it on
+        # the end. This just means the regex didn't find a better place to
+        # put it.
+        if all_docs not in class_dict["__doc__"]:
+            class_dict["__doc__"] += f"\n\n{all_docs}"
+
+        # Return the modified class_dict
+        return class_dict
+
+    def __new__(mcs, name, bases, class_dict):
+        """Generate a new class."""
+        # Optionally, groups may be inherited. At this stage of initialisation
+        # they won't have been. Check parent classes if they exist.
+        # names, aliases and description are less appropriate to inherit.
+        # NOTE: This applies in particular to CP02, which inherits all groups
+        # from CP01. If we don't do this, those groups don't show in the docs.
+        for base in reversed(bases):
+            if "groups" in class_dict:
+                break
+            elif base.groups:
+                class_dict["groups"] = base.groups
+                break
+
+        class_dict = mcs._populate_docstring(mcs, name, class_dict)
+        # Don't try and infer code and description for the base class
+        if bases:
+            class_dict = mcs._populate_code_and_description(mcs, name, class_dict)
+        # Validate rule names
+        rule_name = class_dict.get("name", "")
+        if rule_name:
+            if not mcs._valid_rule_name_regex.match(rule_name):
+                raise SQLFluffUserError(
+                    f"Tried to define rule with unexpected "
+                    f"name format: {rule_name}. Rule names should be lowercase "
+                    "and snake_case with optional `.` characters to indicate "
+                    "a namespace or grouping. e.g. `layout.spacing`."
+                )
+
+        # Use the stock __new__ method now we've adjusted the docstring.
+        return super().__new__(mcs, name, bases, class_dict)
+
+
+class BaseRule(metaclass=RuleMetaclass):
     """The base class for a rule.
 
     Args:
@@ -415,7 +659,15 @@ class BaseRule:
     _works_on_unparsable = True
     _adjust_anchors = False
     targets_templated = False
-
+    # Some fix routines do their own checking for whether their fixes
+    # are safe around templated elements. For those, the default
+    # safety checks might be inappropriate; in those cases, set
+    # template_safe_fixes to True.
+    template_safe_fixes = False
+
+    # Config settings supported for this rule.
+    # See config_info.py for supported values.
+    config_keywords: List[str] = []
     # Lint loop / crawl behavior. When appropriate, rules can (and should)
     # override these values to make linting faster.
     crawl_behaviour: BaseCrawler
@@ -425,6 +677,29 @@ class BaseRule:
     # - On the first pass of the main phase
     # - In a second linter pass after the main phase
     lint_phase = "main"
+    # Groups attribute to be overwritten.
+    groups: Tuple[str, ...] = ()
+    # Name attribute to be overwritten.
+    # NOTE: for backward compatibility we should handle the case
+    # where no name is set gracefully.
+    name: str = ""
+    # Optional set of aliases for the rule. Most often used for old codes which
+    # referred to this rule.
+    aliases: Tuple[str, ...] = ()
+
+    # NOTE: code and description are provided here as hints, but should not
+    # be set directly. They are set automatically by the metaclass based on
+    # the class _name_ when defined.
+    code: str
+    description: str
+
+    # Should we document this rule as fixable? Used by the metaclass to add
+    # a line to the docstring.
+    is_fix_compatible = False
+
+    # Expose split_comma_separated_string on BaseRule so that rules parse
+    # comma separated config values consistently with the config module.
+    split_comma_separated_string = staticmethod(split_comma_separated_string)
 
     def __init__(self, code, description, **kwargs):
         self.description = description
@@ -438,18 +713,27 @@ class BaseRule:
         # of the rule in the logging.
         self.logger = RuleLoggingAdapter(rules_logger, {"code": code})
         # Validate that declared configuration options exist
-        try:
-            for keyword in self.config_keywords:
-                if keyword not in kwargs.keys():
-                    raise ValueError(
-                        (
-                            "Unrecognized config '{}' for Rule {}. If this "
-                            "is a new option, please add it to "
-                            "`default_config.cfg`"
-                        ).format(keyword, code)
-                    )
-        except AttributeError:
-            self.logger.info(f"No config_keywords defined for {code}")
+        for keyword in self.config_keywords:
+            if keyword not in kwargs.keys():
+                raise ValueError(
+                    (
+                        "Unrecognized config '{}' for Rule {}. If this "
+                        "is a new option, please add it to "
+                        "`default_config.cfg`"
+                    ).format(keyword, code)
+                )
+
+    @classmethod
+    def get_config_ref(cls):
+        """Return the config lookup ref for this rule.
+
+        If a `name` is defined, it's the name - otherwise the code.
+
+        The name is a much more understandable reference and so makes config
+        files more readable. For backward compatibility however we also support
+        the rule code for those without names.
+        """
+        return cls.name if cls.name else cls.code
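
For example, rule CP01 (keyword capitalisation, legacy code L010) is named `capitalisation.keywords`, so its options are now read from a section keyed by that name. A sketch of the expected layout in a `.sqlfluff` file:

    [sqlfluff:rules:capitalisation.keywords]
    capitalisation_policy = upper
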
 
     def _eval(self, context: RuleContext) -> EvalResultType:
         """Evaluate this rule against the current context.
@@ -515,8 +799,12 @@ class BaseRule:
             # Any exception at this point would halt the linter and
             # cause the user to get no results
             except Exception as e:
+                # If a filename is present, include it in the critical exception.
                 self.logger.critical(
-                    f"Applying rule {self.code} threw an Exception: {e}", exc_info=True
+                    f"Applying rule {self.code} to {fname!r} threw an Exception: {e}"
+                    if fname
+                    else f"Applying rule {self.code} threw an Exception: {e}",
+                    exc_info=True,
                 )
                 assert context.segment.pos_marker
                 exception_line, _ = context.segment.pos_marker.source_position()
@@ -571,8 +859,15 @@ class BaseRule:
 
             for lerr in new_lerrs:
                 self.logger.info("!! Violation Found: %r", lerr.description)
-            for lfix in new_fixes:
-                self.logger.info("!! Fix Proposed: %r", lfix)
+            if new_fixes:
+                if not self.is_fix_compatible:  # pragma: no cover
+                    rules_logger.error(
+                        f"Rule {self.code} returned a fix but is not documented as "
+                        "`is_fix_compatible`, so you may encounter unusual fixing "
+                        "behaviour. Report this as a bug to the developer of this rule."
+                    )
+                for lfix in new_fixes:
+                    self.logger.info("!! Fix Proposed: %r", lfix)
 
             # Consume the new results
             vs += new_lerrs
@@ -588,7 +883,10 @@ class BaseRule:
     def _process_lint_result(
         self, res, templated_file, ignore_mask, new_lerrs, new_fixes, root
     ):
-        self.discard_unsafe_fixes(res, templated_file)
+        # Unless the rule declares that it's already template safe, do
+        # safety checks.
+        if not self.template_safe_fixes:
+            self.discard_unsafe_fixes(res, templated_file)
         lerr = res.to_linting_error(rule=self)
         ignored = False
         if lerr:
@@ -632,16 +930,6 @@ class BaseRule:
         if not ignored:
             new_fixes.extend(res.fixes)
 
-    @cached_property
-    def indent(self) -> str:
-        """String for a single indent, based on configuration."""
-        self.tab_space_size: int
-        self.indent_unit: str
-
-        tab = "\t"
-        space = " "
-        return space * self.tab_space_size if self.indent_unit == "space" else tab
-
     @staticmethod
     def filter_meta(segments, keep_meta=False):
         """Filter the segments to non-meta.
@@ -679,41 +967,6 @@ class BaseRule:
         # no subsegments to check. Return None.
         return None
 
-    @staticmethod
-    def matches_target_tuples(
-        seg: BaseSegment,
-        target_tuples: List[Tuple[str, str]],
-        parent: Optional[BaseSegment] = None,
-    ):
-        """Does the given segment match any of the given type tuples?"""
-        if seg.raw_upper in [
-            elem[1] for elem in target_tuples if elem[0] == "raw_upper"
-        ]:
-            return True  # pragma: no cover
-        elif seg.is_type(*[elem[1] for elem in target_tuples if elem[0] == "type"]):
-            return True
-        # For parent type checks, there's a higher risk of getting an incorrect
-        # segment, so we add some additional guards. We also only check keywords
-        # as for other types we can check directly rather than using parent
-        elif (
-            not seg.is_meta
-            and not seg.is_comment
-            and not seg.is_templated
-            and not seg.is_whitespace
-            and isinstance(seg, RawSegment)
-            and len(seg.raw) > 0
-            and seg.is_type("keyword")
-            and parent
-            and parent.is_type(
-                *[elem[1] for elem in target_tuples if elem[0] == "parenttype"]
-            )
-        ):
-            # TODO: This clause is much less used post crawler migration.
-            # Consider whether this should be removed once that migration
-            # is complete.
-            return True  # pragma: no cover
-        return False
-
     @staticmethod
     def discard_unsafe_fixes(
         lint_result: LintResult, templated_file: Optional[TemplatedFile]
@@ -760,11 +1013,11 @@ class BaseRule:
         """Makes simple fixes to the anchor position for fixes.
 
         Some rules return fixes where the anchor is too low in the tree. These
-        are most often rules like L003 and L016 that make whitespace changes
+        are most often rules like LT02 and LT05 that make whitespace changes
         without a "deep" understanding of the parse structure. This function
         attempts to correct those issues automatically. It may not be perfect,
-        but it should be an improvement over the old behavior, where rules like
-        L003 often corrupted the parse tree, placing spaces in weird places that
+        but it should be an improvement over the old behaviour, where rules like
+        LT02 often corrupted the parse tree, placing spaces in weird places that
         caused issues with other rules. For more context, see issue #1304.
         """
         if not cls._adjust_anchors:
@@ -774,7 +1027,12 @@ class BaseRule:
         for fix in lint_result.fixes:
             if fix.anchor:
                 fix.anchor = cls._choose_anchor_segment(
-                    context.parent_stack[0], fix.edit_type, fix.anchor
+                    # If no parent stack, that means the segment itself is the root
+                    context.parent_stack[0]
+                    if context.parent_stack
+                    else context.segment,
+                    fix.edit_type,
+                    fix.anchor,
                 )
 
     @staticmethod
@@ -844,10 +1102,49 @@ class BaseRule:
                     break
         return anchor
 
-    @staticmethod
-    def split_comma_separated_string(raw_str: str) -> List[str]:
-        """Converts comma separated string to List, stripping whitespace."""
-        return [s.strip() for s in raw_str.split(",") if s.strip()]
+
+@dataclass(frozen=True)
+class RuleManifest:
+    """Element in the rule register."""
+
+    code: str
+    name: str
+    description: str
+    groups: Tuple[str, ...]
+    aliases: Tuple[str, ...]
+    rule_class: Type[BaseRule]
+
+
+@dataclass
+class RulePack:
+    """A bundle of rules to be applied.
+
+    This contains a set of rules (post filtering), but also the mapping
+    required to interpret any noqa messages found in files.
+
+    The reason for this object is that rules are filtered and instantiated
+    into this pack in the main process when running in multi-processing mode so
+    that user defined rules can be used without reference issues.
+
+    Attributes:
+        rules (:obj:`list` of :obj:`BaseRule`): A filtered list of instantiated
+            rules to be applied to a given file.
+        reference_map (:obj:`dict`): A mapping of rule references to the codes
+            they refer to, e.g. `{"my_ref": {"LT01", "LT02"}}`. The references
+            (i.e. the keys) may be codes, groups, aliases or names. The values
+            of the mapping are sets of rule codes *only*. This object acts as
+            a lookup to be able to translate selectors (which may contain
+            diverse references) into a consolidated list of rule codes. This
+            mapping contains the full set of rules, rather than just the filtered
+            set present in the `rules` attribute.
+    """
+
+    rules: List[BaseRule]
+    reference_map: Dict[str, Set[str]]
+
+    def codes(self) -> Iterator[str]:
+        """Returns an iterator through the codes contained in the pack."""
+        return (r.code for r in self.rules)
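
A minimal usage sketch of the dataclass (the map here is hand-built purely to show the shape; real packs come from `RuleSet.get_rulepack`):

    from sqlfluff.core.rules import RulePack

    pack = RulePack(rules=[], reference_map={"layout": {"LT01", "LT02"}})
    assert sorted(pack.reference_map["layout"]) == ["LT01", "LT02"]
    assert list(pack.codes()) == []
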
 
 
 class RuleSet:
@@ -873,22 +1170,22 @@ class RuleSet:
 
     """
 
-    def __init__(self, name, config_info):
+    def __init__(self, name, config_info) -> None:
         self.name = name
         self.config_info = config_info
-        self._register = {}
+        self._register: Dict[str, RuleManifest] = {}
 
-    def _validate_config_options(self, config, rule=None):
+    def _validate_config_options(self, config, rule_ref: Optional[str] = None):
         """Ensure that all config options are valid.
 
-        Config options can also be checked for a specific rule e.g L010.
+        Config options can also be checked for a specific rule, e.g. CP01.
         """
         rule_config = config.get_section("rules")
         for config_name, info_dict in self.config_info.items():
             config_option = (
                 rule_config.get(config_name)
-                if not rule
-                else rule_config.get(rule).get(config_name)
+                if not rule_ref
+                else rule_config.get(rule_ref).get(config_name)
             )
             valid_options = info_dict.get("validation")
             if (
@@ -906,28 +1203,13 @@ class RuleSet:
                     )
                 )
 
-    @property
-    def valid_rule_name_regex(self):
-        """Defines the accepted pattern for rule names.
-
-        The first group captures the plugin name (optional), which
-        must be capitalized.
-        The second group captures the rule code.
-
-        Examples of valid rule names:
-
-        * Rule_PluginName_L001
-        * Rule_L001
-        """
-        return regex.compile(r"Rule_?([A-Z]{1}[a-zA-Z]+)?_([A-Z][0-9]{3})")
-
     def register(self, cls, plugin=None):
         """Decorate a class with this to add it to the ruleset.
 
         .. code-block:: python
 
            @myruleset.register
-           class Rule_L001(BaseRule):
+           class Rule_LT01(BaseRule):
                "Description of rule."
 
                def eval(self, **kwargs):
@@ -941,25 +1223,9 @@ class RuleSet:
         :exc:`ValueError`.
 
         """
-        rule_name_match = self.valid_rule_name_regex.match(cls.__name__)
-        # Validate the name
-        if not rule_name_match:  # pragma: no cover
-            raise ValueError(
-                (
-                    "Tried to register rule on set {!r} with unexpected "
-                    "format: {}, format should be: Rule_PluginName_L123 (for plugins) "
-                    "or Rule_L123 (for core rules)."
-                ).format(self.name, cls.__name__)
-            )
-
-        plugin_name, code = rule_name_match.groups()
-        # If the docstring is multiline, then we extract just summary.
-        description = cls.__doc__.replace("``", "'").split("\n")[0]
-
-        if plugin_name:
-            code = f"{plugin_name}_{code}"
+        code = cls.code
 
-        # Keep track of the *class* in the register. Don't instantiate yet.
+        # Check for code collisions.
         if code in self._register:  # pragma: no cover
             raise ValueError(
                 "Rule {!r} has already been registered on RuleSet {!r}!".format(
@@ -967,104 +1233,202 @@ class RuleSet:
                 )
             )
 
-        try:
-            assert (
-                "all" in cls.groups
-            ), "Rule {!r} must belong to the 'all' group".format(code)
-            groups = cls.groups
-        except AttributeError as attr_err:
-            raise AttributeError(
-                (
-                    "Rule {!r} doesn't belong to any rule groups. "
-                    "All rules must belong to at least one group"
-                ).format(code)
-            ) from attr_err
-
-        self._register[code] = dict(
-            code=code, description=description, groups=groups, cls=cls
+        assert "all" in cls.groups, "Rule {!r} must belong to the 'all' group".format(
+            code
+        )
+
+        self._register[code] = RuleManifest(
+            code=code,
+            name=cls.name,
+            description=cls.description,
+            groups=cls.groups,
+            aliases=cls.aliases,
+            rule_class=cls,
         )
 
         # Make sure we actually return the original class
         return cls
 
-    def _expand_config_rule_group_list(
-        self, rule_list: List[str], valid_groups: Set[str]
-    ) -> List[str]:
-        expanded_rule_list: List[str] = []
-        for r in rule_list:
-            if r in valid_groups:
-                rules_in_group = [
-                    rule
-                    for rule, rule_dict in self._register.items()
-                    if r in rule_dict["groups"]
-                ]
-                expanded_rule_list.extend(rules_in_group)
-            else:
-                expanded_rule_list.extend(r)
-
-        return expanded_rule_list
-
-    def _expand_config_rule_glob_list(self, glob_list: List[str]) -> List[str]:
-        """Expand a list of rule globs into a list of rule codes.
+    def _expand_rule_refs(
+        self, glob_list: List[str], reference_map: Dict[str, Set[str]]
+    ) -> Set[str]:
+        """Expand a list of rule references into a list of rule codes.
 
         Returns:
-            :obj:`list` of :obj:`str` rule codes.
+            :obj:`set` of :obj:`str` rule codes.
 
         """
-        expanded_glob_list = []
+        expanded_rule_set: Set[str] = set()
         for r in glob_list:
-            expanded_glob_list.extend(
-                [
-                    x
-                    for x in fnmatch.filter(self._register, r)
-                    if x not in expanded_glob_list
-                ]
+            # Is it a direct reference?
+            if r in reference_map:
+                expanded_rule_set.update(reference_map[r])
+            # Otherwise treat as a glob expression on all references.
+            # NOTE: We expand _all_ references (i.e. groups, aliases, names
+            # AND codes) so that we preserve the most backward compatibility
+            # with existing references to legacy codes in config files.
+            else:
+                matched_refs = fnmatch.filter(reference_map.keys(), r)
+                for matched in matched_refs:
+                    expanded_rule_set.update(reference_map[matched])
+        return expanded_rule_set
+
+    def rule_reference_map(self) -> Dict[str, Set[str]]:
+        """Generate a rule reference map for looking up rules.
+
+        Generate the master reference map. The priority order is:
+        codes > names > groups > aliases
+        (i.e. if there's a collision between a name and an alias - we assume
+        the alias is wrong)
+        """
+        valid_codes: Set[str] = set(self._register.keys())
+        reference_map: Dict[str, Set[str]] = {code: {code} for code in valid_codes}
+
+        # Generate name map.
+        name_map: Dict[str, Set[str]] = {
+            manifest.name: {manifest.code}
+            for manifest in self._register.values()
+            if manifest.name
+        }
+        # Check collisions.
+        name_collisions = set(name_map.keys()) & valid_codes
+        if name_collisions:  # pragma: no cover
+            # NOTE: This clause is untested, because it's quite hard to actually
+            # have a valid name which replicates a valid code. The name validation
+            # will probably catch it first.
+            rules_logger.warning(
+                "The following defined rule names were found which collide "
+                "with codes. Those names will not be available for selection: %s",
+                name_collisions,
             )
+        # Incorporate (with existing references taking precedence).
+        reference_map = {**name_map, **reference_map}
+
+        # Generate the group map.
+        group_map: DefaultDict[str, Set[str]] = defaultdict(set)
+        for manifest in self._register.values():
+            for group in manifest.groups:
+                if group in reference_map:
+                    rules_logger.warning(
+                        "Rule %s defines group %r which is already defined as a "
+                        "name or code of %s. This group will not be available "
+                        "for use as a result of this collision.",
+                        manifest.code,
+                        group,
+                        reference_map[group],
+                    )
+                else:
+                    group_map[group].add(manifest.code)
+        # Incorporate after all checks are done.
+        reference_map = {**group_map, **reference_map}
+
+        # Generate the alias map.
+        alias_map: DefaultDict[str, Set[str]] = defaultdict(set)
+        for manifest in self._register.values():
+            for alias in manifest.aliases:
+                if alias in reference_map:
+                    rules_logger.warning(
+                        "Rule %s defines alias %r which is already defined as a "
+                        "name, code or group of %s. This alias will "
+                        "not be available for use as a result of this collision.",
+                        manifest.code,
+                        alias,
+                        reference_map[alias],
+                    )
+                else:
+                    alias_map[alias].add(manifest.code)
+        # Incorporate after all checks are done.
+        return {**alias_map, **reference_map}
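
The resulting map is what `_expand_rule_refs` consumes: direct hits resolve immediately, and anything else is treated as a glob over all references. A standalone sketch of that expansion with illustrative map contents:

    import fnmatch
    from typing import Dict, List, Set

    def expand_rule_refs(
        refs: List[str], reference_map: Dict[str, Set[str]]
    ) -> Set[str]:
        expanded: Set[str] = set()
        for r in refs:
            if r in reference_map:  # direct code/name/group/alias hit
                expanded.update(reference_map[r])
            else:  # otherwise treat as a glob over all references
                for matched in fnmatch.filter(reference_map.keys(), r):
                    expanded.update(reference_map[matched])
        return expanded

    ref_map = {
        "LT01": {"LT01"},
        "LT02": {"LT02"},
        "layout": {"LT01", "LT02"},  # group
        "layout.spacing": {"LT01"},  # name
        "L003": {"LT02"},            # legacy alias
    }
    assert expand_rule_refs(["layout"], ref_map) == {"LT01", "LT02"}
    assert expand_rule_refs(["LT0*"], ref_map) == {"LT01", "LT02"}
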
 
-        return expanded_glob_list
-
-    def get_rulelist(self, config) -> List[BaseRule]:
+    def get_rulepack(self, config) -> RulePack:
         """Use the config to return the appropriate rules.
 
         We use the config both for allowlisting and denylisting, but also
         for configuring the rules given the given config.
-
-        Returns:
-            :obj:`list` of instantiated :obj:`BaseRule`.
-
         """
         # Validate all generic rule configs
         self._validate_config_options(config)
-        # Find all valid groups for ruleset
-        valid_groups: Set[str] = set(
-            [group for attrs in self._register.values() for group in attrs["groups"]]
+
+        # Fetch config section:
+        rules_config = config.get_section("rules")
+
+        # Generate the master reference map. The priority order is:
+        # codes > names > groups > aliases
+        # (i.e. if there's a collision between a name and an
+        # alias - we assume the alias is wrong.)
+        valid_codes: Set[str] = set(self._register.keys())
+        reference_map = self.rule_reference_map()
+        valid_config_lookups = set(
+            manifest.rule_class.get_config_ref() for manifest in self._register.values()
         )
-        # default the allowlist to all the rules if not set
-        allowlist = config.get("rule_allowlist") or list(self._register.keys())
+
+        # Validate config doesn't try to specify values for unknown rules.
+        # NOTE: We _warn_ here rather than error.
+        for unexpected_ref in [
+            # Filtering to dicts gives us the sections.
+            k
+            for k, v in rules_config.items()
+            if isinstance(v, dict)
+            # Only keeping ones we don't expect
+            if k not in valid_config_lookups
+        ]:
+            rules_logger.warning(
+                "Rule configuration contains a section for unexpected "
+                f"rule {unexpected_ref!r}. These values will be ignored."
+            )
+            # For convenience (and migration), if we do find a potential match
+            # for the reference - add that as a warning.
+            # NOTE: We don't actually accept config in these cases, even though
+            # we could potentially match - because how to resolve _multiple_
+            # matching config sections is ambiguous.
+            if unexpected_ref in reference_map:
+                referenced_codes = reference_map[unexpected_ref]
+                if len(referenced_codes) == 1:
+                    referenced_code = list(referenced_codes)[0]
+                    referenced_name = self._register[referenced_code].name
+                    config_ref = self._register[
+                        referenced_code
+                    ].rule_class.get_config_ref()
+                    rules_logger.warning(
+                        "The reference was however found as a match for rule "
+                        f"{referenced_code} with name {referenced_name!r}. "
+                        "SQLFluff assumes configuration for this rule will "
+                        f"be specified in 'sqlfluff:rules:{config_ref}'."
+                    )
+                elif referenced_codes:
+                    rules_logger.warning(
+                        "The reference was found as a match for multiple rules: "
+                        f"{referenced_codes}. Config should be specified by the "
+                        "name of the relevant rule e.g. "
+                        "'sqlfluff:rules:capitalisation.keywords'."
+                    )
+
+        # The lists here are lists of references, which might be codes,
+        # names, aliases or groups.
+        # We default the allowlist to all the rules if not set (i.e. not specifying
+        # any rules just means "all the rules").
+        allowlist = config.get("rule_allowlist") or list(valid_codes)
         denylist = config.get("rule_denylist") or []
-        valid_rules_and_groups = list(self._register) + list(valid_groups)
 
         allowlisted_unknown_rule_codes = [
             r
             for r in allowlist
             # Add valid groups to the register when searching for invalid rules _only_
-            if not fnmatch.filter(valid_rules_and_groups, r)
+            if not fnmatch.filter(reference_map.keys(), r)
         ]
         if any(allowlisted_unknown_rule_codes):
             rules_logger.warning(
-                "Tried to allowlist unknown rules: {!r}".format(
+                "Tried to allowlist unknown rule references: {!r}".format(
                     allowlisted_unknown_rule_codes
                 )
             )
 
         denylisted_unknown_rule_codes = [
-            r
-            for r in denylist
-            if not fnmatch.filter({**self._register, **dict.fromkeys(valid_groups)}, r)
+            r for r in denylist if not fnmatch.filter(reference_map.keys(), r)
         ]
         if any(denylisted_unknown_rule_codes):  # pragma: no cover
             rules_logger.warning(
-                "Tried to denylist unknown rules: {!r}".format(
+                "Tried to denylist unknown rule references: {!r}".format(
                     denylisted_unknown_rule_codes
                 )
             )
@@ -1072,39 +1436,40 @@ class RuleSet:
         keylist = sorted(self._register.keys())
 
         # First we expand the allowlist and denylist globs
-        expanded_allowlist = self._expand_config_rule_glob_list(
-            allowlist
-        ) + self._expand_config_rule_group_list(allowlist, valid_groups)
-        expanded_denylist = self._expand_config_rule_glob_list(
-            denylist
-        ) + self._expand_config_rule_group_list(denylist, valid_groups)
+        expanded_allowlist = self._expand_rule_refs(allowlist, reference_map)
+        expanded_denylist = self._expand_rule_refs(denylist, reference_map)
 
         # Then we filter the rules
         keylist = [
             r for r in keylist if r in expanded_allowlist and r not in expanded_denylist
         ]
 
-        # Construct the kwargs for instantiation before we actually do it.
-        rule_kwargs = {}
-        for k in keylist:
+        # Construct the kwargs for each rule and instantiate in turn.
+        instantiated_rules = []
+        # Keep only config which isn't a section for a specific rule (i.e. isn't a dict).
+        # We'll handle those directly in the specific rule config section below.
+        generic_rule_config = {
+            k: v for k, v in rules_config.items() if not isinstance(v, dict)
+        }
+        for code in keylist:
             kwargs = {}
-            generic_rule_config = config.get_section("rules")
-            specific_rule_config = config.get_section(
-                ("rules", self._register[k]["code"])
-            )
+            rule_class = cast(Type[BaseRule], self._register[code].rule_class)
+            # Fetch the lookup code for the rule.
+            rule_config_ref = rule_class.get_config_ref()
+            specific_rule_config = config.get_section(("rules", rule_config_ref))
             if generic_rule_config:
                 kwargs.update(generic_rule_config)
             if specific_rule_config:
                 # Validate specific rule config before adding
-                self._validate_config_options(config, self._register[k]["code"])
+                self._validate_config_options(config, rule_config_ref)
                 kwargs.update(specific_rule_config)
-            kwargs["code"] = self._register[k]["code"]
+            kwargs["code"] = code
             # Allow variable substitution in making the description
-            kwargs["description"] = self._register[k]["description"].format(**kwargs)
-            rule_kwargs[k] = kwargs
+            kwargs["description"] = self._register[code].description.format(**kwargs)
+            # Instantiate when ready
+            instantiated_rules.append(rule_class(**kwargs))
 
-        # Instantiate in the final step
-        return [self._register[k]["cls"](**rule_kwargs[k]) for k in keylist]
+        return RulePack(instantiated_rules, reference_map)
 
     def copy(self):
         """Return a copy of self with a separate register."""
diff --git a/src/sqlfluff/core/rules/config_info.py b/src/sqlfluff/core/rules/config_info.py
index 063937c..e750e69 100644
--- a/src/sqlfluff/core/rules/config_info.py
+++ b/src/sqlfluff/core/rules/config_info.py
@@ -17,12 +17,6 @@ STANDARD_CONFIG_INFO_DICT = {
             "Used in the fixing step of this rule."
         ),
     },
-    "max_line_length": {
-        "validation": range(1000),
-        "definition": (
-            "The maximum length of a line to allow without raising a violation."
-        ),
-    },
     "indent_unit": {
         "validation": ["space", "tab"],
         "definition": "Whether to use tabs or spaces to add new indents.",
@@ -167,6 +161,13 @@ STANDARD_CONFIG_INFO_DICT = {
             "Defaults to ``False``."
         ),
     },
+    "prefer_quoted_keywords": {
+        "validation": [True, False],
+        "definition": (
+            "If ``True``, requires every keyword used as an identifier to be quoted. "
+            "Defaults to ``False``."
+        ),
+    },
     "blocked_words": {
         "definition": (
             "Optional, comma-separated list of blocked words which should not be used "
@@ -178,6 +179,11 @@ STANDARD_CONFIG_INFO_DICT = {
             "Optional, regex of blocked pattern which should not be used in statements."
         ),
     },
+    "match_source": {
+        "definition": (
+            "Optional, also match the regex of the blocked pattern against "
+            "the source (before templating is applied)."
+        ),
+    },
     "preferred_quoted_literal_style": {
         "validation": ["consistent", "single_quotes", "double_quotes"],
         "definition": (
diff --git a/src/sqlfluff/core/rules/crawlers.py b/src/sqlfluff/core/rules/crawlers.py
index 5baa6d2..7195f53 100644
--- a/src/sqlfluff/core/rules/crawlers.py
+++ b/src/sqlfluff/core/rules/crawlers.py
@@ -70,7 +70,7 @@ class SegmentSeekerCrawler(BaseCrawler):
         # Check whether we should consider this segment _or its children_
         # at all.
         if not self.passes_filter(context.segment):
-            if self.provide_raw_stack:
+            if self.provide_raw_stack:  # pragma: no cover
                 context.raw_stack += tuple(context.segment.raw_segments)
             return
 
diff --git a/src/sqlfluff/core/rules/doc_decorators.py b/src/sqlfluff/core/rules/doc_decorators.py
index 7a5e4f9..716a910 100644
--- a/src/sqlfluff/core/rules/doc_decorators.py
+++ b/src/sqlfluff/core/rules/doc_decorators.py
@@ -1,110 +1,39 @@
-"""A collection of decorators to modify rule docstrings for Sphinx."""
+"""A collection of decorators to modify rule docstrings for Sphinx.
 
-from sqlfluff.core.rules.config_info import get_config_info
-from sqlfluff.core.rules.base import rules_logger  # noqa
-import re
+NOTE: All of these decorators are deprecated from SQLFluff 2.0.0 onwards.
 
+They are still included to allow a transition period, but the functionality
+is now packaged in the BaseRule class via the RuleMetaclass.
+"""
 
-FIX_COMPATIBLE = "    This rule is ``sqlfluff fix`` compatible."
+from sqlfluff.core.rules.base import rules_logger  # noqa
 
 
 def document_fix_compatible(cls):
     """Mark the rule as fixable in the documentation."""
-    # Match `**Anti-pattern**`, `.. note::` and `**Configuration**`,
-    # then insert fix_compatible before the first occurrences.
-    # We match `**Configuration**` here to make it work in all order of doc decorators
-    pattern = re.compile(
-        "(\\s{4}\\*\\*Anti-pattern\\*\\*|\\s{4}\\.\\. note::|"
-        "\\s{4}\\*\\*Configuration\\*\\*)",
-        flags=re.MULTILINE,
+    rules_logger.warning(
+        f"{cls.__name__} uses the @document_fix_compatible decorator "
+        "which is deprecated in SQLFluff 2.0.0. Remove the decorator "
+        "to resolve this warning."
     )
-    cls.__doc__ = pattern.sub(f"\n\n{FIX_COMPATIBLE}\n\n\\1", cls.__doc__, count=1)
     return cls
 
 
-def is_fix_compatible(cls) -> bool:
-    """Return whether the rule is documented as fixable."""
-    return FIX_COMPATIBLE in cls.__doc__
-
-
 def document_groups(cls):
     """Mark the rule's groups in the documentation."""
-    # Match `**Anti-pattern**`, `.. note::` and `**Configuration**`,
-    # then insert group documentation    before the first occurrences.
-    # We match `**Configuration**` here to make it work in all order of doc decorators
-    pattern = re.compile(
-        "(\\s{4}\\*\\*Anti-pattern\\*\\*|\\s{4}\\.\\. note::|"
-        "\\s\\s{4}\\*\\*Configuration\\*\\*)",
-        flags=re.MULTILINE,
+    rules_logger.warning(
+        f"{cls.__name__} uses the @document_groups decorator "
+        "which is deprecated in SQLFluff 2.0.0. Remove the decorator "
+        "to resolve this warning."
     )
-
-    groups_docs = (
-        "\n    **Groups**: ``" + "``, ``".join(getattr(cls, "groups")) + "``\n"
-    )
-    cls.__doc__ = pattern.sub(f"\n\n{groups_docs}\n\n\\1", cls.__doc__, count=1)
     return cls
 
 
-def is_documenting_groups(cls) -> bool:
-    """Return whether the rule groups are documented."""
-    return "\n    **Groups**: " in cls.__doc__
-
-
-def document_configuration(cls, ruleset="std"):
-    """Add a 'Configuration' section to a Rule docstring.
-
-    Utilize the the metadata in config_info to dynamically
-    document the configuration options for a given rule.
-
-    This is a little hacky, but it allows us to propagate configuration
-    options in the docs, from a single source of truth.
-    """
-    if ruleset == "std":
-        config_info = get_config_info()
-    else:  # pragma: no cover
-        raise (
-            NotImplementedError(
-                "Add another config info dict for the new ruleset here!"
-            )
-        )
-
-    config_doc = "\n    **Configuration**\n"
-    try:
-        for keyword in sorted(cls.config_keywords):
-            try:
-                info_dict = config_info[keyword]
-            except KeyError:  # pragma: no cover
-                raise KeyError(
-                    "Config value {!r} for rule {} is not configured in "
-                    "`config_info`.".format(keyword, cls.__name__)
-                )
-            config_doc += "\n    * ``{}``: {}".format(keyword, info_dict["definition"])
-            if (
-                config_doc[-1] != "."
-                and config_doc[-1] != "?"
-                and config_doc[-1] != "\n"
-            ):
-                config_doc += "."
-            if "validation" in info_dict:
-                config_doc += " Must be one of ``{}``.".format(info_dict["validation"])
-    except AttributeError:
-        rules_logger.info(f"No config_keywords defined for {cls.__name__}")
-        return cls
-    # Add final blank line
-    config_doc += "\n"
-
-    if "**Anti-pattern**" in cls.__doc__:
-        # Match `**Anti-pattern**`, then insert configuration before
-        # the first occurrences
-        pattern = re.compile("(\\s{4}\\*\\*Anti-pattern\\*\\*)", flags=re.MULTILINE)
-        cls.__doc__ = pattern.sub(f"\n{config_doc}\n\\1", cls.__doc__, count=1)
-    else:
-        # Match last `\n` or `.`, then append configuration
-        pattern = re.compile("(\\.|\\n)$", flags=re.MULTILINE)
-        cls.__doc__ = pattern.sub(f"\\1\n{config_doc}\n", cls.__doc__, count=1)
+def document_configuration(cls, **kwargs):
+    """Add a 'Configuration' section to a Rule docstring."""
+    rules_logger.warning(
+        f"{cls.__name__} uses the @document_configuration decorator "
+        "which is deprecated in SQLFluff 2.0.0. Remove the decorator "
+        "to resolve this warning."
+    )
     return cls
-
-
-def is_configurable(cls) -> bool:
-    """Return whether the rule is documented as fixable."""
-    return "**Configuration**" in cls.__doc__
diff --git a/src/sqlfluff/core/slice_helpers.py b/src/sqlfluff/core/slice_helpers.py
new file mode 100644
index 0000000..561f2cd
--- /dev/null
+++ b/src/sqlfluff/core/slice_helpers.py
@@ -0,0 +1,29 @@
+"""Helpers for handling slices."""
+
+from typing import Tuple
+
+
+def to_tuple(s: slice) -> Tuple[int, int]:
+    """Convert a slice into a tuple of (start, stop)."""
+    assert s.start is not None and s.stop is not None
+    return (s.start, s.stop)
+
+
+def slice_length(s: slice) -> int:
+    """Get the length of a slice."""
+    return s.stop - s.start
+
+
+def is_zero_slice(s: slice) -> bool:
+    """Return true if this is a zero slice."""
+    return s.stop == s.start
+
+
+def zero_slice(i: int) -> slice:
+    """Construct a zero slice from a single integer."""
+    return slice(i, i)
+
+
+def offset_slice(start: int, offset: int) -> slice:
+    """Construct a slice from a start and offset."""
+    return slice(start, start + offset)
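
These helpers are thin wrappers, but they make slice arithmetic self-documenting at call sites. For example:

    from sqlfluff.core.slice_helpers import (
        is_zero_slice,
        offset_slice,
        slice_length,
        to_tuple,
        zero_slice,
    )

    s = offset_slice(10, 5)  # slice(10, 15)
    assert to_tuple(s) == (10, 15)
    assert slice_length(s) == 5
    assert not is_zero_slice(s)
    assert is_zero_slice(zero_slice(10))
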
diff --git a/src/sqlfluff/core/templaters/base.py b/src/sqlfluff/core/templaters/base.py
index 9fa1674..9253f0e 100644
--- a/src/sqlfluff/core/templaters/base.py
+++ b/src/sqlfluff/core/templaters/base.py
@@ -4,8 +4,8 @@ import logging
 from bisect import bisect_left
 from typing import Dict, Iterator, List, Tuple, Optional, NamedTuple, Iterable
 from sqlfluff.core.config import FluffConfig
-
 from sqlfluff.core.errors import SQLFluffSkipFile
+from sqlfluff.core.slice_helpers import zero_slice
 
 # Instantiate the templater logger
 templater_logger = logging.getLogger("sqlfluff.templater")
@@ -74,7 +74,13 @@ class RawFileSlice(NamedTuple):
         return slice(self.source_idx, self.end_source_idx())
 
     def is_source_only_slice(self):
-        """Based on its slice_type, does it only appear in the *source*?"""
+        """Based on its slice_type, does it only appear in the *source*?
+
+        There are some slice types which are automatically source only.
+        There are *also* some which are source only because they render
+        to an empty string.
+        """
+        # TODO: should any new logic go here?
         return self.slice_type in ("comment", "block_end", "block_start", "block_mid")
 
 
@@ -248,6 +254,10 @@ class TemplatedFile:
         start_idx = start_idx or 0
         first_idx = None
         last_idx = start_idx
+        # Work through the sliced file, starting at the start_idx if given
+        # as an optimisation hint. The sliced_file is a list of TemplatedFileSlice
+        # which reference parts of the templated file and where they exist in the
+        # source.
         for idx, elem in enumerate(self.sliced_file[start_idx:]):
             last_idx = idx + start_idx
             if elem[2].stop >= templated_pos:
@@ -322,7 +332,7 @@ class TemplatedFile:
         if template_slice.start == template_slice.stop:
             # Is it on a join?
             if insertion_point >= 0:
-                return slice(insertion_point, insertion_point)
+                return zero_slice(insertion_point)
             # It's within a segment.
             else:
                 if (
@@ -330,8 +340,7 @@ class TemplatedFile:
                     and ts_start_subsliced_file[0][0] == "literal"
                 ):
                     offset = template_slice.start - ts_start_subsliced_file[0][2].start
-                    return slice(
-                        ts_start_subsliced_file[0][1].start + offset,
+                    return zero_slice(
                         ts_start_subsliced_file[0][1].start + offset,
                     )
                 else:
diff --git a/src/sqlfluff/core/templaters/jinja.py b/src/sqlfluff/core/templaters/jinja.py
index 54a4414..e7730b5 100644
--- a/src/sqlfluff/core/templaters/jinja.py
+++ b/src/sqlfluff/core/templaters/jinja.py
@@ -2,6 +2,8 @@
 import logging
 import os.path
 import pkgutil
+import importlib
+import sys
 from functools import reduce
 from typing import Callable, Dict, Generator, List, Optional, Tuple
 
@@ -15,6 +17,7 @@ from jinja2 import (
 )
 from jinja2.environment import Template
 from jinja2.exceptions import TemplateNotFound, UndefinedError
+from jinja2.ext import Extension
 from jinja2.sandbox import SandboxedEnvironment
 
 from sqlfluff.core.config import FluffConfig
@@ -28,7 +31,6 @@ from sqlfluff.core.templaters.base import (
 from sqlfluff.core.templaters.python import PythonTemplater
 from sqlfluff.core.templaters.slicers.tracer import JinjaAnalyzer
 
-
 # Instantiate the templater logger
 templater_logger = logging.getLogger("sqlfluff.templater")
 
@@ -146,12 +148,18 @@ class JinjaTemplater(PythonTemplater):
             os.path.join(library_path, "..") if is_library_module else library_path
         )
 
-        for loader, module_name, is_pkg in pkgutil.walk_packages([walk_path]):
+        for module_finder, module_name, _ in pkgutil.walk_packages([walk_path]):
             # skip other modules that can be near module_dir
             if is_library_module and not module_name.startswith(library_module_name):
                 continue
 
-            module = loader.find_module(module_name).load_module(module_name)
+            # find_module() and load_module() are deprecated as of Python 3.4.
+            # This follows roughly the guidance of the python docs:
+            # https://docs.python.org/3/library/importlib.html#approximating-importlib-import-module
+            spec = module_finder.find_spec(module_name)
+            module = importlib.util.module_from_spec(spec)
+            sys.modules[module_name] = module
+            spec.loader.exec_module(module)
 
             if "." in module_name:  # nested modules have `.` in module_name
                 *module_path, last_module_name = module_name.split(".")
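
Stripped of SQLFluff's surrounding logic, this is the standard
find_spec()/module_from_spec()/exec_module() pattern from the importlib
docs. A standalone sketch of the same idea (the function name is invented):

    import importlib.util
    import pkgutil
    import sys

    def load_all_modules(path):
        """Load every module under `path` without deprecated loader calls."""
        loaded = {}
        for finder, name, _ in pkgutil.walk_packages([path]):
            spec = finder.find_spec(name)
            module = importlib.util.module_from_spec(spec)
            # Register before exec_module so self/circular imports resolve.
            sys.modules[name] = module
            spec.loader.exec_module(module)
            loaded[name] = module
        return loaded
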
@@ -259,12 +267,15 @@ class JinjaTemplater(PythonTemplater):
             loader = SafeFileSystemLoader(macros_path or [])
         else:
             loader = FileSystemLoader(macros_path) if macros_path else None
+        extensions = ["jinja2.ext.do"]
+        if self._apply_dbt_builtins(config):
+            extensions.append(DBTTestExtension)
 
         return SandboxedEnvironment(
             keep_trailing_newline=True,
             # The do extension allows the "do" directive
             autoescape=False,
-            extensions=["jinja2.ext.do"],
+            extensions=extensions,
             loader=loader,
         )
 
@@ -279,6 +290,13 @@ class JinjaTemplater(PythonTemplater):
                     return result
         return None
 
+    def _apply_dbt_builtins(self, config: FluffConfig) -> bool:
+        if config:
+            return config.get_section(
+                (self.templater_selector, self.name, "apply_dbt_builtins")
+            )
+        return False
+
     def get_context(self, fname=None, config=None, **kw) -> Dict:
         """Get the templating context from the config."""
         # Load the context
@@ -290,10 +308,7 @@ class JinjaTemplater(PythonTemplater):
             # so they can be used by the macros too
             live_context.update(self._extract_libraries_from_config(config=config))
 
-            apply_dbt_builtins = config.get_section(
-                (self.templater_selector, self.name, "apply_dbt_builtins")
-            )
-            if apply_dbt_builtins:
+            if self._apply_dbt_builtins(config):
                 # This feels a bit wrong defining these here, they should probably
                 # be configurable somewhere sensible. But for now they're not.
                 # TODO: Come up with a better solution.
@@ -503,7 +518,9 @@ class JinjaTemplater(PythonTemplater):
         if make_template is None:
             # make_template() was not provided. Use the base class
             # implementation instead.
-            return super().slice_file(raw_str, templated_str, config, **kwargs)
+            return super().slice_file(
+                raw_str, templated_str, config, **kwargs
+            )  # pragma: no cover
 
         templater_logger.info("Slicing File Template")
         templater_logger.debug("    Raw String: %r", raw_str)
@@ -596,3 +613,19 @@ class DummyUndefined(jinja2.Undefined):
 
     def __iter__(self):
         return [self].__iter__()
+
+
+class DBTTestExtension(Extension):
+    """Jinja extension to handle the dbt test tag."""
+
+    tags = {"test"}
+
+    def parse(self, parser):
+        """Parses out the contents of the test tag."""
+        node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)
+        test_name = parser.parse_assign_target(name_only=True).name
+
+        parser.parse_signature(node)
+        node.name = f"test_{test_name}"
+        node.body = parser.parse_statements(("name:endtest",), drop_needle=True)
+        return node
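
What the extension does, demonstrated directly: it captures a dbt-style
{% test %} block as a Jinja macro named test_<name>, so the block renders to
nothing while its body stays available. (A sketch; the test name and SQL are
invented, and it assumes sqlfluff 2.0 is installed.)

    import jinja2
    from sqlfluff.core.templaters.jinja import DBTTestExtension

    env = jinja2.Environment(extensions=[DBTTestExtension])
    template = env.from_string(
        "{% test not_null(model, column_name) %}"
        "SELECT * FROM {{ model }} WHERE {{ column_name }} IS NULL"
        "{% endtest %}"
    )
    print(repr(template.render()))  # '' - the body is captured, not output
    print(template.module.test_not_null("my_table", "id"))
    # SELECT * FROM my_table WHERE id IS NULL
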
diff --git a/src/sqlfluff/core/templaters/placeholder.py b/src/sqlfluff/core/templaters/placeholder.py
index e24067a..aea8c45 100644
--- a/src/sqlfluff/core/templaters/placeholder.py
+++ b/src/sqlfluff/core/templaters/placeholder.py
@@ -6,6 +6,7 @@ from typing import Dict, Optional, Tuple
 
 
 from sqlfluff.core.errors import SQLTemplaterError
+from sqlfluff.core.slice_helpers import offset_slice
 
 from sqlfluff.core.templaters.base import (
     RawFileSlice,
@@ -165,10 +166,9 @@ class PlaceholderTemplater(RawTemplater):
                 TemplatedFileSlice(
                     slice_type="literal",
                     source_slice=slice(last_pos_raw, span[0], None),
-                    templated_slice=slice(
+                    templated_slice=offset_slice(
                         last_pos_templated,
-                        last_pos_templated + last_literal_length,
-                        None,
+                        last_literal_length,
                     ),
                 )
             )
@@ -185,10 +185,8 @@ class PlaceholderTemplater(RawTemplater):
             template_slices.append(
                 TemplatedFileSlice(
                     slice_type="templated",
-                    source_slice=slice(span[0], span[1], None),
-                    templated_slice=slice(
-                        start_template_pos, start_template_pos + len(replacement), None
-                    ),
+                    source_slice=slice(span[0], span[1]),
+                    templated_slice=offset_slice(start_template_pos, len(replacement)),
                 )
             )
             raw_slices.append(
@@ -207,11 +205,10 @@ class PlaceholderTemplater(RawTemplater):
             template_slices.append(
                 TemplatedFileSlice(
                     slice_type="literal",
-                    source_slice=slice(last_pos_raw, len(in_str), None),
-                    templated_slice=slice(
+                    source_slice=slice(last_pos_raw, len(in_str)),
+                    templated_slice=offset_slice(
                         last_pos_templated,
-                        last_pos_templated + (len(in_str) - last_pos_raw),
-                        None,
+                        (len(in_str) - last_pos_raw),
                     ),
                 )
             )
diff --git a/src/sqlfluff/core/templaters/python.py b/src/sqlfluff/core/templaters/python.py
index 512429a..9d60d74 100644
--- a/src/sqlfluff/core/templaters/python.py
+++ b/src/sqlfluff/core/templaters/python.py
@@ -6,6 +6,7 @@ from typing import Iterable, Dict, Tuple, List, Iterator, Optional, NamedTuple
 
 from sqlfluff.core.errors import SQLTemplaterError
 from sqlfluff.core.string_helpers import findall
+from sqlfluff.core.slice_helpers import offset_slice, zero_slice
 
 from sqlfluff.core.templaters.base import (
     RawTemplater,
@@ -54,9 +55,9 @@ class IntermediateFileSlice(NamedTuple):
                 # Assume it's a literal, check the literal actually matches.
                 templated_len = len(focus.raw)
                 if target_end == "head":
-                    check_slice = slice(
+                    check_slice = offset_slice(
                         main_templated_slice.start,
-                        main_templated_slice.start + templated_len,
+                        templated_len,
                     )
                 else:
                     check_slice = slice(
@@ -367,7 +368,7 @@ class PythonTemplater(RawTemplater):
             slices.append(
                 TemplatedFileSlice(
                     "templated",
-                    slice(last_slice.source_slice.stop, last_slice.source_slice.stop),
+                    zero_slice(last_slice.source_slice.stop),
                     slice(last_slice.templated_slice.stop, len(templated_str)),
                 )
             )
@@ -505,14 +506,13 @@ class PythonTemplater(RawTemplater):
                 idx = None
                 yield IntermediateFileSlice(
                     "invariant",
-                    slice(
+                    offset_slice(
                         raw_file_slice.source_idx,
-                        raw_file_slice.source_idx + len(raw_file_slice.raw),
+                        len(raw_file_slice.raw),
                     ),
-                    slice(
+                    offset_slice(
                         templated_occurrences[raw_file_slice.raw][0],
-                        templated_occurrences[raw_file_slice.raw][0]
-                        + len(raw_file_slice.raw),
+                        len(raw_file_slice.raw),
                     ),
                     [
                         RawFileSlice(
@@ -1013,8 +1013,8 @@ class PythonTemplater(RawTemplater):
                 # Yield the literal
                 owu_literal_slice = TemplatedFileSlice(
                     "literal",
-                    slice(raw_idx, raw_idx + raw_len),
-                    slice(template_idx, template_idx + raw_len),
+                    offset_slice(raw_idx, raw_len),
+                    offset_slice(template_idx, raw_len),
                 )
                 templater_logger.debug(
                     "    Yielding Unique: %r, %s",
diff --git a/src/sqlfluff/core/templaters/slicers/tracer.py b/src/sqlfluff/core/templaters/slicers/tracer.py
index 72a7140..b97644a 100644
--- a/src/sqlfluff/core/templaters/slicers/tracer.py
+++ b/src/sqlfluff/core/templaters/slicers/tracer.py
@@ -76,7 +76,9 @@ class JinjaTracer:
         trace_template = self.make_template(trace_template_str)
         trace_template_output = trace_template.render()
         # Split output by section. Each section has two possible formats.
-        trace_entries = list(regex.finditer(r"\0", trace_template_output))
+        trace_entries: List[regex.Match] = list(
+            regex.finditer(r"\0", trace_template_output)
+        )
         for match_idx, match in enumerate(trace_entries):
             pos1 = match.span()[0]
             try:
@@ -184,7 +186,7 @@ class JinjaTracer:
                 slice(self.source_idx, self.source_idx + target_slice_length),
             )
         )
-        if slice_type in ("literal", "templated"):
+        if target_slice_length:
             self.source_idx += target_slice_length
 
 
@@ -454,7 +456,6 @@ class JinjaAnalyzer:
                     slice_idx = len(self.raw_sliced) - 1
                     self.idx_raw += len(str_buff)
                 if block_type.startswith("block"):
-                    self.track_block_start(block_type, tag_contents[0])
                     self.track_block_end(block_type, tag_contents[0])
                     self.update_next_slice_indices(
                         slice_idx, block_type, tag_contents[0]
@@ -522,7 +523,8 @@ class JinjaAnalyzer:
         # :TRICKY: Syntactically, the Jinja {% include %} directive looks like
         # a block, but its behavior is basically syntactic sugar for
         # {{ open("somefile).read() }}. Thus, treat it as templated code.
-        if tag_name == "include":
+        # It's a similar situation with {% import %} and {% from ... import %}.
+        if tag_name in ["include", "import", "from"]:
             block_type = "templated"
         elif tag_name.startswith("end"):
             block_type = "block_end"
@@ -559,20 +561,6 @@ class JinjaAnalyzer:
             trimmed_parts = trimmed_content.split()
         return trimmed_parts
 
-    def track_block_start(self, block_type: str, tag_name: str) -> None:
-        """On starting a 'call' block, set slice_type to "templated"."""
-        if block_type == "block_start" and tag_name == "call":
-            # Replace RawSliceInfo for this slice with one that has block_type
-            # "templated".
-            old_raw_file_slice = self.raw_sliced[-1]
-            self.raw_sliced[-1] = old_raw_file_slice._replace(slice_type="templated")
-
-            # Move existing raw_slice_info entry since it's keyed by RawFileSlice.
-            self.raw_slice_info[self.raw_sliced[-1]] = self.raw_slice_info[
-                old_raw_file_slice
-            ]
-            del self.raw_slice_info[old_raw_file_slice]
-
     def track_block_end(self, block_type: str, tag_name: str) -> None:
         """On ending a 'for' or 'if' block, set up tracking."""
         if block_type == "block_end" and tag_name in (
diff --git a/src/sqlfluff/core/timing.py b/src/sqlfluff/core/timing.py
index 5d674b3..f903f06 100644
--- a/src/sqlfluff/core/timing.py
+++ b/src/sqlfluff/core/timing.py
@@ -1,6 +1,6 @@
 """Timing summary class."""
 
-from typing import Optional, List, Dict
+from typing import Optional, List, Dict, Tuple, Set, Union
 from collections import defaultdict
 
 
@@ -38,3 +38,38 @@ class TimingSummary:
                     "avg": sum(vals[step]) / len(vals[step]),
                 }
         return summary
+
+
+class RuleTimingSummary:
+    """An object for tracking the timing of rules across many files."""
+
+    def __init__(self) -> None:
+        self._timings: List[Tuple[str, str, float]] = []
+
+    def add(self, rule_timings: List[Tuple[str, str, float]]):
+        """Add a set of rule timings."""
+        # Add records to the main list.
+        self._timings.extend(rule_timings)
+
+    def summary(self, threshold=0.5) -> Dict[str, Dict[str, Union[float, str]]]:
+        """Generate a summary for display."""
+        keys: Set[Tuple[str, str]] = set()
+        vals: Dict[Tuple[str, str], List[float]] = defaultdict(list)
+
+        for code, name, time in self._timings:
+            vals[(code, name)].append(time)
+            keys.add((code, name))
+
+        summary: Dict[str, Dict[str, Union[float, str]]] = {}
+        for code, name in sorted(keys):
+            timings = vals[(code, name)]
+            # For brevity, if the total time taken is less than
+            # `threshold`, then don't display.
+            if sum(timings) < threshold:
+                continue
+            summary[f"{code}: {name}"] = {
+                "sum (n)": f"{sum(timings):.2f} ({len(timings)})",
+                "min": min(timings),
+                "max": max(timings),
+            }
+        return summary
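
For illustration, how the new summary reads once populated (the rule codes
are real 2.0 codes, the timings invented):

    from sqlfluff.core.timing import RuleTimingSummary

    summary = RuleTimingSummary()
    summary.add([("LT01", "layout.spacing", 0.4), ("LT01", "layout.spacing", 0.3)])
    summary.add([("AL04", "aliasing.unique.table", 0.01)])
    # AL04 totals under the default 0.5s threshold, so it is omitted.
    print(summary.summary())
    # {'LT01: layout.spacing': {'sum (n)': '0.70 (2)', 'min': 0.3, 'max': 0.4}}
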
diff --git a/src/sqlfluff/dialects/dialect_ansi.py b/src/sqlfluff/dialects/dialect_ansi.py
index e7dbdec..676c1ea 100644
--- a/src/sqlfluff/dialects/dialect_ansi.py
+++ b/src/sqlfluff/dialects/dialect_ansi.py
@@ -31,6 +31,7 @@ from sqlfluff.core.parser import (
     Delimited,
     GreedyUntil,
     Indent,
+    ImplicitIndent,
     KeywordSegment,
     Matchable,
     MultiStringParser,
@@ -177,13 +178,15 @@ ansi_dialect.set_lexer_matchers(
         # (?>                      Atomic grouping
         #                          (https://www.regular-expressions.info/atomic.html).
         #     \d+\.\d+             e.g. 123.456
-        #     |\d+\.(?!\.)         e.g. 123.
+        #     |\d+\.(?![\.\w])     e.g. 123.
         #                          (N.B. negative lookahead assertion to ensure we
-        #                          don't match range operators `..` in Exasol).
+        #                          don't match range operators `..` in Exasol, and
+        #                          that in bigquery we don't match the "."
+        #                          in "asd-12.foo").
         #     |\.\d+               e.g. .456
         #     |\d+                 e.g. 123
         # )
-        # ([eE][+-]?\d+)?          Optional exponential.
+        # (\.?[eE][+-]?\d+)?          Optional exponential.
         # (
         #     (?<=\.)              If matched character ends with . (e.g. 123.) then
         #                          don't worry about word boundary check.
@@ -192,7 +195,7 @@ ansi_dialect.set_lexer_matchers(
         # )
         RegexLexer(
             "numeric_literal",
-            r"(?>\d+\.\d+|\d+\.(?!\.)|\.\d+|\d+)([eE][+-]?\d+)?((?<=\.)|(?=\b))",
+            r"(?>\d+\.\d+|\d+\.(?![\.\w])|\.\d+|\d+)(\.?[eE][+-]?\d+)?((?<=\.)|(?=\b))",
             LiteralSegment,
             segment_kwargs={"type": "numeric_literal"},
         ),
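
The two regex changes (the widened lookahead and the optional dot before the
exponent) can be checked directly. The atomic group needs the third-party
regex module (or re on Python 3.11+):

    import regex

    NUMERIC = regex.compile(
        r"(?>\d+\.\d+|\d+\.(?![\.\w])|\.\d+|\d+)(\.?[eE][+-]?\d+)?((?<=\.)|(?=\b))"
    )
    for text in ["123.456", "123.", ".456", "1e10", "1.e5", "1..2", "12.foo"]:
        m = NUMERIC.match(text)
        print(text, "->", m.group(0) if m else None)
    # '1..2' yields '1' (the Exasol range operator survives), '12.foo'
    # yields '12' (the dot is left for the reference), and '1.e5' still
    # lexes as one number thanks to the optional dot before the exponent.
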
@@ -214,6 +217,7 @@ ansi_dialect.set_lexer_matchers(
         StringLexer("minus", "-", CodeSegment),
         StringLexer("divide", "/", CodeSegment),
         StringLexer("percent", "%", CodeSegment),
+        StringLexer("question", "?", CodeSegment),
         StringLexer("ampersand", "&", CodeSegment),
         StringLexer("vertical_bar", "|", CodeSegment),
         StringLexer("caret", "^", CodeSegment),
@@ -226,7 +230,10 @@ ansi_dialect.set_lexer_matchers(
         StringLexer("crly_bracket_close", "}", CodeSegment),
         StringLexer("colon", ":", CodeSegment),
         StringLexer("semicolon", ";", CodeSegment),
-        RegexLexer("code", r"[0-9a-zA-Z_]+", CodeSegment),
+        # This is the "fallback" lexer for anything else which looks like SQL.
+        RegexLexer(
+            "code", r"[0-9a-zA-Z_]+", CodeSegment, segment_kwargs={"type": "code"}
+        ),
     ]
 )
 
@@ -255,12 +262,11 @@ ansi_dialect.sets("datetime_units").update(
 ansi_dialect.sets("date_part_function_name").update(["DATEADD"])
 
 # Set Keywords
-ansi_dialect.sets("unreserved_keywords").update(
-    [n.strip().upper() for n in ansi_unreserved_keywords.split("\n")]
+ansi_dialect.update_keywords_set_from_multiline_string(
+    "unreserved_keywords", ansi_unreserved_keywords
 )
-
-ansi_dialect.sets("reserved_keywords").update(
-    [n.strip().upper() for n in ansi_reserved_keywords.split("\n")]
+ansi_dialect.update_keywords_set_from_multiline_string(
+    "reserved_keywords", ansi_reserved_keywords
 )
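
The update_keywords_set_from_multiline_string helper used above is defined
elsewhere in this release; judging from the code it replaces, it is
presumably equivalent to:

    def update_keywords_set_from_multiline_string(dialect, set_name, keywords):
        # One keyword per line; normalise case and add to the named set.
        dialect.sets(set_name).update(n.strip().upper() for n in keywords.split("\n"))
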
 
 # Bracket pairs (a set of tuples).
@@ -281,7 +287,7 @@ ansi_dialect.sets("bracket_pairs").update(
 # an item in "FROM", are treated as returning a COLUMN, not a TABLE. Apparently,
 # among dialects supported by SQLFluff, only BigQuery has this concept, but this
 # set is defined in the ANSI dialect because:
-# - It impacts core linter rules (see L020 and several other rules that subclass
+# - It impacts core linter rules (see AL04 and several other rules that subclass
 #   from it) and how they interpret the contents of table_expressions
 # - At least one other database (DB2) has the same value table function,
 #   UNNEST(), as BigQuery. DB2 is not currently supported by SQLFluff.
@@ -307,6 +313,7 @@ ansi_dialect.add(
     DotSegment=StringParser(".", SymbolSegment, type="dot"),
     StarSegment=StringParser("*", SymbolSegment, type="star"),
     TildeSegment=StringParser("~", SymbolSegment, type="tilde"),
+    ParameterSegment=StringParser("?", SymbolSegment, type="parameter"),
     CastOperatorSegment=StringParser("::", SymbolSegment, type="casting_operator"),
     PlusSegment=StringParser("+", SymbolSegment, type="binary_operator"),
     MinusSegment=StringParser("-", SymbolSegment, type="binary_operator"),
@@ -345,22 +352,22 @@ ansi_dialect.add(
             anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$",
         )
     ),
-    VersionIdentifierSegment=RegexParser(r"[A-Z0-9_.]*", IdentifierSegment),
     ParameterNameSegment=RegexParser(r"[A-Z][A-Z0-9_]*", CodeSegment, type="parameter"),
-    FunctionNameIdentifierSegment=RegexParser(
-        r"[A-Z][A-Z0-9_]*",
-        CodeSegment,
-        type="function_name_identifier",
+    FunctionNameIdentifierSegment=TypedParser(
+        "code", CodeSegment, type="function_name_identifier"
     ),
     # Maybe data types should be more restrictive?
     DatatypeIdentifierSegment=SegmentGenerator(
         # Generate the anti template from the set of reserved keywords
-        lambda dialect: RegexParser(
-            r"[A-Z][A-Z0-9_]*",
-            CodeSegment,
-            type="data_type_identifier",
-            anti_template=r"^(NOT)$",
-            # TODO - this is a stopgap until we implement explicit data types
+        lambda dialect: OneOf(
+            RegexParser(
+                r"[A-Z][A-Z0-9_]*",
+                CodeSegment,
+                type="data_type_identifier",
+                anti_template=r"^(NOT)$",
+                # TODO - this is a stopgap until we implement explicit data types
+            ),
+            Ref("SingleIdentifierGrammar", exclude=Ref("NakedIdentifierSegment")),
         ),
     ),
     # Ansi Intervals
@@ -414,9 +421,7 @@ ansi_dialect.add(
         Ref("BitwiseLShiftSegment"),
         Ref("BitwiseRShiftSegment"),
     ),
-    SignedSegmentGrammar=AnyNumberOf(
-        Ref("PositiveSegment"), Ref("NegativeSegment"), min_times=1
-    ),
+    SignedSegmentGrammar=OneOf(Ref("PositiveSegment"), Ref("NegativeSegment")),
     StringBinaryOperatorGrammar=OneOf(Ref("ConcatSegment")),
     BooleanBinaryOperatorGrammar=OneOf(
         Ref("AndOperatorGrammar"), Ref("OrOperatorGrammar")
@@ -435,7 +440,7 @@ ansi_dialect.add(
     # hookpoint for other dialects
     # e.g. EXASOL str to date cast with DATE '2021-01-01'
     # Give it a different type as needs to be single quotes and
-    # should not be changed by rules (e.g. rule L064)
+    # should not be changed by rules (e.g. rule CV10)
     DateTimeLiteralGrammar=Sequence(
         OneOf("DATE", "TIME", "TIMESTAMP", "INTERVAL"),
         TypedParser("single_quote", LiteralSegment, type="date_constructor_literal"),
@@ -453,6 +458,7 @@ ansi_dialect.add(
         Ref("NullLiteralSegment"),
         Ref("DateTimeLiteralGrammar"),
         Ref("ArrayLiteralSegment"),
+        Ref("TypedArrayLiteralSegment"),
         Ref("ObjectLiteralSegment"),
     ),
     AndOperatorGrammar=StringParser("AND", BinaryOperatorSegment),
@@ -503,6 +509,7 @@ ansi_dialect.add(
         "LIMIT",
         "OVERLAPS",
         Ref("SetOperatorSegment"),
+        "FETCH",
     ),
     # Define these as grammars to allow child dialects to enable them (since they are
     # non-standard keywords)
@@ -520,6 +527,7 @@ ansi_dialect.add(
         Ref("SetOperatorSegment"),
         Ref("WithNoSchemaBindingClauseSegment"),
         Ref("WithDataClauseSegment"),
+        "FETCH",
     ),
     WhereClauseTerminatorGrammar=OneOf(
         "LIMIT",
@@ -529,6 +537,7 @@ ansi_dialect.add(
         "QUALIFY",
         "WINDOW",
         "OVERLAPS",
+        "FETCH",
     ),
     GroupByClauseTerminatorGrammar=OneOf(
         Sequence("ORDER", "BY"),
@@ -536,12 +545,14 @@ ansi_dialect.add(
         "HAVING",
         "QUALIFY",
         "WINDOW",
+        "FETCH",
     ),
     HavingClauseTerminatorGrammar=OneOf(
         Sequence("ORDER", "BY"),
         "LIMIT",
         "QUALIFY",
         "WINDOW",
+        "FETCH",
     ),
     OrderByClauseTerminators=OneOf(
         "LIMIT",
@@ -551,6 +562,7 @@ ansi_dialect.add(
         "WINDOW",
         Ref("FrameClauseUnitGrammar"),
         "SEPARATOR",
+        "FETCH",
     ),
     PrimaryKeyGrammar=Sequence("PRIMARY", "KEY"),
     ForeignKeyGrammar=Sequence("FOREIGN", "KEY"),
@@ -563,8 +575,7 @@ ansi_dialect.add(
         ),
         OneOf(Sequence("ANY", "TYPE"), Ref("DatatypeSegment")),
     ),
-    # This is a placeholder for other dialects.
-    SimpleArrayTypeGrammar=Nothing(),
+    AutoIncrementGrammar=Sequence("AUTO_INCREMENT"),
     # Base Expression element is the right thing to reference for everything
     # which functions as an expression, but could include literals.
     BaseExpressionElementGrammar=OneOf(
@@ -578,6 +589,18 @@ ansi_dialect.add(
             Ref("DatatypeSegment"),
             Ref("LiteralGrammar"),
         ),
+        # These terminators allow better performance by giving a signal
+        # of a likely complete match if they come after a match. For
+        # example "123," only needs to match against the LiteralGrammar
+        # and, because a comma follows, never needs to be matched against
+        # ExpressionSegment or FunctionSegment, which are both much
+        # more complicated.
+        terminators=[
+            Ref("CommaSegment"),
+            # TODO: We can almost certainly add a few more here, but for
+            # now, the most reliable (and impactful) is the comma.
+            # Others could include some variant on AliasExpressionSegment.
+        ],
     ),
     FilterClauseGrammar=Sequence(
         "FILTER", Bracketed(Sequence("WHERE", Ref("ExpressionSegment")))
@@ -738,19 +761,91 @@ class IntervalExpressionSegment(BaseSegment):
     )
 
 
+class ArrayTypeSegment(BaseSegment):
+    """Prefix for array literals specifying the type.
+
+    Often "ARRAY" or "ARRAY<type>"
+    """
+
+    type = "array_type"
+    match_grammar: Matchable = Nothing()
+
+
+class SizedArrayTypeSegment(BaseSegment):
+    """Array type with a size."""
+
+    type = "sized_array_type"
+    match_grammar = Sequence(
+        Ref("ArrayTypeSegment"),
+        Ref("ArrayAccessorSegment"),
+    )
+
+
 class ArrayLiteralSegment(BaseSegment):
-    """An array literal segment."""
+    """An array literal segment.
+
+    An unqualified array literal:
+    e.g. [1, 2, 3]
+    """
 
     type = "array_literal"
+    match_grammar: Matchable = Bracketed(
+        Delimited(Ref("BaseExpressionElementGrammar"), optional=True),
+        bracket_type="square",
+    )
+
+
+class TypedArrayLiteralSegment(BaseSegment):
+    """An array literal segment."""
+
+    type = "typed_array_literal"
     match_grammar: Matchable = Sequence(
-        Ref("SimpleArrayTypeGrammar", optional=True),
-        Bracketed(
-            Delimited(Ref("BaseExpressionElementGrammar"), optional=True),
-            bracket_type="square",
+        Ref("ArrayTypeSegment"),
+        Ref("ArrayLiteralSegment"),
+    )
+
+
+class StructTypeSegment(BaseSegment):
+    """Expression to construct a STRUCT datatype.
+
+    (Used in BigQuery for example)
+    """
+
+    type = "struct_type"
+    match_grammar: Matchable = Nothing()
+
+
+class StructLiteralSegment(BaseSegment):
+    """An array literal segment.
+
+    An unqualified struct literal:
+    e.g. (1, 2 as foo, 3)
+
+    NOTE: This rarely exists without a preceding type
+    and exists mostly for structural & layout reasons.
+    """
+
+    type = "struct_literal"
+    match_grammar: Matchable = Bracketed(
+        Delimited(
+            Sequence(
+                Ref("BaseExpressionElementGrammar"),
+                Ref("AliasExpressionSegment", optional=True),
+            ),
         ),
     )
 
 
+class TypedStructLiteralSegment(BaseSegment):
+    """An array literal segment."""
+
+    type = "typed_struct_literal"
+    match_grammar: Matchable = Sequence(
+        Ref("StructTypeSegment"),
+        Ref("StructLiteralSegment"),
+    )
+
+
 class ObjectLiteralSegment(BaseSegment):
     """An object literal segment."""
 
@@ -784,6 +879,19 @@ class TimeZoneGrammar(BaseSegment):
     )
 
 
+class BracketedArguments(BaseSegment):
+    """A series of bracketed arguments.
+
+    e.g. the bracketed part of numeric(1, 3)
+    """
+
+    type = "bracketed_arguments"
+    match_grammar = Bracketed(
+        # The brackets might be empty for some cases...
+        Delimited(Ref("LiteralGrammar"), optional=True),
+    )
+
+
 class DatatypeSegment(BaseSegment):
     """A data type segment.
 
@@ -820,15 +928,8 @@ class DatatypeSegment(BaseSegment):
                     allow_gaps=False,
                 ),
             ),
-            Bracketed(
-                OneOf(
-                    Delimited(Ref("ExpressionSegment")),
-                    # The brackets might be empty for some cases...
-                    optional=True,
-                ),
-                # There may be no brackets for some data types
-                optional=True,
-            ),
+            # There may be no brackets for some data types
+            Ref("BracketedArguments", optional=True),
             OneOf(
                 "UNSIGNED",  # UNSIGNED MySQL
                 Ref("CharCharacterSetGrammar"),
@@ -968,6 +1069,12 @@ class IndexReferenceSegment(ObjectReferenceSegment):
     type = "index_reference"
 
 
+class CollationReferenceSegment(ObjectReferenceSegment):
+    """A reference to a collation."""
+
+    type = "collation_reference"
+
+
 class RoleReferenceSegment(ObjectReferenceSegment):
     """A reference to a role, user, or account."""
 
@@ -1057,6 +1164,7 @@ class AliasExpressionSegment(BaseSegment):
 
     type = "alias_expression"
     match_grammar: Matchable = Sequence(
+        Indent,
         Ref.keyword("AS", optional=True),
         OneOf(
             Sequence(
@@ -1066,6 +1174,7 @@ class AliasExpressionSegment(BaseSegment):
             ),
             Ref("SingleQuotedIdentifierSegment"),
         ),
+        Dedent,
     )
 
 
@@ -1090,7 +1199,7 @@ class ShorthandCastSegment(BaseSegment):
 
 
 class QualifiedNumericLiteralSegment(BaseSegment):
-    """A numeric literal with one or more + or - signs preceding.
+    """A numeric literal with one + or - sign preceding.
 
     The qualified numeric literal is a compound of a raw
     literal and a plus/minus sign. We do it this way rather
@@ -1174,6 +1283,7 @@ class OverClauseSegment(BaseSegment):
 
     type = "over_clause"
     match_grammar: Matchable = Sequence(
+        Indent,
         Sequence(OneOf("IGNORE", "RESPECT"), "NULLS", optional=True),
         "OVER",
         OneOf(
@@ -1182,6 +1292,7 @@ class OverClauseSegment(BaseSegment):
                 Ref("WindowSpecificationSegment", optional=True),
             ),
         ),
+        Dedent,
     )
 
 
@@ -1410,9 +1521,13 @@ class FromExpressionSegment(BaseSegment):
             Ref("MLTableExpressionSegment"),
             Ref("FromExpressionElementSegment"),
         ),
-        Conditional(Dedent, indented_joins=False),
+        Dedent,
+        Conditional(Indent, indented_joins=True),
         AnyNumberOf(
-            Ref("JoinClauseSegment"), Ref("JoinLikeClauseGrammar"), optional=True
+            Sequence(
+                OneOf(Ref("JoinClauseSegment"), Ref("JoinLikeClauseGrammar")),
+            ),
+            optional=True,
         ),
         Conditional(Dedent, indented_joins=True),
     )
@@ -1555,6 +1670,7 @@ class SelectClauseSegment(BaseSegment):
             "LIMIT",
             "OVERLAPS",
             Ref("SetOperatorSegment"),
+            "FETCH",
         ),
         enforce_whitespace_preceding_terminator=True,
     )
@@ -1572,10 +1688,13 @@ class JoinClauseSegment(BaseSegment):
             Ref("JoinTypeKeywordsGrammar", optional=True),
             Ref("JoinKeywordsGrammar"),
             Indent,
+            Ref("FromExpressionElementSegment"),
+            AnyNumberOf(Ref("NestedJoinGrammar")),
+            Dedent,
             Sequence(
-                Ref("FromExpressionElementSegment"),
-                AnyNumberOf(Ref("NestedJoinGrammar")),
-                Conditional(Dedent, indented_using_on=False),
+                # Using nested sequence here so we only get the indents
+                # if we also have content.
+                Conditional(Indent, indented_using_on=True),
                 # NB: this is optional
                 OneOf(
                     # ON clause
@@ -1601,11 +1720,10 @@ class JoinClauseSegment(BaseSegment):
                     ),
                     # Unqualified joins *are* allowed. They just might not
                     # be a good idea.
-                    optional=True,
                 ),
-                Conditional(Indent, indented_using_on=False),
+                Conditional(Dedent, indented_using_on=True),
+                optional=True,
             ),
-            Dedent,
         ),
         # Note NATURAL joins do not support Join conditions
         Sequence(
@@ -1660,7 +1778,7 @@ class JoinOnConditionSegment(BaseSegment):
     type = "join_on_condition"
     match_grammar: Matchable = Sequence(
         "ON",
-        Conditional(Indent, indented_on_contents=True),
+        Conditional(ImplicitIndent, indented_on_contents=True),
         OptionallyBracketed(Ref("ExpressionSegment")),
         Conditional(Dedent, indented_on_contents=True),
     )
@@ -1741,11 +1859,21 @@ class WhenClauseSegment(BaseSegment):
     type = "when_clause"
     match_grammar: Matchable = Sequence(
         "WHEN",
-        Indent,
-        Ref("ExpressionSegment"),
+        # NOTE: The nested sequence here is to ensure the correct
+        # placement of the meta segments when templated elements
+        # are present.
+        # https://github.com/sqlfluff/sqlfluff/issues/3988
+        Sequence(
+            ImplicitIndent,
+            Ref("ExpressionSegment"),
+            Dedent,
+        ),
+        Conditional(Indent, indented_then=True),
         "THEN",
+        ImplicitIndent,
         Ref("ExpressionSegment"),
         Dedent,
+        Conditional(Dedent, indented_then=True),
     )
 
 
@@ -1754,7 +1882,7 @@ class ElseClauseSegment(BaseSegment):
 
     type = "else_clause"
     match_grammar: Matchable = Sequence(
-        "ELSE", Indent, Ref("ExpressionSegment"), Dedent
+        "ELSE", ImplicitIndent, Ref("ExpressionSegment"), Dedent
     )
 
 
@@ -1786,41 +1914,64 @@ class CaseExpressionSegment(BaseSegment):
 ansi_dialect.add(
     # Expression_A_Grammar
     # https://www.cockroachlabs.com/docs/v20.2/sql-grammar.html#a_expr
-    Expression_A_Grammar=Sequence(
-        OneOf(
-            Ref("Expression_C_Grammar"),
-            Sequence(
-                OneOf(
-                    Ref("SignedSegmentGrammar"),
-                    # Ref('TildeSegment'),
-                    Ref("NotOperatorGrammar"),
-                    "PRIOR",
-                    # used in CONNECT BY clauses (EXASOL, Snowflake, Postgres...)
-                ),
-                Ref("Expression_C_Grammar"),
-            ),
+    # The upstream grammar is defined recursively, which if implemented naively
+    # will cause SQLFluff to overflow the stack from recursive function calls.
+    # To work around this, the a_expr grammar is reworked a bit into sub-grammars
+    # that effectively provide tail recursion.
+    Expression_A_Unary_Operator_Grammar=OneOf(
+        # This grammar corresponds to the unary operator portion of the initial
+        # recursive block on the Cockroach Labs a_expr grammar.  It includes the
+        # unary operator matching sub-block, but not the recursive call to a_expr.
+        Ref(
+            "SignedSegmentGrammar",
+            exclude=Sequence(Ref("QualifiedNumericLiteralSegment")),
         ),
+        Ref("TildeSegment"),
+        Ref("NotOperatorGrammar"),
+        # used in CONNECT BY clauses (EXASOL, Snowflake, Postgres...)
+        "PRIOR",
+    ),
+    Tail_Recurse_Expression_A_Grammar=Sequence(
+        # This should be used instead of a recursive call to Expression_A_Grammar
+        # whenever the repeating element in Expression_A_Grammar makes a recursive
+        # call to itself at the _end_.  If it's in the middle then you still need
+        # to recurse into Expression_A_Grammar normally.
+        AnyNumberOf(Ref("Expression_A_Unary_Operator_Grammar")),
+        Ref("Expression_C_Grammar"),
+    ),
+    Expression_A_Grammar=Sequence(
+        # Grammar always starts with optional unary operator, plus c_expr.  This
+        # section must always match the tail recurse grammar.
+        Ref("Tail_Recurse_Expression_A_Grammar"),
+        # As originally pictured in the diagram, the grammar then repeats itself
+        # for any number of times with a loop.
         AnyNumberOf(
             OneOf(
+                # This corresponds to the big repeating block in the diagram that
+                # has dozens of possibilities.  Some of them are
+                # recursive.  If the item __ends__ with a recursive call to "a_expr",
+                # use Ref("Tail_Recurse_Expression_A_Grammar") instead so that the
+                # stack depth can be minimized.  If the item has a recursive call
+                # in the middle of the expression, you'll need to recurse
+                # Expression_A_Grammar normally.
+                #
+                # We need to add a lot more here...
                 Sequence(
-                    OneOf(
-                        Sequence(
-                            Ref.keyword("NOT", optional=True),
-                            Ref("LikeGrammar"),
-                        ),
-                        Sequence(
-                            Ref("BinaryOperatorGrammar"),
-                            Ref.keyword("NOT", optional=True),
-                        ),
-                        # We need to add a lot more here...
+                    Sequence(
+                        Ref.keyword("NOT", optional=True),
+                        Ref("LikeGrammar"),
                     ),
-                    Ref("Expression_C_Grammar"),
+                    Ref("Expression_A_Grammar"),
                     Sequence(
                         Ref.keyword("ESCAPE"),
-                        Ref("Expression_C_Grammar"),
+                        Ref("Tail_Recurse_Expression_A_Grammar"),
                         optional=True,
                     ),
                 ),
+                Sequence(
+                    Ref("BinaryOperatorGrammar"),
+                    Ref("Tail_Recurse_Expression_A_Grammar"),
+                ),
                 Sequence(
                     Ref.keyword("NOT", optional=True),
                     "IN",
@@ -1847,18 +1998,12 @@ ansi_dialect.add(
                 Ref("IsNullGrammar"),
                 Ref("NotNullGrammar"),
                 Ref("CollateGrammar"),
-                Sequence(
-                    # e.g. NOT EXISTS, but other expressions could be met as
-                    # well by inverting the condition with the NOT operator
-                    "NOT",
-                    Ref("Expression_C_Grammar"),
-                ),
                 Sequence(
                     Ref.keyword("NOT", optional=True),
                     "BETWEEN",
                     Ref("Expression_B_Grammar"),
                     "AND",
-                    Ref("Expression_A_Grammar"),
+                    Ref("Tail_Recurse_Expression_A_Grammar"),
                 ),
             )
         ),
@@ -1866,22 +2011,37 @@ ansi_dialect.add(
     # Expression_B_Grammar: Does not directly feed into Expression_A_Grammar
     # but is used for a BETWEEN statement within Expression_A_Grammar.
     # https://www.cockroachlabs.com/docs/v20.2/sql-grammar.html#b_expr
-    Expression_B_Grammar=Sequence(
-        OneOf(
-            Ref("Expression_C_Grammar"),
-            Sequence(
-                Ref("SignedSegmentGrammar"),
-                Ref("Expression_B_Grammar"),
-            ),
+    #
+    # We use a similar trick as seen with Expression_A_Grammar to avoid recursion
+    # by using a tail recursion grammar.  See the comments for a_expr to see how
+    # that works.
+    Expression_B_Unary_Operator_Grammar=OneOf(
+        Ref(
+            "SignedSegmentGrammar",
+            exclude=Sequence(Ref("QualifiedNumericLiteralSegment")),
         ),
+        Ref("TildeSegment"),
+    ),
+    Tail_Recurse_Expression_B_Grammar=Sequence(
+        # Only safe to use if the recursive call is at the END of the repeating
+        # element in the main b_expr portion
+        AnyNumberOf(Ref("Expression_B_Unary_Operator_Grammar")),
+        Ref("Expression_C_Grammar"),
+    ),
+    Expression_B_Grammar=Sequence(
+        # Always start with tail recursion element!
+        Ref("Tail_Recurse_Expression_B_Grammar"),
         AnyNumberOf(
-            Sequence(
-                OneOf(
-                    Ref("ArithmeticBinaryOperatorGrammar"),
-                    Ref("StringBinaryOperatorGrammar"),
-                    Ref("ComparisonOperatorGrammar"),
+            OneOf(
+                Sequence(
+                    OneOf(
+                        Ref("ArithmeticBinaryOperatorGrammar"),
+                        Ref("StringBinaryOperatorGrammar"),
+                        Ref("ComparisonOperatorGrammar"),
+                    ),
+                    Ref("Tail_Recurse_Expression_B_Grammar"),
                 ),
-                Ref("Expression_C_Grammar"),
+                # TODO: Add more things from b_expr here
             ),
         ),
     ),
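
The trick described in the comments above is ordinary tail-call elimination
applied to a grammar: a rule of the shape a_expr := prefix* c_expr (op
a_expr)? is flattened so the trailing self-reference becomes a repetition
(AnyNumberOf) rather than a nested match. A toy parser shows why that
matters for stack depth (all names invented):

    import sys

    def parse_naive(tokens, i=0):
        # expr := "-" expr | NUMBER  -- one Python frame per leading "-".
        if tokens[i] == "-":
            return -parse_naive(tokens, i + 1)
        return tokens[i]

    def parse_flat(tokens):
        # expr := "-"* NUMBER  -- same language, constant stack depth.
        sign, i = 1, 0
        while tokens[i] == "-":
            sign, i = -sign, i + 1
        return sign * tokens[i]

    deep = ["-"] * (sys.getrecursionlimit() + 100) + [42]
    print(parse_flat(deep))   # works at any depth
    # parse_naive(deep)       # would raise RecursionError
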
@@ -1895,7 +2055,7 @@ ansi_dialect.add(
                 Ref("Expression_D_Grammar"),
                 Ref("CaseExpressionSegment"),
             ),
-            AnyNumberOf(Ref("TimeZoneGrammar")),
+            AnyNumberOf(Ref("TimeZoneGrammar"), optional=True),
         ),
         Ref("ShorthandCastSegment"),
     ),
@@ -1928,8 +2088,8 @@ ansi_dialect.add(
             Ref("SelectStatementSegment"),
             Ref("LiteralGrammar"),
             Ref("IntervalExpressionSegment"),
-            Ref("TypelessStructSegment"),
-            Ref("TypelessArraySegment"),
+            Ref("TypedStructLiteralSegment"),
+            Ref("ArrayExpressionSegment"),
             Ref("ColumnReferenceSegment"),
             # For triggers, we allow "NEW.*" but not just "*" nor "a.b.*"
             # So can't use WildcardIdentifierSegment nor WildcardExpressionSegment
@@ -2075,7 +2235,15 @@ class WhereClauseSegment(BaseSegment):
     )
     parse_grammar: Optional[Matchable] = Sequence(
         "WHERE",
-        Indent,
+        # NOTE: The indent here is implicit to allow
+        # constructions like:
+        #
+        #    WHERE a
+        #        AND b
+        #
+        # to be valid without forcing an indent between
+        # "WHERE" and "a".
+        ImplicitIndent,
         OptionallyBracketed(Ref("ExpressionSegment")),
         Dedent,
     )
@@ -2154,7 +2322,7 @@ class HavingClauseSegment(BaseSegment):
     )
     parse_grammar: Optional[Matchable] = Sequence(
         "HAVING",
-        Indent,
+        ImplicitIndent,
         OptionallyBracketed(Ref("ExpressionSegment")),
         Dedent,
     )
@@ -2167,16 +2335,14 @@ class LimitClauseSegment(BaseSegment):
     match_grammar: Matchable = Sequence(
         "LIMIT",
         Indent,
+        Ref("NumericLiteralSegment"),
         OneOf(
-            Ref("NumericLiteralSegment"),
-            Sequence(
-                Ref("NumericLiteralSegment"), "OFFSET", Ref("NumericLiteralSegment")
-            ),
+            Sequence("OFFSET", Ref("NumericLiteralSegment")),
             Sequence(
-                Ref("NumericLiteralSegment"),
                 Ref("CommaSegment"),
                 Ref("NumericLiteralSegment"),
             ),
+            optional=True,
         ),
         Dedent,
     )
@@ -2215,6 +2381,22 @@ class NamedWindowSegment(BaseSegment):
     )
 
 
+class FetchClauseSegment(BaseSegment):
+    """A `FETCH` clause like in `SELECT."""
+
+    type = "fetch_clause"
+    match_grammar: Matchable = Sequence(
+        "FETCH",
+        OneOf(
+            "FIRST",
+            "NEXT",
+        ),
+        Ref("NumericLiteralSegment"),
+        OneOf("ROW", "ROWS"),
+        "ONLY",
+    )
+
+
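
The clause covers the standard form FETCH {FIRST | NEXT} n {ROW | ROWS}
ONLY. A quick smoke test through the public API (sqlfluff reports parse
failures under the PRS code, so an empty list means the clause parsed):

    import sqlfluff

    sql = "SELECT a FROM my_table ORDER BY a FETCH FIRST 10 ROWS ONLY\n"
    parse_errors = [
        v for v in sqlfluff.lint(sql, dialect="ansi") if v["code"] == "PRS"
    ]
    print(parse_errors)  # expected: []
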
 class NamedWindowExpressionSegment(BaseSegment):
     """Named window expression."""
 
@@ -2222,8 +2404,11 @@ class NamedWindowExpressionSegment(BaseSegment):
     match_grammar: Matchable = Sequence(
         Ref("SingleIdentifierGrammar"),  # Window name
         "AS",
-        Bracketed(
-            Ref("WindowSpecificationSegment"),
+        OneOf(
+            Ref("SingleIdentifierGrammar"),  # Window name
+            Bracketed(
+                Ref("WindowSpecificationSegment"),
+            ),
         ),
     )
 
@@ -2279,7 +2464,6 @@ class UnorderedSelectStatementSegment(BaseSegment):
             Ref("WithDataClauseSegment"),
             Ref("OrderByClauseSegment"),
             Ref("LimitClauseSegment"),
-            Ref("NamedWindowSegment"),
         ),
         enforce_whitespace_preceding_terminator=True,
     )
@@ -2294,6 +2478,7 @@ class UnorderedSelectStatementSegment(BaseSegment):
         Ref("GroupByClauseSegment", optional=True),
         Ref("HavingClauseSegment", optional=True),
         Ref("OverlapsClauseSegment", optional=True),
+        Ref("NamedWindowSegment", optional=True),
     )
 
 
@@ -2321,6 +2506,7 @@ class SelectStatementSegment(BaseSegment):
     parse_grammar: Matchable = UnorderedSelectStatementSegment.parse_grammar.copy(
         insert=[
             Ref("OrderByClauseSegment", optional=True),
+            Ref("FetchClauseSegment", optional=True),
             Ref("LimitClauseSegment", optional=True),
             Ref("NamedWindowSegment", optional=True),
         ]
@@ -2332,6 +2518,7 @@ ansi_dialect.add(
     SelectableGrammar=OneOf(
         OptionallyBracketed(Ref("WithCompoundStatementSegment")),
         Ref("NonWithSelectableGrammar"),
+        Bracketed(Ref("SelectableGrammar")),
     ),
     # Things that behave like select statements, which can form part of with
     # expressions.
@@ -2355,6 +2542,7 @@ ansi_dialect.add(
         # otherwise we can't because any order by clauses should belong
         # to the set expression.
         Bracketed(Ref("SelectStatementSegment")),
+        Bracketed(Ref("NonSetSelectableGrammar")),
     ),
 )
 
@@ -2586,9 +2774,7 @@ class MergeInsertClauseSegment(BaseSegment):
         Indent,
         Ref("BracketedColumnReferenceListGrammar", optional=True),
         Dedent,
-        Indent,
         Ref("ValuesClauseSegment", optional=True),
-        Dedent,
     )
 
 
@@ -2635,6 +2821,7 @@ class ColumnConstraintSegment(BaseSegment):
             Sequence(  # DEFAULT <value>
                 "DEFAULT",
                 OneOf(
+                    Ref("ShorthandCastSegment"),
                     Ref("LiteralGrammar"),
                     Ref("FunctionSegment"),
                     Ref("BareFunctionSegment"),
@@ -2642,7 +2829,7 @@ class ColumnConstraintSegment(BaseSegment):
             ),
             Ref("PrimaryKeyGrammar"),
             Ref("UniqueKeyGrammar"),  # UNIQUE
-            "AUTO_INCREMENT",  # AUTO_INCREMENT (MySQL)
+            Ref("AutoIncrementGrammar"),
             Ref("ReferenceDefinitionGrammar"),  # REFERENCES reftable [ ( refcolumn) ]x
             Ref("CommentClauseSegment"),
         ),
@@ -2719,33 +2906,16 @@ class TableEndClauseSegment(BaseSegment):
     match_grammar: Matchable = Nothing()
 
 
-class TypelessStructSegment(BaseSegment):
-    """Expression to construct a STRUCT with implicit types.
-
-    (Yes in BigQuery for example)
-    """
-
-    type = "typeless_struct"
-    match_grammar: Matchable = Nothing()
-
-
-class TypelessArraySegment(BaseSegment):
+class ArrayExpressionSegment(BaseSegment):
     """Expression to construct a ARRAY from a subquery.
 
     (Yes in BigQuery for example)
-    """
-
-    type = "typeless_array"
-    match_grammar: Matchable = Nothing()
 
-
-class StructTypeSegment(BaseSegment):
-    """Expression to construct a STRUCT datatype.
-
-    (Used in BigQuery for example)
+    NOTE: This differs from an array _literal_ in that it
+    takes the form of an expression.
     """
 
-    type = "struct_type"
+    type = "array_expression"
     match_grammar: Matchable = Nothing()
 
 
@@ -2991,11 +3161,10 @@ class DropIndexStatementSegment(BaseSegment):
     """A `DROP INDEX` statement."""
 
     type = "drop_index_statement"
-    # DROP INDEX <Index name> [CONCURRENTLY] [IF EXISTS] {RESTRICT | CASCADE}
+    # DROP INDEX <Index name> [IF EXISTS] {RESTRICT | CASCADE}
     match_grammar: Matchable = Sequence(
         "DROP",
         "INDEX",
-        Ref.keyword("CONCURRENTLY", optional=True),
         Ref("IfExistsGrammar", optional=True),
         Ref("IndexReferenceSegment"),
         Ref("DropBehaviorGrammar", optional=True),
@@ -3312,6 +3481,60 @@ class SetClauseSegment(BaseSegment):
     )
 
 
+class CreateCastStatementSegment(BaseSegment):
+    """A `CREATE CAST` statement.
+
+    https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#_11_63_user_defined_cast_definition
+    """
+
+    type = "create_cast_statement"
+
+    match_grammar: Matchable = Sequence(
+        "CREATE",
+        "CAST",
+        Bracketed(
+            Ref("DatatypeSegment"),
+            "AS",
+            Ref("DatatypeSegment"),
+        ),
+        "WITH",
+        Ref.keyword("SPECIFIC", optional=True),
+        OneOf(
+            "ROUTINE",
+            "FUNCTION",
+            "PROCEDURE",
+            Sequence(
+                OneOf("INSTANCE", "STATIC", "CONSTRUCTOR", optional=True),
+                "METHOD",
+            ),
+        ),
+        Ref("FunctionNameSegment"),
+        Ref("FunctionParameterListGrammar", optional=True),
+        Sequence("FOR", Ref("ObjectReferenceSegment"), optional=True),
+        Sequence("AS", "ASSIGNMENT", optional=True),
+    )
+
+
+class DropCastStatementSegment(BaseSegment):
+    """A `DROP CAST` statement.
+
+    https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#_11_64_drop_user_defined_cast_statement
+    """
+
+    type = "drop_cast_statement"
+
+    match_grammar: Matchable = Sequence(
+        "DROP",
+        "CAST",
+        Bracketed(
+            Ref("DatatypeSegment"),
+            "AS",
+            Ref("DatatypeSegment"),
+        ),
+        Ref("DropBehaviorGrammar", optional=True),
+    )
+
+
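
The statement shapes these two segments accept, per the SQL:2016 grammar
linked above (a sketch; the UDT and function names are invented):

    import sqlfluff

    for stmt in (
        "CREATE CAST (my_udt AS INT) WITH FUNCTION to_int AS ASSIGNMENT",
        "DROP CAST (my_udt AS INT) RESTRICT",
    ):
        errs = [v for v in sqlfluff.lint(stmt, dialect="ansi") if v["code"] == "PRS"]
        print(stmt, "->", "parses" if not errs else errs)
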
 class FunctionDefinitionGrammar(BaseSegment):
     """This is the body of a `CREATE FUNCTION AS` statement."""
 
@@ -3533,6 +3756,8 @@ class StatementSegment(BaseSegment):
         Ref("CreateViewStatementSegment"),
         Ref("DeleteStatementSegment"),
         Ref("UpdateStatementSegment"),
+        Ref("CreateCastStatementSegment"),
+        Ref("DropCastStatementSegment"),
         Ref("CreateFunctionStatementSegment"),
         Ref("DropFunctionStatementSegment"),
         Ref("CreateModelStatementSegment"),
@@ -3545,7 +3770,6 @@ class StatementSegment(BaseSegment):
         Ref("DropSequenceStatementSegment"),
         Ref("CreateTriggerStatementSegment"),
         Ref("DropTriggerStatementSegment"),
-        Bracketed(Ref("StatementSegment")),
     )
 
     def get_table_references(self):
@@ -3817,7 +4041,10 @@ class DropTriggerStatementSegment(BaseSegment):
     type = "drop_trigger"
 
     match_grammar: Matchable = Sequence(
-        "DROP", "TRIGGER", Ref("TriggerReferenceSegment")
+        "DROP",
+        "TRIGGER",
+        Ref("IfExistsGrammar", optional=True),
+        Ref("TriggerReferenceSegment"),
     )
 
 
diff --git a/src/sqlfluff/dialects/dialect_ansi_keywords.py b/src/sqlfluff/dialects/dialect_ansi_keywords.py
index 68f83fa..f0d9097 100644
--- a/src/sqlfluff/dialects/dialect_ansi_keywords.py
+++ b/src/sqlfluff/dialects/dialect_ansi_keywords.py
@@ -12,6 +12,7 @@ OUTER
 INTERVAL
 CASE
 FULL
+NOT
 NULL
 UNION
 IGNORE
@@ -135,7 +136,6 @@ COMMITTED
 COMPLETION
 COMPRESS
 COMPUTE
-CONCURRENTLY
 CONDITION
 CONDITION_NUMBER
 CONNECT
@@ -185,6 +185,7 @@ DATETIME
 DATETIME_INTERVAL_CODE
 DATETIME_INTERVAL_PRECISION
 DAY
+DAYS
 DAY_HOUR
 DAY_MICROSECOND
 DAY_MINUTE
@@ -491,7 +492,6 @@ NOORDER
 NORMALIZE
 NORMALIZED
 NOSUPERUSER
-NOT
 NOTHING
 NOTIFY
 NOTNULL
diff --git a/src/sqlfluff/dialects/dialect_athena.py b/src/sqlfluff/dialects/dialect_athena.py
index 73d6f67..a040595 100644
--- a/src/sqlfluff/dialects/dialect_athena.py
+++ b/src/sqlfluff/dialects/dialect_athena.py
@@ -21,8 +21,7 @@ from sqlfluff.core.parser import (
     StringParser,
     SymbolSegment,
 )
-from sqlfluff.core.parser.grammar.anyof import AnySetOf
-from sqlfluff.core.parser.segments.raw import CodeSegment, KeywordSegment, RawSegment
+from sqlfluff.core.parser.segments.raw import CodeSegment, KeywordSegment
 from sqlfluff.dialects import dialect_ansi as ansi
 from sqlfluff.dialects.dialect_athena_keywords import (
     athena_reserved_keywords,
@@ -73,8 +72,7 @@ athena_dialect.add(
         Ref("EqualsSegment"),
         Ref("QuotedLiteralSegment"),
     ),
-    LocationGrammar=Sequence("LOCATION", Ref("S3UrlGrammar")),
-    S3UrlGrammar=RegexParser(r"^'s3://.*", RawSegment),
+    LocationGrammar=Sequence("LOCATION", Ref("QuotedLiteralSegment")),
     BracketedPropertyListGrammar=Bracketed(Delimited(Ref("PropertyGrammar"))),
     CTASPropertyGrammar=Sequence(
         OneOf(
@@ -87,11 +85,42 @@ athena_dialect.add(
             "orc_compression",
             "parquet_compression",
             "field_delimiter",
+            "location",
         ),
         Ref("EqualsSegment"),
         Ref("LiteralGrammar"),
     ),
-    BracketedCTASPropertyGrammar=Bracketed(Delimited(Ref("CTASPropertyGrammar"))),
+    CTASIcebergPropertyGrammar=Sequence(
+        OneOf(
+            "external_location",
+            "format",
+            "partitioned_by",
+            "bucketed_by",
+            "bucket_count",
+            "write_compression",
+            "orc_compression",
+            "parquet_compression",
+            "field_delimiter",
+            "location",
+            "is_external",
+            "table_type",
+            "partitioning",
+            "vacuum_max_snapshot_age_ms",
+            "vacuum_min_snapshots_to_keep",
+        ),
+        Ref("EqualsSegment"),
+        Ref("LiteralGrammar"),
+    ),
+    BracketedCTASPropertyGrammar=Bracketed(
+        OneOf(
+            Delimited(
+                Ref("CTASPropertyGrammar"),
+            ),
+            Delimited(
+                Ref("CTASIcebergPropertyGrammar"),
+            ),
+        ),
+    ),
     UnloadPropertyGrammar=Sequence(
         OneOf(
             "format",
@@ -165,17 +194,34 @@ athena_dialect.add(
 )
 
 athena_dialect.replace(
+    LiteralGrammar=ansi_dialect.get_grammar("LiteralGrammar").copy(
+        insert=[
+            Ref("ParameterSegment"),
+        ]
+    ),
+    Accessor_Grammar=Sequence(
+        AnyNumberOf(
+            Ref("ArrayAccessorSegment"),
+            optional=True,
+        ),
+        AnyNumberOf(
+            Sequence(
+                Ref("ObjectReferenceDelimiterGrammar"),
+                Ref("ObjectReferenceSegment"),
+            ),
+            optional=True,
+        ),
+    ),
     QuotedLiteralSegment=OneOf(
         TypedParser("single_quote", ansi.LiteralSegment, type="quoted_literal"),
         TypedParser("double_quote", ansi.LiteralSegment, type="quoted_literal"),
         TypedParser("back_quote", ansi.LiteralSegment, type="quoted_literal"),
     ),
-    SimpleArrayTypeGrammar=Ref.keyword("ARRAY"),
     TrimParametersGrammar=Nothing(),
     NakedIdentifierSegment=SegmentGenerator(
         # Generate the anti template from the set of reserved keywords
         lambda dialect: RegexParser(
-            r"([_]+|[A-Z0-9_]*[A-Z][A-Z0-9_]*)",
+            r"[A-Z0-9_]*[A-Z_][A-Z0-9_]*",
             ansi.IdentifierSegment,
             type="naked_identifier",
             anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$",
@@ -197,13 +243,86 @@ athena_dialect.replace(
 )
 
 
+class ArrayTypeSegment(ansi.ArrayTypeSegment):
+    """Prefix for array literals specifying the type."""
+
+    type = "array_type"
+    match_grammar = Sequence(
+        "ARRAY",
+        Ref("ArrayTypeSchemaSegment", optional=True),
+    )
+
+
+class ArrayTypeSchemaSegment(ansi.ArrayTypeSegment):
+    """Prefix for array literals specifying the type."""
+
+    type = "array_type_schema"
+    match_grammar = Bracketed(
+        Ref("DatatypeSegment"),
+        bracket_pairs_set="angle_bracket_pairs",
+        bracket_type="angle",
+    )
+
+
+class MapTypeSegment(BaseSegment):
+    """Expression to construct a MAP datatype."""
+
+    type = "map_type"
+    match_grammar = Sequence(
+        "MAP",
+        Ref("MapTypeSchemaSegment", optional=True),
+    )
+
+
+class MapTypeSchemaSegment(BaseSegment):
+    """Expression to construct the schema of a MAP datatype."""
+
+    type = "map_type_schema"
+    match_grammar = Bracketed(
+        Sequence(
+            Ref("PrimitiveTypeSegment"),
+            Ref("CommaSegment"),
+            Ref("DatatypeSegment"),
+        ),
+        bracket_pairs_set="angle_bracket_pairs",
+        bracket_type="angle",
+    )
+
+
+class StructTypeSegment(ansi.StructTypeSegment):
+    """Expression to construct a STRUCT datatype."""
+
+    match_grammar = Sequence(
+        "STRUCT",
+        Ref("StructTypeSchemaSegment", optional=True),
+    )
+
+
+class StructTypeSchemaSegment(BaseSegment):
+    """Expression to construct the schema of a STRUCT datatype."""
+
+    type = "struct_type_schema"
+    match_grammar = Bracketed(
+        Delimited(
+            Sequence(
+                Ref("NakedIdentifierSegment"),
+                Ref("ColonSegment"),
+                Ref("DatatypeSegment"),
+                Ref("CommentGrammar", optional=True),
+            ),
+            bracket_pairs_set="angle_bracket_pairs",
+        ),
+        bracket_pairs_set="angle_bracket_pairs",
+        bracket_type="angle",
+    )
+
+
 class PrimitiveTypeSegment(BaseSegment):
-    """Primitive data types.
+    """Support Athena subset of Hive types.
 
-    Since DDL is based on Hive and DML based on Prestodb this class has
-    primitives that may not work on specific situations
-    - Hive: https://cwiki.apache.org/confluence/display/hive/languagemanual+types
-    - PrestoDb: https://prestodb.io/docs/0.217/language/types.html
+    Primary Source: https://docs.aws.amazon.com/athena/latest/ug/data-types.html
+    Additional Details:
+        https://cwiki.apache.org/confluence/display/Hive/LanguageManual+Types
     """
 
     type = "primitive_type"
@@ -211,114 +330,56 @@ class PrimitiveTypeSegment(BaseSegment):
         "BOOLEAN",
         "TINYINT",
         "SMALLINT",
-        "INTEGER",
-        "INT",
+        "INTEGER",  # used in DML queries
+        "INT",  # used in DDL queries
         "BIGINT",
-        "REAL",
-        "FLOAT",
-        Sequence("DOUBLE", Ref.keyword("PRECISION", optional=True)),
+        "DOUBLE",
+        "FLOAT",  # used in DDL
+        "REAL",  # used "in SQL functions like SELECT CAST"
         Sequence(
-            "DECIMAL",
-            Bracketed(
-                Ref("NumericLiteralSegment"),
-                Ref("CommaSegment"),
-                Ref("NumericLiteralSegment"),
-                optional=True,
-            ),
+            OneOf("DECIMAL", "CHAR", "VARCHAR"),
+            Ref("BracketedArguments", optional=True),
         ),
-        "NUMERIC",
         "STRING",
-        "VARCHAR",
-        "CHAR",
-        "VARBINARY",
-        "JSON",
+        "BINARY",
         "DATE",
         "TIMESTAMP",
-        "INTERVAL",
+        "VARBINARY",
+        "JSON",
         "TIME",
         "IPADDRESS",
         "HyperLogLog",
         "P4HyperLogLog",
-        "QDigest",
     )
 
 
 class DatatypeSegment(BaseSegment):
-    """Data types."""
+    """Support complex Athena data types.
+
+    Complex data types are typically used in either DDL statements or as
+    the target type in casts.
+    """
 
     type = "data_type"
     match_grammar = OneOf(
         Ref("PrimitiveTypeSegment"),
-        Sequence(
-            "ARRAY",
-            Bracketed(
-                Ref("DatatypeSegment"),
-                bracket_pairs_set="angle_bracket_pairs",
-                bracket_type="angle",
-            ),
-        ),
-        Sequence(
-            "MAP",
-            Bracketed(
-                Sequence(
-                    Ref("PrimitiveTypeSegment"),
-                    Ref("CommaSegment"),
-                    Ref("DatatypeSegment"),
-                ),
-                bracket_pairs_set="angle_bracket_pairs",
-                bracket_type="angle",
-            ),
-        ),
-        Sequence(
-            "STRUCT",
-            Bracketed(
-                Delimited(
-                    Sequence(
-                        Ref("NakedIdentifierSegment"),
-                        Ref("ColonSegment"),
-                        Ref("DatatypeSegment"),
-                        Ref("CommentGrammar", optional=True),
-                    ),
-                    bracket_pairs_set="angle_bracket_pairs",
-                ),
-                bracket_pairs_set="angle_bracket_pairs",
-                bracket_type="angle",
-            ),
-        ),
-        # Only hive
-        Sequence(
-            "UNIONTYPE",
-            Bracketed(
-                Delimited(
-                    Ref("DatatypeSegment"), bracket_pairs_set="angle_bracket_pairs"
-                ),
-                bracket_pairs_set="angle_bracket_pairs",
-                bracket_type="angle",
-            ),
-        ),
-        # Only PrestoDb
+        Ref("StructTypeSegment"),
+        Ref("ArrayTypeSegment"),
+        Ref("MapTypeSegment"),
         Sequence(
             "ROW",
             Bracketed(
                 Delimited(
-                    AnySetOf(
-                        Sequence(Ref("NakedIdentifierSegment"), Ref("DatatypeSegment")),
+                    AnyNumberOf(
+                        Sequence(
+                            Ref("NakedIdentifierSegment"),
+                            Ref("DatatypeSegment"),
+                        ),
                         Ref("LiteralGrammar"),
                     )
                 )
             ),
         ),
-        # array types
-        OneOf(
-            AnyNumberOf(
-                Bracketed(
-                    Ref("ExpressionSegment", optional=True), bracket_type="square"
-                )
-            ),
-            Ref("SimpleArrayTypeGrammar"),
-            Sequence(Ref("SimpleArrayTypeGrammar"), Ref("ArrayLiteralSegment")),
-            optional=True,
-        ),
         Ref("DatetimeWithTZSegment"),
     )
 
@@ -327,7 +388,12 @@ class StatementSegment(ansi.StatementSegment):
     """Overriding StatementSegment to allow for additional segment parsing."""
 
     parse_grammar = ansi.StatementSegment.parse_grammar.copy(
-        insert=[Ref("MsckRepairTableStatementSegment"), Ref("UnloadStatementSegment")],
+        insert=[
+            Ref("MsckRepairTableStatementSegment"),
+            Ref("UnloadStatementSegment"),
+            Ref("PrepareStatementSegment"),
+            Ref("ExecuteStatementSegment"),
+        ],
         remove=[
             Ref("TransactionStatementSegment"),
             Ref("CreateSchemaStatementSegment"),
@@ -405,7 +471,9 @@ class CreateTableStatementSegment(BaseSegment):
             Sequence(
                 Sequence("WITH", Ref("BracketedCTASPropertyGrammar"), optional=True),
                 "AS",
-                OptionallyBracketed(Ref("SelectableGrammar")),
+                OptionallyBracketed(
+                    Ref("SelectableGrammar"),
+                ),
                 Sequence("WITH NO DATA", optional=True),
             ),
         ),
@@ -510,6 +578,49 @@ class UnloadStatementSegment(BaseSegment):
     )
 
 
+class PrepareStatementSegment(BaseSegment):
+    """A `prepare` statement.
+
+    https://docs.aws.amazon.com/athena/latest/ug/querying-with-prepared-statements.html
+    """
+
+    type = "prepare_statement"
+    match_grammar = Sequence(
+        "PREPARE",
+        Ref("TableReferenceSegment"),
+        "FROM",
+        OptionallyBracketed(
+            OneOf(
+                Ref("SelectableGrammar"),
+                Ref("UnloadStatementSegment"),
+                Ref("InsertStatementSegment"),
+            ),
+        ),
+    )
+
+
+class ExecuteStatementSegment(BaseSegment):
+    """An `execute` statement.
+
+    https://docs.aws.amazon.com/athena/latest/ug/querying-with-prepared-statements.html
+    """
+
+    type = "execute_statement"
+    match_grammar = Sequence(
+        "EXECUTE",
+        Ref("TableReferenceSegment"),
+        OneOf(
+            Sequence(
+                "USING",
+                Delimited(
+                    Ref("LiteralGrammar"),
+                ),
+            ),
+            optional=True,
+        ),
+    )
+
+
 class IntervalExpressionSegment(BaseSegment):
     """An interval expression segment.
 
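
The hunks above give the Athena dialect typed ARRAY/MAP/STRUCT segments, PREPARE and EXECUTE statements, and a parameter placeholder in LiteralGrammar. A minimal, hypothetical sketch of how this surfaces through sqlfluff's public Python API (statement and table names invented; unparsable input shows up in lint results under the "PRS" code):

    import sqlfluff

    # Hypothetical prepared-statement pair exercising the new
    # PREPARE/EXECUTE grammar and the "?" positional placeholder.
    sql = (
        "PREPARE my_select FROM SELECT * FROM my_table WHERE x = ?;\n"
        "EXECUTE my_select USING 10;\n"
    )
    for violation in sqlfluff.lint(sql, dialect="athena"):
        print(violation["code"], violation["description"])
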
diff --git a/src/sqlfluff/dialects/dialect_athena_keywords.py b/src/sqlfluff/dialects/dialect_athena_keywords.py
index 83c2b3d..cd7bef4 100644
--- a/src/sqlfluff/dialects/dialect_athena_keywords.py
+++ b/src/sqlfluff/dialects/dialect_athena_keywords.py
@@ -33,8 +33,6 @@ athena_reserved_keywords = [
     "CURRENT",
     "CURSOR",
     "DATABASE",
-    "DATE",
-    "DAYOFWEEK",
     "DECIMAL",
     "DELETE",
     "DESCRIBE",
@@ -99,6 +97,7 @@ athena_reserved_keywords = [
     "PERCENT",
     "PRECEDING",
     "PRECISION",
+    "PREPARE",
     "PRESERVE",
     "PRIMARY",
     "PROCEDURE",
@@ -112,8 +111,6 @@ athena_reserved_keywords = [
     "RLIKE",
     "ROLLBACK",
     "ROLLUP",
-    "ROW",
-    "ROWS",
     "SELECT",
     "SET",
     "SMALLINT",
@@ -122,8 +119,6 @@ athena_reserved_keywords = [
     "TABLE",
     "TABLESAMPLE",
     "THEN",
-    "TIME",
-    "TIMESTAMP",
     "TO",
     "TRANSFORM",
     "TRIGGER",
@@ -133,15 +128,12 @@ athena_reserved_keywords = [
     "UNION",
     "UNIQUEJOIN",
     "UPDATE",
-    "USER",
     "USING",
     "UTC_TMESTAMP",
     "VALUES",
     "VARCHAR",
-    "VIEWS",
     "WHEN",
     "WHERE",
-    "WINDOW",
     "WITH",
 ]
 
@@ -175,8 +167,10 @@ athena_unreserved_keywords = [
     "CONTINUE",
     "DATA",
     "DATABASES",
+    "DATE",
     "DATETIME",
     "DAY",
+    "DAYOFWEEK",
     "DBPROPERTIES",
     "DEFERRED",
     "DEFINED",
@@ -200,7 +194,6 @@ athena_unreserved_keywords = [
     "FILEFORMAT",
     "FIRST",
     "FORMAT",
-    "FORMAT",
     "FORMATTED",
     "FUNCTIONS",
     "HOLD_DDLTIME",
@@ -214,6 +207,7 @@ athena_unreserved_keywords = [
     "INPUTDRIVER",
     "INPUTFORMAT",
     "IPADDRESS",
+    "IS_EXTERNAL",
     "ISOLATION",
     "ITEMS",
     "JAR",
@@ -254,6 +248,7 @@ athena_unreserved_keywords = [
     "PARQUET_COMPRESSION",
     "PARTITIONED_BY",
     "PARTITIONED",
+    "PARTITIONING",
     "PARTITIONS",
     "PLUS",
     "PRETTY",
@@ -278,6 +273,8 @@ athena_unreserved_keywords = [
     "RLIKE",
     "ROLE",
     "ROLES",
+    "ROW",
+    "ROWS",
     "SCHEMA",
     "SCHEMAS",
     "SECOND",
@@ -299,10 +296,13 @@ athena_unreserved_keywords = [
     "STREAMTABLE",
     "STRING",
     "STRUCT",
+    "TABLE_TYPE",
     "TABLES",
     "TBLPROPERTIES",
     "TEMPORARY",
     "TERMINATED",
+    "TIME",
+    "TIMESTAMP",
     "TIMESTAMPTZ",
     "TINYINT",
     "TOUCH",
@@ -317,11 +317,16 @@ athena_unreserved_keywords = [
     "UNSIGNED",
     "URI",
     "USE",
+    "USER",
     "UTC",
     "UTCTIMESTAMP",
+    "VACUUM_MAX_SNAPSHOT_AGE_MS",
+    "VACUUM_MIN_SNAPSHOTS_TO_KEEP",
     "VALIDATE",
     "VALUE_TYPE",
     "VIEW",
+    "VIEWS",
+    "WINDOW",
     "WHILE",
     "WRITE_COMPRESSION",
     "YEAR",
diff --git a/src/sqlfluff/dialects/dialect_bigquery.py b/src/sqlfluff/dialects/dialect_bigquery.py
index 414f76d..6bccf7c 100644
--- a/src/sqlfluff/dialects/dialect_bigquery.py
+++ b/src/sqlfluff/dialects/dialect_bigquery.py
@@ -131,6 +131,14 @@ bigquery_dialect.add(
         ansi.IdentifierSegment,
         type="naked_identifier_all",
     ),
+    NakedIdentifierPart=RegexParser(
+        # The part of an identifier after a hyphen.
+        # NOTE: This one can match an "all numbers" variant.
+        # https://cloud.google.com/resource-manager/docs/creating-managing-projects
+        r"[A-Z0-9_]+",
+        ansi.IdentifierSegment,
+        type="naked_identifier",
+    ),
     SingleIdentifierFullGrammar=OneOf(
         Ref("NakedIdentifierSegment"),
         Ref("QuotedIdentifierSegment"),
@@ -146,6 +154,9 @@ bigquery_dialect.add(
             Ref("ArrayLiteralSegment"),
             Ref("TupleSegment"),
             Ref("BaseExpressionElementGrammar"),
+            terminators=[
+                Ref("SemicolonSegment"),
+            ],
         ),
     ),
     ExtendedDatetimeUnitSegment=SegmentGenerator(
@@ -203,14 +214,6 @@ bigquery_dialect.replace(
         Ref("NamedArgumentSegment"),
     ),
     TrimParametersGrammar=Nothing(),
-    SimpleArrayTypeGrammar=Sequence(
-        "ARRAY",
-        Bracketed(
-            Ref("DatatypeSegment"),
-            bracket_type="angle",
-            bracket_pairs_set="angle_bracket_pairs",
-        ),
-    ),
     # BigQuery allows underscore in parameter names, and also anything if quoted in
     # backticks
     ParameterNameSegment=OneOf(
@@ -245,13 +248,13 @@ bigquery_dialect.replace(
 
 # Set Keywords
 bigquery_dialect.sets("unreserved_keywords").clear()
-bigquery_dialect.sets("unreserved_keywords").update(
-    [n.strip().upper() for n in bigquery_unreserved_keywords.split("\n")]
+bigquery_dialect.update_keywords_set_from_multiline_string(
+    "unreserved_keywords", bigquery_unreserved_keywords
 )
 
 bigquery_dialect.sets("reserved_keywords").clear()
-bigquery_dialect.sets("reserved_keywords").update(
-    [n.strip().upper() for n in bigquery_reserved_keywords.split("\n")]
+bigquery_dialect.update_keywords_set_from_multiline_string(
+    "reserved_keywords", bigquery_reserved_keywords
 )
 
 # Add additional datetime units
@@ -311,6 +314,20 @@ bigquery_dialect.sets("angle_bracket_pairs").update(
 )
 
 
+class ArrayTypeSegment(ansi.ArrayTypeSegment):
+    """Prefix for array literals specifying the type."""
+
+    type = "array_type"
+    match_grammar = Sequence(
+        "ARRAY",
+        Bracketed(
+            Ref("DatatypeSegment"),
+            bracket_type="angle",
+            bracket_pairs_set="angle_bracket_pairs",
+        ),
+    )
+
+
 class QualifyClauseSegment(BaseSegment):
     """A `QUALIFY` clause like in `SELECT`."""
 
@@ -446,6 +463,7 @@ class StatementSegment(ansi.StatementSegment):
             Ref("LeaveStatementSegment"),
             Ref("ContinueStatementSegment"),
             Ref("RaiseStatementSegment"),
+            Ref("AlterViewStatementSegment"),
             Ref("CreateMaterializedViewStatementSegment"),
             Ref("AlterMaterializedViewStatementSegment"),
             Ref("DropMaterializedViewStatementSegment"),
@@ -463,6 +481,14 @@ class AssertStatementSegment(BaseSegment):
     match_grammar: Matchable = Sequence(
         "ASSERT",
         Ref("ExpressionSegment"),
+        Sequence(
+            "AS",
+            OneOf(
+                Ref("SingleQuotedLiteralSegment"),
+                Ref("DoubleQuotedLiteralSegment"),
+            ),
+            optional=True,
+        ),
     )
 
 
@@ -692,8 +718,8 @@ class SelectClauseModifierSegment(ansi.SelectClauseModifierSegment):
 
     match_grammar = Sequence(
         # https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax
-        Sequence("AS", OneOf("STRUCT", "VALUE"), optional=True),
         OneOf("DISTINCT", "ALL", optional=True),
+        Sequence("AS", OneOf("STRUCT", "VALUE"), optional=True),
     )
 
 
@@ -904,20 +930,7 @@ class FunctionSegment(ansi.FunctionSegment):
                 # Functions returning STRUCTs in BigQuery can have the fields
                 # elements referenced (e.g. ".a"), including wildcards (e.g. ".*")
                 # or multiple nested fields (e.g. ".a.b", or ".a.b.c")
-                Sequence(
-                    Ref("DotSegment"),
-                    AnyNumberOf(
-                        Sequence(
-                            Ref("ParameterNameSegment"),
-                            Ref("DotSegment"),
-                        ),
-                    ),
-                    OneOf(
-                        Ref("ParameterNameSegment"),
-                        Ref("StarSegment"),
-                    ),
-                    optional=True,
-                ),
+                Ref("SemiStructuredAccessorSegment", optional=True),
                 Ref("PostFunctionGrammar", optional=True),
             ),
         ),
@@ -1016,10 +1029,10 @@ class DatatypeSegment(ansi.DatatypeSegment):
         Sequence(
             Ref("DatatypeIdentifierSegment"),  # Simple type
             # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#parameterized_data_types
-            Bracketed(Delimited(Ref("NumericLiteralSegment")), optional=True),
+            Ref("BracketedArguments", optional=True),
         ),
         Sequence("ANY", "TYPE"),  # SQL UDFs can specify this "type"
-        Ref("SimpleArrayTypeGrammar"),
+        Ref("ArrayTypeSegment"),
         Ref("StructTypeSegment"),
     )
 
@@ -1029,48 +1042,37 @@ class StructTypeSegment(ansi.StructTypeSegment):
 
     match_grammar = Sequence(
         "STRUCT",
-        Bracketed(
-            Delimited(  # Comma-separated list of field names/types
-                Sequence(
-                    OneOf(
-                        # ParameterNames can look like Datatypes so can't use
-                        # Optional=True here and instead do a OneOf in order
-                        # with DataType only first, followed by both.
-                        Ref("DatatypeSegment"),
-                        Sequence(
-                            Ref("ParameterNameSegment"),
-                            Ref("DatatypeSegment"),
-                        ),
-                    ),
-                    Ref("OptionsSegment", optional=True),
-                ),
-            ),
-            bracket_type="angle",
-            bracket_pairs_set="angle_bracket_pairs",
-        ),
+        Ref("StructTypeSchemaSegment", optional=True),
     )
 
 
-class TypelessStructSegment(ansi.TypelessStructSegment):
-    """Expression to construct a STRUCT with implicit types.
-
-    https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#typeless_struct_syntax
-    """
+class StructTypeSchemaSegment(BaseSegment):
+    """Expression to construct the schema of a STRUCT datatype."""
 
-    match_grammar = Sequence(
-        "STRUCT",
-        Bracketed(
-            Delimited(
-                Sequence(
-                    Ref("BaseExpressionElementGrammar"),
-                    Ref("AliasExpressionSegment", optional=True),
+    type = "struct_type_schema"
+    match_grammar = Bracketed(
+        Delimited(  # Comma-separated list of field names/types
+            Sequence(
+                OneOf(
+                    # ParameterNames can look like Datatypes so can't use
+                    # Optional=True here and instead do a OneOf in order
+                    # with DataType only first, followed by both.
+                    Ref("DatatypeSegment"),
+                    Sequence(
+                        Ref("ParameterNameSegment"),
+                        Ref("DatatypeSegment"),
+                    ),
                 ),
+                AnyNumberOf(Ref("ColumnConstraintSegment")),
+                Ref("OptionsSegment", optional=True),
             ),
         ),
+        bracket_type="angle",
+        bracket_pairs_set="angle_bracket_pairs",
     )
 
 
-class TypelessArraySegment(ansi.TypelessArraySegment):
+class ArrayExpressionSegment(ansi.ArrayExpressionSegment):
     """Expression to construct a ARRAY from a subquery.
 
     https://cloud.google.com/bigquery/docs/reference/standard-sql/array_functions#array
@@ -1121,17 +1123,18 @@ class SemiStructuredAccessorSegment(BaseSegment):
 
     type = "semi_structured_expression"
     match_grammar = Sequence(
-        Ref("DotSegment"),
-        Ref("SingleIdentifierGrammar"),
-        Ref("ArrayAccessorSegment", optional=True),
         AnyNumberOf(
             Sequence(
                 Ref("DotSegment"),
-                Ref("SingleIdentifierGrammar"),
+                OneOf(
+                    Ref("SingleIdentifierGrammar"),
+                    Ref("StarSegment"),
+                ),
                 allow_gaps=True,
             ),
             Ref("ArrayAccessorSegment", optional=True),
             allow_gaps=True,
+            min_times=1,
         ),
         allow_gaps=True,
     )
@@ -1237,7 +1240,7 @@ class TableReferenceSegment(ObjectReferenceSegment):
             AnyNumberOf(
                 Sequence(
                     Ref("DashSegment"),
-                    OneOf(Ref("SingleIdentifierGrammar"), Ref("NumericLiteralSegment")),
+                    Ref("NakedIdentifierPart"),
                     allow_gaps=False,
                 ),
                 optional=True,
@@ -1322,11 +1325,10 @@ class DeclareStatementSegment(BaseSegment):
         "DECLARE",
         Delimited(Ref("SingleIdentifierFullGrammar")),
         OneOf(
-            Ref("DatatypeSegment"),
             Ref("DefaultDeclareOptionsGrammar"),
             Sequence(
                 Ref("DatatypeSegment"),
-                Ref("DefaultDeclareOptionsGrammar"),
+                Ref("DefaultDeclareOptionsGrammar", optional=True),
             ),
         ),
     )
@@ -1429,9 +1431,7 @@ class ColumnDefinitionSegment(ansi.ColumnDefinitionSegment):
     match_grammar: Matchable = Sequence(
         Ref("SingleIdentifierGrammar"),  # Column name
         Ref("DatatypeSegment"),  # Column type
-        AnyNumberOf(
-            Ref("ColumnConstraintSegment", optional=True),
-        ),
+        AnyNumberOf(Ref("ColumnConstraintSegment")),
         Ref("OptionsSegment", optional=True),
     )
 
@@ -1456,14 +1456,10 @@ class CreateTableStatementSegment(ansi.CreateTableStatementSegment):
         Sequence(
             Bracketed(
                 Delimited(
-                    OneOf(
-                        Ref("TableConstraintSegment"),
-                        Ref("ColumnDefinitionSegment"),
-                    ),
+                    Ref("ColumnDefinitionSegment"),
                     allow_trailing=True,
                 )
             ),
-            Ref("CommentClauseSegment", optional=True),
             optional=True,
         ),
         Ref("PartitionBySegment", optional=True),
@@ -1478,6 +1474,91 @@ class CreateTableStatementSegment(ansi.CreateTableStatementSegment):
     )
 
 
+class AlterTableStatementSegment(ansi.AlterTableStatementSegment):
+    """A `ALTER TABLE` statement."""
+
+    match_grammar = Sequence(
+        "ALTER",
+        "TABLE",
+        Ref("IfExistsGrammar", optional=True),
+        Ref("TableReferenceSegment"),
+        OneOf(
+            # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_set_options_statement
+            Sequence(
+                "SET",
+                Ref("OptionsSegment"),
+            ),
+            # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_add_column_statement
+            Delimited(
+                Sequence(
+                    "ADD",
+                    "COLUMN",
+                    Ref("IfNotExistsGrammar", optional=True),
+                    Ref("ColumnDefinitionSegment"),
+                ),
+                allow_trailing=True,
+            ),
+            # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_rename_to_statement
+            Sequence(
+                "RENAME",
+                "TO",
+                Ref("TableReferenceSegment"),
+            ),
+            # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_rename_column_statement
+            Delimited(
+                Sequence(
+                    "RENAME",
+                    "COLUMN",
+                    Ref("IfExistsGrammar", optional=True),
+                    Ref("SingleIdentifierGrammar"),  # Column name
+                    "TO",
+                    Ref("SingleIdentifierGrammar"),  # Column name
+                ),
+                allow_trailing=True,
+            ),
+            # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_drop_column_statement
+            Delimited(
+                Sequence(
+                    "DROP",
+                    "COLUMN",
+                    Ref("IfExistsGrammar", optional=True),
+                    Ref("SingleIdentifierGrammar"),  # Column name
+                ),
+            ),
+            # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_column_set_options_statement
+            Delimited(
+                Sequence(
+                    "ALTER",
+                    "COLUMN",
+                    Ref("IfExistsGrammar", optional=True),
+                    Ref("SingleIdentifierGrammar"),  # Column name
+                    OneOf(
+                        Sequence(
+                            "SET",
+                            OneOf(
+                                Ref("OptionsSegment"),
+                                Sequence(
+                                    "DATA",
+                                    "TYPE",
+                                    Ref("DatatypeSegment"),
+                                ),
+                                Sequence(
+                                    "DEFAULT",
+                                    OneOf(
+                                        Ref("LiteralGrammar"),
+                                        Ref("FunctionSegment"),
+                                    ),
+                                ),
+                            ),
+                        ),
+                        Sequence("DROP", OneOf("DEFAULT", Sequence("NOT", "NULL"))),
+                    ),
+                ),
+            ),
+        ),
+    )
+
+
 class CreateExternalTableStatementSegment(BaseSegment):
     """A `CREATE EXTERNAL TABLE` statement.
 
@@ -1543,6 +1624,24 @@ class CreateViewStatementSegment(ansi.CreateViewStatementSegment):
     )
 
 
+class AlterViewStatementSegment(BaseSegment):
+    """A `ALTER VIEW` statement.
+
+    https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_view_set_options_statement
+    """
+
+    type = "alter_view_statement"
+
+    match_grammar = Sequence(
+        "ALTER",
+        "VIEW",
+        Ref("IfExistsGrammar", optional=True),
+        Ref("TableReferenceSegment"),
+        "SET",
+        Ref("OptionsSegment"),
+    )
+
+
 class CreateMaterializedViewStatementSegment(BaseSegment):
     """A `CREATE MATERIALIZED VIEW` statement.
 
@@ -1612,6 +1711,17 @@ class ParameterizedSegment(BaseSegment):
     match_grammar = OneOf(Ref("AtSignLiteralSegment"), Ref("QuestionMarkSegment"))
 
 
+class PivotForClauseSegment(BaseSegment):
+    """The FOR part of a PIVOT expression.
+
+    Needed to avoid BaseExpressionElementGrammar swallowing up the IN part.
+    """
+
+    type = "pivot_for_clause"
+    match_grammar = GreedyUntil("IN")
+    parse_grammar = Ref("BaseExpressionElementGrammar")
+
+
 class FromPivotExpressionSegment(BaseSegment):
     """A PIVOT expression.
 
@@ -1629,7 +1739,7 @@ class FromPivotExpressionSegment(BaseSegment):
                 ),
             ),
             "FOR",
-            Ref("SingleIdentifierGrammar"),
+            Ref("PivotForClauseSegment"),
             "IN",
             Bracketed(
                 Delimited(
@@ -1648,12 +1758,14 @@ class UnpivotAliasExpressionSegment(BaseSegment):
 
     type = "alias_expression"
     match_grammar = Sequence(
+        Indent,
         Ref.keyword("AS", optional=True),
         OneOf(
             Ref("SingleQuotedLiteralSegment"),
             Ref("DoubleQuotedLiteralSegment"),
             Ref("NumericLiteralSegment"),
         ),
+        Dedent,
     )
 
 
@@ -1672,16 +1784,22 @@ class FromUnpivotExpressionSegment(BaseSegment):
             optional=True,
         ),
         OneOf(
+            # single column unpivot
             Bracketed(
                 Ref("SingleIdentifierGrammar"),
                 "FOR",
                 Ref("SingleIdentifierGrammar"),
                 "IN",
                 Bracketed(
-                    Delimited(Ref("SingleIdentifierGrammar")),
-                    Ref("UnpivotAliasExpressionSegment", optional=True),
+                    Delimited(
+                        Sequence(
+                            Delimited(Ref("SingleIdentifierGrammar")),
+                            Ref("UnpivotAliasExpressionSegment", optional=True),
+                        ),
+                    ),
                 ),
             ),
+            # multi column unpivot
             Bracketed(
                 Bracketed(
                     Delimited(
@@ -1809,9 +1927,7 @@ class MergeInsertClauseSegment(ansi.MergeInsertClauseSegment):
             Indent,
             Ref("BracketedColumnReferenceListGrammar", optional=True),
             Dedent,
-            Indent,
             Ref("ValuesClauseSegment", optional=True),
-            Dedent,
         ),
         Sequence("INSERT", "ROW"),
     )
@@ -1829,7 +1945,7 @@ class DeleteStatementSegment(BaseSegment):
     match_grammar: Matchable = Sequence(
         "DELETE",
         Ref.keyword("FROM", optional=True),
-        Ref("ObjectReferenceSegment"),
+        Ref("TableReferenceSegment"),
         Ref("AliasExpressionSegment", optional=True),
         Ref("WhereClauseSegment", optional=True),
     )
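
Alongside the new ALTER TABLE and ALTER VIEW support, the BigQuery hunks add NakedIdentifierPart for hyphenated project names and route the FOR part of PIVOT through PivotForClauseSegment. A short sketch under invented project, dataset and table names:

    import sqlfluff

    # Hypothetical statements matching the grammar added above.
    statements = [
        "ALTER VIEW my_dataset.my_view SET OPTIONS (description = 'updated')",
        "ALTER TABLE my_dataset.my_table ADD COLUMN IF NOT EXISTS c INT64",
        "SELECT * FROM my-project.my_dataset.my_table",
    ]
    for sql in statements:
        # parse() raises on any statement the dialect cannot parse.
        sqlfluff.parse(sql, dialect="bigquery")
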
diff --git a/src/sqlfluff/dialects/dialect_clickhouse.py b/src/sqlfluff/dialects/dialect_clickhouse.py
index 961abae..402e4cd 100644
--- a/src/sqlfluff/dialects/dialect_clickhouse.py
+++ b/src/sqlfluff/dialects/dialect_clickhouse.py
@@ -2,19 +2,24 @@
 
 https://clickhouse.com/
 """
-
 from sqlfluff.core.dialects import load_raw_dialect
 from sqlfluff.core.parser import (
     AnyNumberOf,
     AnySetOf,
     BaseSegment,
     Bracketed,
+    Conditional,
+    Dedent,
     Delimited,
+    Indent,
     Matchable,
     OneOf,
     OptionallyBracketed,
     Ref,
     Sequence,
+    SymbolSegment,
+    TypedParser,
+    StringLexer,
 )
 from sqlfluff.dialects import dialect_ansi as ansi
 from sqlfluff.dialects.dialect_clickhouse_keywords import (
@@ -25,7 +30,183 @@ ansi_dialect = load_raw_dialect("ansi")
 
 clickhouse_dialect = ansi_dialect.copy_as("clickhouse")
 clickhouse_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS)
-clickhouse_dialect.sets("reserved_keywords").clear()
+
+clickhouse_dialect.insert_lexer_matchers(
+    # https://clickhouse.com/docs/en/sql-reference/functions#higher-order-functions---operator-and-lambdaparams-expr-function
+    [StringLexer("lambda", r"->", SymbolSegment, segment_kwargs={"type": "lambda"})],
+    before="newline",
+)
+
+clickhouse_dialect.add(
+    JoinTypeKeywords=OneOf(
+        # This case INNER [ANY,ALL] JOIN
+        Sequence("INNER", OneOf("ALL", "ANY", optional=True)),
+        # This case [ANY,ALL] INNER JOIN
+        Sequence(OneOf("ALL", "ANY", optional=True), "INNER"),
+        # This case FULL ALL OUTER JOIN
+        Sequence(
+            "FULL",
+            Ref.keyword("ALL", optional=True),
+            Ref.keyword("OUTER", optional=True),
+        ),
+        # This case ALL FULL OUTER JOIN
+        Sequence(
+            Ref.keyword("ALL", optional=True),
+            "FULL",
+            Ref.keyword("OUTER", optional=True),
+        ),
+        # This case LEFT [OUTER,ANTI,SEMI,ANY,ASOF] JOIN
+        Sequence(
+            "LEFT",
+            OneOf(
+                "ANTI",
+                "SEMI",
+                OneOf("ANY", "ALL", optional=True),
+                "ASOF",
+                optional=True,
+            ),
+            Ref.keyword("OUTER", optional=True),
+        ),
+        # This case [ANTI,SEMI,ANY,ASOF] LEFT JOIN
+        Sequence(
+            OneOf(
+                "ANTI",
+                "SEMI",
+                OneOf("ANY", "ALL", optional=True),
+                "ASOF",
+            ),
+            "LEFT",
+        ),
+        # This case RIGHT [OUTER,ANTI,SEMI,ANY,ASOF] JOIN
+        Sequence(
+            "RIGHT",
+            OneOf(
+                "OUTER",
+                "ANTI",
+                "SEMI",
+                OneOf("ANY", "ALL", optional=True),
+                optional=True,
+            ),
+            Ref.keyword("OUTER", optional=True),
+        ),
+        # This case [OUTER,ANTI,SEMI,ANY] RIGHT JOIN
+        Sequence(
+            OneOf(
+                "ANTI",
+                "SEMI",
+                OneOf("ANY", "ALL", optional=True),
+                optional=True,
+            ),
+            "RIGHT",
+        ),
+        # This case CROSS JOIN
+        "CROSS",
+        # This case ANY JOIN
+        "ANY",
+        # This case ALL JOIN
+        "ALL",
+    ),
+    LambdaFunctionSegment=TypedParser("lambda", SymbolSegment, type="lambda"),
+)
+clickhouse_dialect.replace(
+    BinaryOperatorGrammar=OneOf(
+        Ref("ArithmeticBinaryOperatorGrammar"),
+        Ref("StringBinaryOperatorGrammar"),
+        Ref("BooleanBinaryOperatorGrammar"),
+        Ref("ComparisonOperatorGrammar"),
+        # Add Lambda Function
+        Ref("LambdaFunctionSegment"),
+    ),
+)
+
+clickhouse_dialect.replace(
+    JoinLikeClauseGrammar=Sequence(
+        AnyNumberOf(
+            Ref("ArrayJoinClauseSegment"),
+            min_times=1,
+        ),
+        Ref("AliasExpressionSegment", optional=True),
+    ),
+)
+
+
+class BracketedArguments(ansi.BracketedArguments):
+    """A series of bracketed arguments.
+
+    e.g. the bracketed part of numeric(1, 3)
+    """
+
+    match_grammar = Bracketed(
+        Delimited(
+            OneOf(
+                # Datatypes like Nullable allow optional datatypes here.
+                Ref("DatatypeIdentifierSegment"),
+                Ref("NumericLiteralSegment"),
+            ),
+            # The brackets might be empty for some cases...
+            optional=True,
+        ),
+    )
+
+
+class JoinClauseSegment(ansi.JoinClauseSegment):
+    """Any number of join clauses, including the `JOIN` keyword.
+
+    https://clickhouse.com/docs/en/sql-reference/statements/select/join/#supported-types-of-join
+    """
+
+    match_grammar = OneOf(
+        Sequence(
+            Ref("JoinTypeKeywords", optional=True),
+            Ref("JoinKeywordsGrammar"),
+            Indent,
+            Ref("FromExpressionElementSegment"),
+            Dedent,
+            Conditional(Indent, indented_using_on=True),
+            OneOf(
+                # ON clause
+                Ref("JoinOnConditionSegment"),
+                # USING clause
+                Sequence(
+                    "USING",
+                    Conditional(Indent, indented_using_on=False),
+                    Delimited(
+                        OneOf(
+                            Bracketed(
+                                Delimited(Ref("SingleIdentifierGrammar")),
+                                ephemeral_name="UsingClauseContents",
+                            ),
+                            Delimited(Ref("SingleIdentifierGrammar")),
+                        ),
+                    ),
+                    Conditional(Dedent, indented_using_on=False),
+                ),
+                # Requires True for CROSS JOIN
+                optional=True,
+            ),
+            Conditional(Dedent, indented_using_on=True),
+        ),
+    )
+
+
+class ArrayJoinClauseSegment(BaseSegment):
+    """[LEFT] ARRAY JOIN does not support Join conditions and doesn't work as real JOIN.
+
+    https://clickhouse.com/docs/en/sql-reference/statements/select/array-join
+    """
+
+    type = "array_join_clause"
+
+    match_grammar: Matchable = Sequence(
+        Ref.keyword("LEFT", optional=True),
+        "ARRAY",
+        Ref("JoinKeywordsGrammar"),
+        Indent,
+        Delimited(
+            Ref("SelectClauseElementSegment"),
+        ),
+        Dedent,
+    )
 
 
 class CTEDefinitionSegment(ansi.CTEDefinitionSegment):
@@ -54,6 +235,30 @@ class CTEDefinitionSegment(ansi.CTEDefinitionSegment):
     )
 
 
+class AliasExpressionSegment(ansi.AliasExpressionSegment):
+    """A reference to an object with an `AS` clause."""
+
+    type = "alias_expression"
+    match_grammar: Matchable = Sequence(
+        Indent,
+        Ref.keyword("AS", optional=True),
+        OneOf(
+            Sequence(
+                Ref("SingleIdentifierGrammar"),
+                # Column alias in VALUES clause
+                Bracketed(Ref("SingleIdentifierListSegment"), optional=True),
+            ),
+            Ref("SingleQuotedIdentifierSegment"),
+            exclude=OneOf(
+                "LATERAL",
+                "WINDOW",
+                "KEYS",
+            ),
+        ),
+        Dedent,
+    )
+
+
 class FromExpressionElementSegment(ansi.FromExpressionElementSegment):
     """A table expression.
 
@@ -70,7 +275,8 @@ class FromExpressionElementSegment(ansi.FromExpressionElementSegment):
             exclude=OneOf(
                 Ref("SamplingExpressionSegment"),
                 Ref("JoinLikeClauseGrammar"),
-                Ref.keyword("Final"),
+                "FINAL",
+                Ref("JoinClauseSegment"),
             ),
             optional=True,
         ),
@@ -274,12 +480,6 @@ class ColumnConstraintSegment(BaseSegment):
     )
 
 
-class ClusterReferenceSegment(ansi.ObjectReferenceSegment):
-    """A reference to a cluster."""
-
-    type = "cluster_reference"
-
-
 class CreateTableStatementSegment(ansi.CreateTableStatementSegment):
     """A `CREATE TABLE` statement.
 
@@ -302,7 +502,7 @@ class CreateTableStatementSegment(ansi.CreateTableStatementSegment):
         Sequence(
             "ON",
             "CLUSTER",
-            Ref("ClusterReferenceSegment"),
+            Ref("ExpressionSegment"),
             optional=True,
         ),
         OneOf(
@@ -351,15 +551,50 @@ class CreateTableStatementSegment(ansi.CreateTableStatementSegment):
     )
 
 
+class CreateMaterializedViewStatementSegment(BaseSegment):
+    """A `CREATE MATERIALIZED VIEW` statement.
+
+    https://clickhouse.com/docs/en/sql-reference/statements/create/table/
+    """
+
+    type = "create_materialized_view_statement"
+
+    match_grammar = Sequence(
+        "CREATE",
+        "MATERIALIZED",
+        "VIEW",
+        Ref("IfNotExistsGrammar", optional=True),
+        Ref("TableReferenceSegment"),
+        Sequence(
+            "ON",
+            "CLUSTER",
+            Ref("ExpressionSegment"),
+            optional=True,
+        ),
+        OneOf(
+            Sequence(
+                "TO",
+                Ref("TableReferenceSegment"),
+                Ref("EngineSegment", optional=True),
+            ),
+            Sequence(
+                Ref("EngineSegment", optional=True),
+                Sequence("POPULATE", optional=True),
+            ),
+        ),
+        "AS",
+        Ref("SelectableGrammar"),
+        Ref("TableEndClauseSegment", optional=True),
+    )
+
+
 class StatementSegment(ansi.StatementSegment):
     """Overriding StatementSegment to allow for additional segment parsing."""
 
     match_grammar = ansi.StatementSegment.match_grammar
     parse_grammar = ansi.StatementSegment.parse_grammar.copy(
         insert=[
-            Ref("EngineSegment"),
-            Ref("ClusterReferenceSegment"),
-            Ref("ColumnTTLSegment"),
-            Ref("TableTTLSegment"),
+            Ref("CreateTableStatementSegment"),
+            Ref("CreateMaterializedViewStatementSegment"),
         ]
     )
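
The ClickHouse changes introduce a "->" lambda lexer, the dialect's extended join-type keywords, a dedicated ARRAY JOIN clause and CREATE MATERIALIZED VIEW parsing. A hypothetical sketch (table and column names invented, though arrayMap is a real ClickHouse function):

    import sqlfluff

    # A higher-order function lambda and a LEFT ARRAY JOIN, both
    # newly parsable under the clickhouse dialect.
    statements = [
        "SELECT arrayMap(x -> x + 1, [1, 2, 3]) AS incremented",
        "SELECT s, arr_item FROM arrays_test LEFT ARRAY JOIN arr AS arr_item",
    ]
    for sql in statements:
        sqlfluff.parse(sql, dialect="clickhouse")
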
diff --git a/src/sqlfluff/dialects/dialect_clickhouse_keywords.py b/src/sqlfluff/dialects/dialect_clickhouse_keywords.py
index 5e4248e..f997d3f 100644
--- a/src/sqlfluff/dialects/dialect_clickhouse_keywords.py
+++ b/src/sqlfluff/dialects/dialect_clickhouse_keywords.py
@@ -24,8 +24,6 @@ UNRESERVED_KEYWORDS = [
     "BETWEEN",
     "BOTH",
     "BY",
-    "CASE",
-    "CAST",
     "CHECK",
     "CLEAR",
     "CLUSTER",
@@ -35,7 +33,6 @@ UNRESERVED_KEYWORDS = [
     "COMMENT",
     "CONSTRAINT",
     "CREATE",
-    "CROSS",
     "CUBE",
     "DATABASE",
     "DATABASES",
@@ -72,7 +69,6 @@ UNRESERVED_KEYWORDS = [
     "FORMAT",
     "FREEZE",
     "FROM",
-    "FULL",
     "FUNCTION",
     "GLOBAL",
     "GRANULARITY",
@@ -82,25 +78,20 @@ UNRESERVED_KEYWORDS = [
     "HOUR",
     "ID",
     "IF",
-    "IGNORE",
     "ILIKE",
     "IN",
     "INDEX",
     "INF",
     "INJECTIVE",
-    "INNER",
     "INSERT",
-    "INTERVAL",
     "INTO",
     "IS",
     "IS_OBJECT_ID",
-    "JOIN",
     "KEY",
     "KILL",
     "LAST",
     "LAYOUT",
     "LEADING",
-    "LEFT",
     "LIFETIME",
     "LIKE",
     "LIMIT",
@@ -118,20 +109,14 @@ UNRESERVED_KEYWORDS = [
     "MOVE",
     "MUTATION",
     "NAN_SQL",
-    "NATURAL",
     "NO",
     "NOT",
-    "NULL",
     "NULL_SQL",
     "NULLS",
     "OFFSET",
-    "ON",
     "OPTIMIZE",
     "OR",
-    "ORDER",
-    "OUTER",
     "OUTFILE",
-    "PARTITION",
     "POPULATE",
     "PREWHERE",
     "PRIMARY",
@@ -144,16 +129,11 @@ UNRESERVED_KEYWORDS = [
     "REPLACE",
     "REPLICA",
     "REPLICATED",
-    "RESPECT",
-    "RIGHT",
     "ROLLUP",
-    "ROWS",
     "SAMPLE",
     "SECOND",
-    "SELECT",
     "SEMI",
     "SENDS",
-    "SET",
     "SETTINGS",
     "SHOW",
     "SOURCE",
@@ -179,10 +159,8 @@ UNRESERVED_KEYWORDS = [
     "TRUNCATE",
     "TTL",
     "TYPE",
-    "UNION",
     "UPDATE",
     "USE",
-    "USING",
     "UUID",
     "VALUES",
     "VIEW",
diff --git a/src/sqlfluff/dialects/dialect_databricks.py b/src/sqlfluff/dialects/dialect_databricks.py
new file mode 100644
index 0000000..441e412
--- /dev/null
+++ b/src/sqlfluff/dialects/dialect_databricks.py
@@ -0,0 +1,140 @@
+"""The Databricks Dialect.
+
+Functionally, it is quite similar to SparkSQL;
+however, it's much less strict on keywords.
+It also has some extensions.
+"""
+
+from sqlfluff.core.dialects import load_raw_dialect
+from sqlfluff.core.parser import (
+    BaseSegment,
+    OneOf,
+    Ref,
+    Sequence,
+)
+from sqlfluff.dialects.dialect_databricks_keywords import (
+    RESERVED_KEYWORDS,
+    UNRESERVED_KEYWORDS,
+)
+
+from sqlfluff.dialects import dialect_ansi as ansi
+from sqlfluff.dialects import dialect_sparksql as sparksql
+
+sparksql_dialect = load_raw_dialect("sparksql")
+databricks_dialect = sparksql_dialect.copy_as("databricks")
+
+databricks_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS)
+databricks_dialect.sets("unreserved_keywords").update(
+    sparksql_dialect.sets("reserved_keywords")
+)
+databricks_dialect.sets("unreserved_keywords").difference_update(RESERVED_KEYWORDS)
+databricks_dialect.sets("reserved_keywords").clear()
+databricks_dialect.sets("reserved_keywords").update(RESERVED_KEYWORDS)
+
+
+# Object References
+class CatalogReferenceSegment(ansi.ObjectReferenceSegment):
+    """A reference to a catalog.
+
+    https://docs.databricks.com/data-governance/unity-catalog/create-catalogs.html
+    """
+
+    type = "catalog_reference"
+
+
+# Data Definition Statements
+# https://docs.databricks.com/sql/language-manual/index.html#ddl-statements
+class AlterCatalogStatementSegment(BaseSegment):
+    """An `ALTER CATALOG` statement.
+
+    https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-alter-catalog.html
+    """
+
+    type = "alter_catalog_statement"
+    match_grammar = Sequence(
+        "ALTER",
+        "CATALOG",
+        Ref("CatalogReferenceSegment"),
+        Ref.keyword("SET", optional=True),
+        Sequence(
+            "OWNER",
+            "TO",
+            Ref("SingleIdentifierGrammar"),
+        ),
+    )
+
+
+class CreateCatalogStatementSegment(BaseSegment):
+    """A `CREATE CATALOG` statement.
+
+    https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-create-catalog.html
+    """
+
+    type = "create_catalog_statement"
+    match_grammar = Sequence(
+        "CREATE",
+        "CATALOG",
+        Ref("IfNotExistsGrammar", optional=True),
+        Ref("CatalogReferenceSegment"),
+        Ref("CommentGrammar", optional=True),
+    )
+
+
+class DropCatalogStatementSegment(BaseSegment):
+    """A `DROP CATALOG` statement.
+
+    https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-drop-catalog.html
+    """
+
+    type = "drop_catalog_statement"
+    match_grammar = Sequence(
+        "DROP",
+        "CATALOG",
+        Ref("IfExistsGrammar", optional=True),
+        Ref("CatalogReferenceSegment"),
+        Ref("DropBehaviorGrammar", optional=True),
+    )
+
+
+class UseCatalogStatementSegment(BaseSegment):
+    """A `USE CATALOG` statement.
+
+    https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-catalog.html
+    """
+
+    type = "use_catalog_statement"
+    match_grammar = Sequence(
+        "USE",
+        "CATALOG",
+        Ref("CatalogReferenceSegment"),
+    )
+
+
+class UseDatabaseStatementSegment(sparksql.UseDatabaseStatementSegment):
+    """A `USE DATABASE` statement.
+
+    https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-usedb.html
+    """
+
+    type = "use_database_statement"
+    match_grammar = Sequence(
+        "USE",
+        OneOf("DATABASE", "SCHEMA", optional=True),
+        Ref("DatabaseReferenceSegment"),
+    )
+
+
+class StatementSegment(sparksql.StatementSegment):
+    """Overriding StatementSegment to allow for additional segment parsing."""
+
+    match_grammar = sparksql.StatementSegment.match_grammar
+    parse_grammar = sparksql.StatementSegment.parse_grammar.copy(
+        # Segments defined in Databricks SQL dialect
+        insert=[
+            # Unity Catalog
+            Ref("AlterCatalogStatementSegment"),
+            Ref("CreateCatalogStatementSegment"),
+            Ref("DropCatalogStatementSegment"),
+            Ref("UseCatalogStatementSegment"),
+        ]
+    )
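
dialect_databricks.py is a new dialect that layers Unity Catalog DDL on top of SparkSQL. A minimal sketch with an invented catalog name and owner principal:

    import sqlfluff

    # Hypothetical Unity Catalog statements handled by the new dialect;
    # lint() flags anything unparsable with the "PRS" code.
    sql = (
        "CREATE CATALOG IF NOT EXISTS dev_catalog COMMENT 'Sandbox catalog';\n"
        "USE CATALOG dev_catalog;\n"
        "ALTER CATALOG dev_catalog SET OWNER TO data_admins;\n"
    )
    for violation in sqlfluff.lint(sql, dialect="databricks"):
        print(violation["code"], violation["description"])
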
diff --git a/src/sqlfluff/dialects/dialect_databricks_keywords.py b/src/sqlfluff/dialects/dialect_databricks_keywords.py
new file mode 100644
index 0000000..2a65f0f
--- /dev/null
+++ b/src/sqlfluff/dialects/dialect_databricks_keywords.py
@@ -0,0 +1,28 @@
+"""A list of databricks reserved keywords.
+
+https://docs.databricks.com/sql/language-manual/sql-ref-reserved-words.html
+"""
+
+RESERVED_KEYWORDS = [
+    "ANTI",
+    "CROSS",
+    "EXCEPT",
+    "FULL",
+    "INNER",
+    "INTERSECT",
+    "JOIN",
+    "LATERAL",
+    "LEFT",
+    "MINUS",
+    "NATURAL",
+    "ON",
+    "RIGHT",
+    "SEMI",
+    "UNION",
+    "USING",
+]
+
+UNRESERVED_KEYWORDS = [
+    # Unity Catalog
+    "CATALOG"
+]
diff --git a/src/sqlfluff/dialects/dialect_db2.py b/src/sqlfluff/dialects/dialect_db2.py
index e4a729e..22c5439 100644
--- a/src/sqlfluff/dialects/dialect_db2.py
+++ b/src/sqlfluff/dialects/dialect_db2.py
@@ -13,9 +13,23 @@ from sqlfluff.core.parser import (
 )
 from sqlfluff.dialects import dialect_ansi as ansi
 
+from sqlfluff.dialects.dialect_db2_keywords import UNRESERVED_KEYWORDS
+
+from sqlfluff.core.parser.grammar.base import Ref
+from sqlfluff.core.parser.grammar.sequence import Sequence
+from sqlfluff.core.parser.segments.base import BaseSegment
+
+from sqlfluff.core.parser.grammar.base import Anything
+from sqlfluff.core.parser.grammar.sequence import Bracketed
+
+from sqlfluff.core.parser.grammar.anyof import OneOf
+
+from sqlfluff.core.parser.grammar.anyof import AnyNumberOf
+
 ansi_dialect = load_raw_dialect("ansi")
 
 db2_dialect = ansi_dialect.copy_as("db2")
+db2_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS)
 
 
 db2_dialect.replace(
@@ -29,6 +43,23 @@ db2_dialect.replace(
             anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$",
         )
     ),
+    PostFunctionGrammar=OneOf(
+        Ref("OverClauseSegment"),
+        Ref("WithinGroupClauseSegment"),
+    ),
+    Expression_C_Grammar=OneOf(
+        Sequence("EXISTS", Bracketed(Ref("SelectableGrammar"))),
+        # should be first priority, otherwise EXISTS() would be matched as a function
+        Sequence(
+            OneOf(
+                Ref("Expression_D_Grammar"),
+                Ref("CaseExpressionSegment"),
+            ),
+            AnyNumberOf(Ref("TimeZoneGrammar")),
+        ),
+        Ref("ShorthandCastSegment"),
+        Sequence(Ref("NumericLiteralSegment"), OneOf("DAYS", "DAY")),
+    ),
 )
 
 
@@ -56,6 +87,25 @@ db2_dialect.patch_lexer_matchers(
             segment_kwargs={"type": "double_quote"},
         ),
         # In Db2, a field could have a # pound/hash sign
-        RegexLexer("code", r"[0-9a-zA-Z_#]+", CodeSegment),
+        RegexLexer(
+            "code", r"[0-9a-zA-Z_#]+", CodeSegment, segment_kwargs={"type": "code"}
+        ),
     ]
 )
+
+
+class WithinGroupClauseSegment(BaseSegment):
+    """An WITHIN GROUP clause for window functions."""
+
+    type = "withingroup_clause"
+    match_grammar = Sequence(
+        "WITHIN",
+        "GROUP",
+        Bracketed(Anything(optional=True)),
+    )
+
+    parse_grammar = Sequence(
+        "WITHIN",
+        "GROUP",
+        Bracketed(Ref("OrderByClauseSegment", optional=True)),
+    )
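
For Db2, WITHIN GROUP is wired into PostFunctionGrammar and Expression_C_Grammar gains labelled durations such as "30 DAYS". A hypothetical sketch (table and column names invented):

    import sqlfluff

    # An ordered-set aggregate with WITHIN GROUP, plus date arithmetic
    # using the labelled-duration form added to Expression_C_Grammar.
    statements = [
        "SELECT LISTAGG(name, ', ') WITHIN GROUP (ORDER BY name) FROM staff",
        "SELECT order_date + 30 DAYS FROM orders",
    ]
    for sql in statements:
        sqlfluff.parse(sql, dialect="db2")
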
diff --git a/src/sqlfluff/dialects/dialect_db2_keywords.py b/src/sqlfluff/dialects/dialect_db2_keywords.py
new file mode 100644
index 0000000..c8ddc06
--- /dev/null
+++ b/src/sqlfluff/dialects/dialect_db2_keywords.py
@@ -0,0 +1,412 @@
+"""A list of db2 keywords."""
+
+UNRESERVED_KEYWORDS = [
+    # https://www.ibm.com/docs/en/db2/11.5?topic=sql-reserved-schema-names-reserved-words
+    "ACTIVATE",
+    "ADD",
+    "AFTER",
+    "ALIAS",
+    "ALL",
+    "ALLOCATE",
+    "ALLOW",
+    "ALTER",
+    "AND",
+    "ANY",
+    "AS",
+    "ASENSITIVE",
+    "ASSOCIATE",
+    "ASUTIME",
+    "AT",
+    "ATTRIBUTES",
+    "AUDIT",
+    "AUTHORIZATION",
+    "AUX",
+    "AUXILIARY",
+    "BEFORE",
+    "BEGIN",
+    "BETWEEN",
+    "BINARY",
+    "BUFFERPOOL",
+    "BY",
+    "CACHE",
+    "CALL",
+    "CALLED",
+    "CAPTURE",
+    "CARDINALITY",
+    "CASCADED",
+    "CASE",
+    "CAST",
+    "CCSID",
+    "CHAR",
+    "CHARACTER",
+    "CHECK",
+    "CLONE",
+    "CLOSE",
+    "CLUSTER",
+    "COLLECTION",
+    "COLLID",
+    "COLUMN",
+    "COMMENT",
+    "COMMIT",
+    "CONCAT",
+    "CONDITION",
+    "CONNECT",
+    "CONNECTION",
+    "CONSTRAINT",
+    "CONTAINS",
+    "CONTINUE",
+    "COUNT",
+    "COUNT_BIG",
+    "CREATE",
+    "CROSS",
+    "CURRENT",
+    "CURRENT_DATE",
+    "CURRENT_LC_CTYPE",
+    "CURRENT_PATH",
+    "CURRENT_SCHEMA",
+    "CURRENT_SERVER",
+    "CURRENT_TIME",
+    "CURRENT_TIMESTAMP",
+    "CURRENT_TIMEZONE",
+    "CURRENT_USER",
+    "CURSOR",
+    "CYCLE",
+    "DATA",
+    "DATABASE",
+    "DATAPARTITIONNAME",
+    "DATAPARTITIONNUM",
+    "DATE",
+    "DAY",
+    "DAYS",
+    "DB2GENERAL",
+    "DB2GENRL",
+    "DB2SQL",
+    "DBINFO",
+    "DBPARTITIONNAME",
+    "DBPARTITIONNUM",
+    "DEALLOCATE",
+    "DECLARE",
+    "DEFAULT",
+    "DEFAULTS",
+    "DEFINITION",
+    "DELETE",
+    "DENSERANK",
+    "DENSE_RANK",
+    "DESCRIBE",
+    "DESCRIPTOR",
+    "DETERMINISTIC",
+    "DIAGNOSTICS",
+    "DISABLE",
+    "DISALLOW",
+    "DISCONNECT",
+    "DISTINCT",
+    "DO",
+    "DOCUMENT",
+    "DOUBLE",
+    "DROP",
+    "DSSIZE",
+    "DYNAMIC",
+    "EACH",
+    "EDITPROC",
+    "ELSE",
+    "ELSEIF",
+    "ENABLE",
+    "ENCODING",
+    "ENCRYPTION",
+    "END",
+    "END-EXEC",
+    "ENDING",
+    "ERASE",
+    "ESCAPE",
+    "EVERY",
+    "EXCEPT",
+    "EXCEPTION",
+    "EXCLUDING",
+    "EXCLUSIVE",
+    "EXECUTE",
+    "EXISTS",
+    "EXIT",
+    "EXPLAIN",
+    "EXTENDED",
+    "EXTERNAL",
+    "EXTRACT",
+    "FENCED",
+    "FETCH",
+    "FIELDPROC",
+    "FILE",
+    "FINAL",
+    "FIRST1",
+    "FOR",
+    "FOREIGN",
+    "FREE",
+    "FROM",
+    "FULL",
+    "FUNCTION",
+    "GENERAL",
+    "GENERATED",
+    "GET",
+    "GLOBAL",
+    "GO",
+    "GOTO",
+    "GRANT",
+    "GRAPHIC",
+    "GROUP",
+    "HANDLER",
+    "HASH",
+    "HASHED_VALUE",
+    "HAVING",
+    "HINT",
+    "HOLD",
+    "HOUR",
+    "HOURS",
+    "IDENTITY",
+    "IF",
+    "IMMEDIATE",
+    "IMPORT",
+    "IN",
+    "INCLUDING",
+    "INCLUSIVE",
+    "INCREMENT",
+    "INDEX",
+    "INDICATOR",
+    "INDICATORS",
+    "INF",
+    "INFINITY",
+    "INHERIT",
+    "INNER",
+    "INOUT",
+    "INSENSITIVE",
+    "INSERT",
+    "INTEGRITY",
+    "INTERSECT",
+    "INTO",
+    "IS",
+    "ISNULL",
+    "ISOBID",
+    "ISOLATION",
+    "ITERATE",
+    "JAR",
+    "JAVA",
+    "JOIN",
+    "KEEP",
+    "KEY",
+    "LABEL",
+    "LANGUAGE",
+    "LAST3",
+    "LATERAL",
+    "LC_CTYPE",
+    "LEAVE",
+    "LEFT",
+    "LIKE",
+    "LIMIT",
+    "LINKTYPE",
+    "LOCAL",
+    "LOCALDATE",
+    "LOCALE",
+    "LOCALTIME",
+    "LOCALTIMESTAMP",
+    "LOCATOR",
+    "LOCATORS",
+    "LOCK",
+    "LOCKMAX",
+    "LOCKSIZE",
+    "LONG",
+    "LOOP",
+    "MAINTAINED",
+    "MATERIALIZED",
+    "MAXVALUE",
+    "MICROSECOND",
+    "MICROSECONDS",
+    "MINUTE",
+    "MINUTES",
+    "MINVALUE",
+    "MODE",
+    "MODIFIES",
+    "MONTH",
+    "MONTHS",
+    "NAN",
+    "NEW",
+    "NEW_TABLE",
+    "NEXTVAL",
+    "NO",
+    "NOCACHE",
+    "NOCYCLE",
+    "NODENAME",
+    "NODENUMBER",
+    "NOMAXVALUE",
+    "NOMINVALUE",
+    "NONE",
+    "NOORDER",
+    "NORMALIZED",
+    "NOT2",
+    "NOTNULL",
+    "NULL",
+    "NULLS",
+    "NUMPARTS",
+    "OBID",
+    "OF",
+    "OFF",
+    "OFFSET",
+    "OLD",
+    "OLD_TABLE",
+    "ON",
+    "OPEN",
+    "OPTIMIZATION",
+    "OPTIMIZE",
+    "OPTION",
+    "OR",
+    "ORDER",
+    "OUT",
+    "OUTER",
+    "OVER",
+    "OVERRIDING",
+    "PACKAGE",
+    "PADDED",
+    "PAGESIZE",
+    "PARAMETER",
+    "PART",
+    "PARTITION",
+    "PARTITIONED",
+    "PARTITIONING",
+    "PARTITIONS",
+    "PASSWORD",
+    "PATH",
+    "PERCENT",
+    "PIECESIZE",
+    "PLAN",
+    "POSITION",
+    "PRECISION",
+    "PREPARE",
+    "PREVVAL",
+    "PRIMARY",
+    "PRIQTY",
+    "PRIVILEGES",
+    "PROCEDURE",
+    "PROGRAM",
+    "PSID",
+    "PUBLIC",
+    "QUERY",
+    "QUERYNO",
+    "RANGE",
+    "RANK",
+    "READ",
+    "READS",
+    "RECOVERY",
+    "REFERENCES",
+    "REFERENCING",
+    "REFRESH",
+    "RELEASE",
+    "RENAME",
+    "REPEAT",
+    "RESET",
+    "RESIGNAL",
+    "RESTART",
+    "RESTRICT",
+    "RESULT",
+    "RESULT_SET_LOCATOR",
+    "RETURN",
+    "RETURNS",
+    "REVOKE",
+    "RIGHT",
+    "ROLE",
+    "ROLLBACK",
+    "ROUND_CEILING",
+    "ROUND_DOWN",
+    "ROUND_FLOOR",
+    "ROUND_HALF_DOWN",
+    "ROUND_HALF_EVEN",
+    "ROUND_HALF_UP",
+    "ROUND_UP",
+    "ROUTINE",
+    "ROW",
+    "ROWNUMBER",
+    "ROWS",
+    "ROWSET",
+    "ROW_NUMBER",
+    "RRN",
+    "RUN",
+    "SAVEPOINT",
+    "SCHEMA",
+    "SCRATCHPAD",
+    "SCROLL",
+    "SEARCH",
+    "SECOND",
+    "SECONDS",
+    "SECQTY",
+    "SECURITY",
+    "SELECT",
+    "SENSITIVE",
+    "SEQUENCE",
+    "SESSION",
+    "SESSION_USER",
+    "SET",
+    "SIGNAL",
+    "SIMPLE",
+    "SNAN",
+    "SOME",
+    "SOURCE",
+    "SPECIFIC",
+    "SQL",
+    "SQLID",
+    "STACKED",
+    "STANDARD",
+    "START",
+    "STARTING",
+    "STATEMENT",
+    "STATIC",
+    "STATMENT",
+    "STAY",
+    "STOGROUP",
+    "STORES",
+    "STYLE",
+    "SUBSTRING",
+    "SUMMARY",
+    "SYNONYM",
+    "SYSFUN",
+    "SYSIBM",
+    "SYSPROC",
+    "SYSTEM",
+    "SYSTEM_USER",
+    "TABLE",
+    "TABLESPACE",
+    "THEN",
+    "TIME",
+    "TIMESTAMP",
+    "TO",
+    "TRANSACTION",
+    "TRIGGER",
+    "TRIM",
+    "TRUNCATE",
+    "TYPE",
+    "UNDO",
+    "UNION",
+    "UNIQUE",
+    "UNTIL",
+    "UPDATE",
+    "USAGE",
+    "USER",
+    "USING",
+    "VALIDPROC",
+    "VALUE",
+    "VALUES",
+    "VARIABLE",
+    "VARIANT",
+    "VCAT",
+    "VERSION",
+    "VIEW",
+    "VOLATILE",
+    "VOLUMES",
+    "WHEN",
+    "WHENEVER",
+    "WHERE",
+    "WHILE",
+    "WITH",
+    "WITHOUT",
+    "WLM",
+    "WRITE",
+    "XMLELEMENT",
+    "XMLEXISTS",
+    "XMLNAMESPACES",
+    "YEAR",
+    "YEARS",
+]
diff --git a/src/sqlfluff/dialects/dialect_duckdb.py b/src/sqlfluff/dialects/dialect_duckdb.py
new file mode 100644
index 0000000..2dc3d5e
--- /dev/null
+++ b/src/sqlfluff/dialects/dialect_duckdb.py
@@ -0,0 +1,139 @@
+"""The DuckDB dialect.
+
+https://duckdb.org/docs/
+"""
+
+from typing import Optional
+
+from sqlfluff.core.dialects import load_raw_dialect
+from sqlfluff.dialects import dialect_ansi as ansi
+from sqlfluff.core.parser import (
+    Bracketed,
+    Dedent,
+    Delimited,
+    Indent,
+    Matchable,
+    OneOf,
+    Ref,
+    Sequence,
+    StartsWith,
+)
+
+postgres_dialect = load_raw_dialect("postgres")
+
+duckdb_dialect = postgres_dialect.copy_as("duckdb")
+
+duckdb_dialect.replace(
+    SingleIdentifierGrammar=OneOf(
+        Ref("NakedIdentifierSegment"),
+        Ref("QuotedIdentifierSegment"),
+        Ref("SingleQuotedIdentifierSegment"),
+    ),
+)
+
+
+class SelectClauseElementSegment(ansi.SelectClauseElementSegment):
+    """An element in the targets of a select statement."""
+
+    type = "select_clause_element"
+
+    match_grammar = OneOf(
+        Sequence(
+            Ref("WildcardExpressionSegment"),
+            OneOf(
+                Sequence(
+                    "EXCLUDE",
+                    OneOf(
+                        Ref("ColumnReferenceSegment"),
+                        Bracketed(Delimited(Ref("ColumnReferenceSegment"))),
+                    ),
+                ),
+                Sequence(
+                    "REPLACE",
+                    Bracketed(
+                        Delimited(
+                            Sequence(
+                                Ref("BaseExpressionElementGrammar"),
+                                Ref("AliasExpressionSegment", optional=True),
+                            ),
+                        )
+                    ),
+                ),
+                optional=True,
+            ),
+        ),
+        Sequence(
+            Ref("BaseExpressionElementGrammar"),
+            Ref("AliasExpressionSegment", optional=True),
+        ),
+    )
+
+
+class OrderByClauseSegment(ansi.OrderByClauseSegment):
+    """A `ORDER BY` clause like in `SELECT`."""
+
+    match_grammar: Matchable = StartsWith(
+        Sequence("ORDER", "BY"),
+        terminator=Ref("OrderByClauseTerminators"),
+    )
+
+    parse_grammar: Optional[Matchable] = Sequence(
+        "ORDER",
+        "BY",
+        Indent,
+        Delimited(
+            Sequence(
+                OneOf(
+                    "ALL",
+                    Ref("ColumnReferenceSegment"),
+                    Ref("NumericLiteralSegment"),
+                    Ref("ExpressionSegment"),
+                ),
+                OneOf("ASC", "DESC", optional=True),
+                Sequence("NULLS", OneOf("FIRST", "LAST"), optional=True),
+            ),
+            allow_trailing=True,
+            terminator=OneOf(Ref.keyword("LIMIT"), Ref("FrameClauseUnitGrammar")),
+        ),
+        Dedent,
+    )
+
+
+class GroupByClauseSegment(ansi.GroupByClauseSegment):
+    """A `GROUP BY` clause like in `SELECT`."""
+
+    match_grammar: Matchable = StartsWith(
+        Sequence("GROUP", "BY"),
+        terminator=Ref("GroupByClauseTerminatorGrammar"),
+        enforce_whitespace_preceding_terminator=True,
+    )
+
+    parse_grammar: Optional[Matchable] = Sequence(
+        "GROUP",
+        "BY",
+        Indent,
+        Delimited(
+            OneOf(
+                "ALL",
+                Ref("ColumnReferenceSegment"),
+                Ref("NumericLiteralSegment"),
+                Ref("ExpressionSegment"),
+            ),
+            allow_trailing=True,
+            terminator=Ref("GroupByClauseTerminatorGrammar"),
+        ),
+        Dedent,
+    )
+
+
+class ObjectLiteralElementSegment(ansi.ObjectLiteralElementSegment):
+    """An object literal element segment."""
+
+    match_grammar: Matchable = Sequence(
+        OneOf(
+            Ref("NakedIdentifierSegment"),
+            Ref("QuotedLiteralSegment"),
+        ),
+        Ref("ColonSegment"),
+        Ref("BaseExpressionElementGrammar"),
+    )
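
Note: the new DuckDB dialect above builds on Postgres and adds DuckDB's EXCLUDE/REPLACE star modifiers plus the ALL shorthand in ORDER BY and GROUP BY. A minimal sketch of exercising it through sqlfluff's public Python API (the table and column names are illustrative, not from the test suite):

    import sqlfluff

    # DuckDB-specific syntax the new grammar accepts: a star modifier,
    # plus the GROUP BY ALL / ORDER BY ALL shorthands.
    sql = (
        "SELECT * EXCLUDE (secret)\n"
        "FROM payments\n"
        "GROUP BY ALL\n"
        "ORDER BY ALL;\n"
    )

    # lint() returns a list of violation dicts; a statement the dialect
    # could not parse would surface as a PRS (parsing) violation.
    print(sqlfluff.lint(sql, dialect="duckdb"))
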
diff --git a/src/sqlfluff/dialects/dialect_exasol.py b/src/sqlfluff/dialects/dialect_exasol.py
index 8a48e55..4156ce5 100644
--- a/src/sqlfluff/dialects/dialect_exasol.py
+++ b/src/sqlfluff/dialects/dialect_exasol.py
@@ -998,6 +998,21 @@ class ColumnDatatypeSegment(BaseSegment):
     )
 
 
+class BracketedArguments(ansi.BracketedArguments):
+    """A series of bracketed arguments.
+
+    e.g. the bracketed part of numeric(1, 3)
+    """
+
+    match_grammar = Bracketed(
+        # The brackets might be empty for some cases...
+        Delimited(Ref("NumericLiteralSegment"), optional=True),
+    # In Exasol, some types offer an optional MAX
+        # qualifier of BIT, BYTE or CHAR
+        OneOf("BIT", "BYTE", "CHAR", optional=True),
+    )
+
+
 class DatatypeSegment(BaseSegment):
     """A data type segment.
 
@@ -1012,12 +1027,7 @@ class DatatypeSegment(BaseSegment):
         # Numeric Data Types
         Sequence(
             OneOf("DECIMAL", "DEC", "NUMBER", "NUMERIC"),
-            Bracketed(
-                Delimited(
-                    Ref("NumericLiteralSegment"),
-                ),
-                optional=True,
-            ),
+            Ref("BracketedArguments", optional=True),
         ),
         "BIGINT",
         Sequence("DOUBLE", Ref.keyword("PRECISION", optional=True)),
@@ -1038,29 +1048,25 @@ class DatatypeSegment(BaseSegment):
         Sequence(
             "INTERVAL",
             "YEAR",
-            Bracketed(Ref("NumericLiteralSegment"), optional=True),
+            Ref("BracketedArguments", optional=True),
             "TO",
             "MONTH",
         ),
         Sequence(
             "INTERVAL",
             "DAY",
-            Bracketed(Ref("NumericLiteralSegment"), optional=True),
+            Ref("BracketedArguments", optional=True),
             "TO",
             "SECOND",
-            Bracketed(Ref("NumericLiteralSegment"), optional=True),
+            Ref("BracketedArguments", optional=True),
         ),
         Sequence(
             "GEOMETRY",
-            Bracketed(Ref("NumericLiteralSegment"), optional=True),
+            Ref("BracketedArguments", optional=True),
         ),
         Sequence(
             "HASHTYPE",
-            Bracketed(
-                Ref("NumericLiteralSegment"),
-                OneOf("BIT", "BYTE", optional=True),
-                optional=True,
-            ),
+            Ref("BracketedArguments", optional=True),
         ),
         Sequence(
             OneOf(
@@ -1073,23 +1079,19 @@ class DatatypeSegment(BaseSegment):
                         "NVARCHAR",
                         "NVARCHAR2",
                     ),
-                    Bracketed(
-                        Ref("NumericLiteralSegment"),
-                        OneOf("CHAR", "BYTE", optional=True),
-                        optional=True,
-                    ),
+                    Ref("BracketedArguments", optional=True),
                 ),
                 Sequence("LONG", "VARCHAR"),
                 Sequence(
                     "CHARACTER",
                     Sequence(
                         OneOf(Sequence("LARGE", "OBJECT"), "VARYING", optional=True),
-                        Bracketed(Ref("NumericLiteralSegment"), optional=True),
+                        Ref("BracketedArguments", optional=True),
                     ),
                 ),
                 Sequence(
                     "CLOB",
-                    Bracketed(Ref("NumericLiteralSegment"), optional=True),
+                    Ref("BracketedArguments", optional=True),
                 ),
             ),
             Ref("CharCharacterSetGrammar", optional=True),
@@ -1835,16 +1837,10 @@ class MergeInsertClauseSegment(BaseSegment):
     type = "merge_insert_clause"
     match_grammar = Sequence(
         "INSERT",
+        Indent,
         Ref("BracketedColumnReferenceListGrammar", optional=True),
-        "VALUES",
-        Bracketed(
-            Delimited(
-                OneOf(
-                    "DEFAULT",
-                    Ref("ExpressionSegment"),
-                ),
-            )
-        ),
+        Dedent,
+        Ref("ValuesClauseSegment", optional=True),
         Ref("WhereClauseSegment", optional=True),
     )
 
@@ -3229,14 +3225,12 @@ class FunctionBodySegment(BaseSegment):
 
     type = "function_body"
     match_grammar = Sequence(
-        Indent,
         OneOf(
             Ref("FunctionAssignmentSegment"),
             Ref("FunctionIfBranchSegment"),
             Ref("FunctionForLoopSegment"),
             Ref("FunctionWhileLoopSegment"),
         ),
-        Dedent,
     )
 
 
@@ -3266,18 +3260,26 @@ class FunctionIfBranchSegment(BaseSegment):
         "IF",
         AnyNumberOf(Ref("ExpressionSegment")),
         "THEN",
+        Indent,
         AnyNumberOf(Ref("FunctionBodySegment"), min_times=1),
+        Dedent,
         AnyNumberOf(
             Sequence(
                 OneOf("ELSIF", "ELSEIF"),
                 Ref("ExpressionSegment"),
                 "THEN",
+                Indent,
                 AnyNumberOf(Ref("FunctionBodySegment"), min_times=1),
+                Dedent,
             ),
             optional=True,
         ),
         Sequence(
-            "ELSE", AnyNumberOf(Ref("FunctionBodySegment"), min_times=1), optional=True
+            "ELSE",
+            Indent,
+            AnyNumberOf(Ref("FunctionBodySegment"), min_times=1),
+            Dedent,
+            optional=True,
         ),
         "END",
         "IF",
@@ -3633,7 +3635,7 @@ class FileSegment(BaseFileSegment):
 class EmitsSegment(BaseSegment):
     """EMITS Segment for JSON_EXTRACT for example.
 
-    In it's own segment to give it a type to allow L013 to find it easily.
+    In its own segment to give it a type to allow AL03 to find it easily.
     """
 
     type = "emits_segment"
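
Note: the Exasol changes above route every size/precision bracket through the new BracketedArguments segment, which also admits the optional BIT/BYTE/CHAR qualifier. A quick sketch with illustrative DDL:

    import sqlfluff

    # Datatypes whose bracketed arguments now share one segment,
    # including the optional BYTE / CHAR qualifiers.
    for sql in (
        "CREATE TABLE t (h HASHTYPE(16 BYTE));",
        "CREATE TABLE t (v VARCHAR(100 CHAR));",
    ):
        print(sqlfluff.lint(sql, dialect="exasol"))
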
diff --git a/src/sqlfluff/dialects/dialect_greenplum.py b/src/sqlfluff/dialects/dialect_greenplum.py
new file mode 100644
index 0000000..a3c33db
--- /dev/null
+++ b/src/sqlfluff/dialects/dialect_greenplum.py
@@ -0,0 +1,179 @@
+"""The Greenplum dialect.
+
+Greenplum (http://www.greenplum.org/) is a Massively Parallel Postgres,
+so we base this dialect on Postgres.
+"""
+
+from sqlfluff.core.dialects import load_raw_dialect
+from sqlfluff.dialects import dialect_postgres as postgres
+from sqlfluff.core.parser import (
+    AnyNumberOf,
+    Bracketed,
+    Delimited,
+    OneOf,
+    Ref,
+    Sequence,
+)
+
+postgres_dialect = load_raw_dialect("postgres")
+
+greenplum_dialect = postgres_dialect.copy_as("greenplum")
+
+greenplum_dialect.sets("reserved_keywords").update(
+    ["DISTRIBUTED", "RANDOMLY", "REPLICATED"]
+)
+
+
+class CreateTableStatementSegment(postgres.CreateTableStatementSegment):
+    """A `CREATE TABLE` statement.
+
+    As specified in
+    https://docs.vmware.com/en/VMware-Tanzu-Greenplum/6/greenplum-database/GUID-ref_guide-sql_commands-CREATE_TABLE.html
+    This is overridden from Postgres to add the `DISTRIBUTED` clause.
+    """
+
+    match_grammar = Sequence(
+        "CREATE",
+        OneOf(
+            Sequence(
+                OneOf("GLOBAL", "LOCAL", optional=True),
+                Ref("TemporaryGrammar", optional=True),
+            ),
+            "UNLOGGED",
+            optional=True,
+        ),
+        "TABLE",
+        Ref("IfNotExistsGrammar", optional=True),
+        Ref("TableReferenceSegment"),
+        OneOf(
+            # Columns and comment syntax:
+            Sequence(
+                Bracketed(
+                    Delimited(
+                        OneOf(
+                            Sequence(
+                                Ref("ColumnReferenceSegment"),
+                                Ref("DatatypeSegment"),
+                                AnyNumberOf(
+                                    # A single COLLATE segment can come before or after
+                                    # constraint segments
+                                    OneOf(
+                                        Ref("ColumnConstraintSegment"),
+                                        Sequence(
+                                            "COLLATE",
+                                            Ref("ObjectReferenceSegment"),
+                                        ),
+                                    ),
+                                ),
+                            ),
+                            Ref("TableConstraintSegment"),
+                            Sequence(
+                                "LIKE",
+                                Ref("TableReferenceSegment"),
+                                AnyNumberOf(Ref("LikeOptionSegment"), optional=True),
+                            ),
+                        ),
+                    )
+                ),
+                Sequence(
+                    "INHERITS",
+                    Bracketed(Delimited(Ref("TableReferenceSegment"))),
+                    optional=True,
+                ),
+            ),
+            # Create OF syntax:
+            Sequence(
+                "OF",
+                Ref("ParameterNameSegment"),
+                Bracketed(
+                    Delimited(
+                        Sequence(
+                            Ref("ColumnReferenceSegment"),
+                            Sequence("WITH", "OPTIONS", optional=True),
+                            AnyNumberOf(Ref("ColumnConstraintSegment")),
+                        ),
+                        Ref("TableConstraintSegment"),
+                    ),
+                    optional=True,
+                ),
+            ),
+            # Create PARTITION OF syntax
+            Sequence(
+                "PARTITION",
+                "OF",
+                Ref("TableReferenceSegment"),
+                Bracketed(
+                    Delimited(
+                        Sequence(
+                            Ref("ColumnReferenceSegment"),
+                            Sequence("WITH", "OPTIONS", optional=True),
+                            AnyNumberOf(Ref("ColumnConstraintSegment")),
+                        ),
+                        Ref("TableConstraintSegment"),
+                    ),
+                    optional=True,
+                ),
+                OneOf(
+                    Sequence("FOR", "VALUES", Ref("PartitionBoundSpecSegment")),
+                    "DEFAULT",
+                ),
+            ),
+        ),
+        AnyNumberOf(
+            Sequence(
+                "PARTITION",
+                "BY",
+                OneOf("RANGE", "LIST", "HASH"),
+                Bracketed(
+                    AnyNumberOf(
+                        Delimited(
+                            Sequence(
+                                OneOf(
+                                    Ref("ColumnReferenceSegment"),
+                                    Ref("FunctionSegment"),
+                                ),
+                                AnyNumberOf(
+                                    Sequence(
+                                        "COLLATE",
+                                        Ref("QuotedLiteralSegment"),
+                                        optional=True,
+                                    ),
+                                    Ref("ParameterNameSegment", optional=True),
+                                ),
+                            ),
+                        )
+                    )
+                ),
+            ),
+            Sequence("USING", Ref("ParameterNameSegment")),
+            Sequence(
+                "WITH",
+                Bracketed(
+                    Delimited(
+                        Sequence(
+                            Ref("ParameterNameSegment"),
+                            Sequence(
+                                Ref("EqualsSegment"),
+                                Ref("LiteralGrammar"),
+                                optional=True,
+                            ),
+                        ),
+                    )
+                ),
+            ),
+            Sequence(
+                "ON",
+                "COMMIT",
+                OneOf(Sequence("PRESERVE", "ROWS"), Sequence("DELETE", "ROWS"), "DROP"),
+            ),
+            Sequence("TABLESPACE", Ref("TablespaceReferenceSegment")),
+            Sequence(
+                "DISTRIBUTED",
+                OneOf(
+                    "RANDOMLY",
+                    "REPLICATED",
+                    Sequence("BY", Bracketed(Ref("ColumnReferenceSegment"))),
+                ),
+            ),
+        ),
+    )
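
Note: the new Greenplum dialect's main divergence from Postgres is the DISTRIBUTED clause on CREATE TABLE, in its three forms (RANDOMLY, REPLICATED, BY (column)). A minimal sketch with an illustrative table:

    import sqlfluff

    # DISTRIBUTED BY is the Greenplum-specific clause added above.
    sql = "CREATE TABLE sales (id INT, region TEXT) DISTRIBUTED BY (id);"
    print(sqlfluff.lint(sql, dialect="greenplum"))
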
diff --git a/src/sqlfluff/dialects/dialect_hive.py b/src/sqlfluff/dialects/dialect_hive.py
index 2baf3f6..b40044e 100644
--- a/src/sqlfluff/dialects/dialect_hive.py
+++ b/src/sqlfluff/dialects/dialect_hive.py
@@ -147,7 +147,6 @@ hive_dialect.replace(
         TypedParser("double_quote", ansi.LiteralSegment, type="quoted_literal"),
         TypedParser("back_quote", ansi.LiteralSegment, type="quoted_literal"),
     ),
-    SimpleArrayTypeGrammar=Ref.keyword("ARRAY"),
     TrimParametersGrammar=Nothing(),
     SingleIdentifierGrammar=ansi_dialect.get_grammar("SingleIdentifierGrammar").copy(
         insert=[
@@ -225,6 +224,49 @@ hive_dialect.replace(
 )
 
 
+class ArrayTypeSegment(ansi.ArrayTypeSegment):
+    """Prefix for array literals specifying the type."""
+
+    type = "array_type"
+    match_grammar = Sequence(
+        "ARRAY",
+        Bracketed(
+            Ref("DatatypeSegment"),
+            bracket_type="angle",
+            bracket_pairs_set="angle_bracket_pairs",
+            optional=True,
+        ),
+    )
+
+
+class StructTypeSegment(ansi.StructTypeSegment):
+    """Expression to construct a STRUCT datatype."""
+
+    match_grammar = Sequence(
+        "STRUCT",
+        Ref("StructTypeSchemaSegment", optional=True),
+    )
+
+
+class StructTypeSchemaSegment(BaseSegment):
+    """Expression to construct the schema of a STRUCT datatype."""
+
+    type = "struct_type_schema"
+    match_grammar = Bracketed(
+        Delimited(
+            Sequence(
+                Ref("SingleIdentifierGrammar"),
+                Ref("ColonSegment"),
+                Ref("DatatypeSegment"),
+                Ref("CommentGrammar", optional=True),
+            ),
+            bracket_pairs_set="angle_bracket_pairs",
+        ),
+        bracket_pairs_set="angle_bracket_pairs",
+        bracket_type="angle",
+    )
+
+
 class CreateDatabaseStatementSegment(BaseSegment):
     """A `CREATE DATABASE` statement."""
 
@@ -338,6 +380,40 @@ class CreateTableStatementSegment(BaseSegment):
     )
 
 
+class TableConstraintSegment(ansi.TableConstraintSegment):
+    """A table constraint, e.g. for CREATE TABLE."""
+
+    type = "table_constraint"
+
+    match_grammar: Matchable = Sequence(
+        Sequence("CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True),
+        OneOf(
+            Sequence(
+                "UNIQUE",
+                Ref("BracketedColumnReferenceListGrammar"),
+            ),
+            Sequence(
+                Ref("PrimaryKeyGrammar"),
+                Ref("BracketedColumnReferenceListGrammar"),
+                Sequence(
+                    "DISABLE",
+                    "NOVALIDATE",
+                    OneOf("RELY", "NORELY", optional=True),
+                    optional=True,
+                ),
+            ),
+            Sequence(
+                Ref("ForeignKeyGrammar"),
+                Ref("BracketedColumnReferenceListGrammar"),
+                Ref(
+                    "ReferenceDefinitionGrammar"
+                ),  # REFERENCES reftable [ ( refcolumn) ]
+                Sequence("DISABLE", "NOVALIDATE", optional=True),
+            ),
+        ),
+    )
+
+
 class FromExpressionElementSegment(ansi.FromExpressionElementSegment):
     """Modified from ANSI to allow for `LATERAL VIEW` clause."""
 
@@ -396,12 +472,7 @@ class PrimitiveTypeSegment(BaseSegment):
         "TIMESTAMP",
         Sequence(
             OneOf("DECIMAL", "DEC", "NUMERIC"),
-            Bracketed(
-                Ref("NumericLiteralSegment"),
-                Ref("CommaSegment"),
-                Ref("NumericLiteralSegment"),
-                optional=True,
-            ),
+            Ref("BracketedArguments", optional=True),
         ),
         "DATE",
         "VARCHAR",
@@ -416,14 +487,8 @@ class DatatypeSegment(BaseSegment):
     type = "data_type"
     match_grammar = OneOf(
         Ref("PrimitiveTypeSegment"),
-        Sequence(
-            "ARRAY",
-            Bracketed(
-                Ref("DatatypeSegment"),
-                bracket_pairs_set="angle_bracket_pairs",
-                bracket_type="angle",
-            ),
-        ),
+        Ref("ArrayTypeSegment"),
+        Ref("SizedArrayTypeSegment"),
         Sequence(
             "MAP",
             Bracketed(
@@ -436,22 +501,7 @@ class DatatypeSegment(BaseSegment):
                 bracket_type="angle",
             ),
         ),
-        Sequence(
-            "STRUCT",
-            Bracketed(
-                Delimited(
-                    Sequence(
-                        Ref("NakedIdentifierSegment"),
-                        Ref("ColonSegment"),
-                        Ref("DatatypeSegment"),
-                        Ref("CommentGrammar", optional=True),
-                    ),
-                    bracket_pairs_set="angle_bracket_pairs",
-                ),
-                bracket_pairs_set="angle_bracket_pairs",
-                bracket_type="angle",
-            ),
-        ),
+        Ref("StructTypeSegment"),
         Sequence(
             "UNIONTYPE",
             Bracketed(
@@ -462,17 +512,6 @@ class DatatypeSegment(BaseSegment):
                 bracket_type="angle",
             ),
         ),
-        # array types
-        OneOf(
-            AnyNumberOf(
-                Bracketed(
-                    Ref("ExpressionSegment", optional=True), bracket_type="square"
-                )
-            ),
-            Ref("SimpleArrayTypeGrammar"),
-            Sequence(Ref("SimpleArrayTypeGrammar"), Ref("ArrayLiteralSegment")),
-            optional=True,
-        ),
     )
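
Note: Hive's complex types now go through the dedicated ArrayTypeSegment / StructTypeSegment classes rather than inline grammar. A sketch with illustrative DDL:

    import sqlfluff

    # Angle-bracketed complex types handled by the new segments.
    sql = (
        "CREATE TABLE t ("
        "tags ARRAY<STRING>, "
        "person STRUCT<name: STRING, age: INT>"
        ");"
    )
    print(sqlfluff.lint(sql, dialect="hive"))
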
 
 
diff --git a/src/sqlfluff/dialects/dialect_materialize.py b/src/sqlfluff/dialects/dialect_materialize.py
index d84a3d6..991e3e1 100644
--- a/src/sqlfluff/dialects/dialect_materialize.py
+++ b/src/sqlfluff/dialects/dialect_materialize.py
@@ -24,13 +24,13 @@ from sqlfluff.dialects.dialect_materialize_keywords import (
 postgres_dialect = load_raw_dialect("postgres")
 
 materialize_dialect = postgres_dialect.copy_as("materialize")
-materialize_dialect.sets("unreserved_keywords").update(
-    [n.strip().upper() for n in materialize_unreserved_keywords.split("\n")]
+materialize_dialect.update_keywords_set_from_multiline_string(
+    "unreserved_keywords", materialize_unreserved_keywords
 )
 
 materialize_dialect.sets("reserved_keywords").clear()
-materialize_dialect.sets("reserved_keywords").update(
-    [n.strip().upper() for n in materialize_reserved_keywords.split("\n")]
+materialize_dialect.update_keywords_set_from_multiline_string(
+    "reserved_keywords", materialize_reserved_keywords
 )
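
Note: update_keywords_set_from_multiline_string replaces the open-coded comprehension used here and in the MySQL dialect below. Judging by the code it supersedes, it is roughly equivalent to the following stand-in (hypothetical; the real helper is a Dialect method and may also filter blanks or comments):

    # Hypothetical stand-in mirroring the replaced list comprehension.
    def update_keywords_set_from_multiline_string(dialect, set_name, keywords):
        dialect.sets(set_name).update(
            n.strip().upper() for n in keywords.split("\n")
        )
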
 
 
diff --git a/src/sqlfluff/dialects/dialect_mysql.py b/src/sqlfluff/dialects/dialect_mysql.py
index 4103dba..13dffb8 100644
--- a/src/sqlfluff/dialects/dialect_mysql.py
+++ b/src/sqlfluff/dialects/dialect_mysql.py
@@ -18,6 +18,7 @@ from sqlfluff.core.parser import (
     Matchable,
     TypedParser,
     OneOf,
+    OptionallyBracketed,
     Ref,
     RegexLexer,
     RegexParser,
@@ -26,6 +27,8 @@ from sqlfluff.core.parser import (
     StringLexer,
     StringParser,
     SymbolSegment,
+    Indent,
+    Dedent,
 )
 from sqlfluff.dialects.dialect_mysql_keywords import (
     mysql_reserved_keywords,
@@ -93,25 +96,45 @@ mysql_dialect.insert_lexer_matchers(
 # Set Keywords
 # Do not clear inherited unreserved ansi keywords. Too many are needed to parse well.
 # Just add MySQL unreserved keywords.
-mysql_dialect.sets("unreserved_keywords").update(
-    [n.strip().upper() for n in mysql_unreserved_keywords.split("\n")]
+mysql_dialect.update_keywords_set_from_multiline_string(
+    "unreserved_keywords", mysql_unreserved_keywords
 )
 
 mysql_dialect.sets("reserved_keywords").clear()
-mysql_dialect.sets("reserved_keywords").update(
-    [n.strip().upper() for n in mysql_reserved_keywords.split("\n")]
+mysql_dialect.update_keywords_set_from_multiline_string(
+    "reserved_keywords", mysql_reserved_keywords
 )
 
-# Remove these reserved keywords to avoid issue in interval.sql
-# TODO - resolve this properly
-mysql_dialect.sets("reserved_keywords").difference_update(
-    ["MINUTE_SECOND", "SECOND_MICROSECOND"]
+# Set the datetime units
+mysql_dialect.sets("datetime_units").clear()
+mysql_dialect.sets("datetime_units").update(
+    [
+        # https://github.com/mysql/mysql-server/blob/1bfe02bdad6604d54913c62614bde57a055c8332/sql/sql_yacc.yy#L12321-L12345
+        # interval:
+        "DAY_HOUR",
+        "DAY_MICROSECOND",
+        "DAY_MINUTE",
+        "DAY_SECOND",
+        "HOUR_MICROSECOND",
+        "HOUR_MINUTE",
+        "HOUR_SECOND",
+        "MINUTE_MICROSECOND",
+        "MINUTE_SECOND",
+        "SECOND_MICROSECOND",
+        "YEAR_MONTH",
+        # interval_time_stamp
+        "DAY",
+        "WEEK",
+        "HOUR",
+        "MINUTE",
+        "MONTH",
+        "QUARTER",
+        "SECOND",
+        "MICROSECOND",
+        "YEAR",
+    ]
 )
 
-# Remove this reserved keyword to avoid issue in create_table_primary_foreign_keys.sql
-# TODO - resolve this properly
-mysql_dialect.sets("reserved_keywords").difference_update(["INDEX"])
-
 
 mysql_dialect.replace(
     QuotedIdentifierSegment=TypedParser(
@@ -161,8 +184,6 @@ mysql_dialect.replace(
             "DATE",
             "TIME",
             "TIMESTAMP",
-            "DATETIME",
-            "INTERVAL",
             optional=True,
         ),
         OneOf(
@@ -262,8 +283,13 @@ class AliasExpressionSegment(BaseSegment):
 
     type = "alias_expression"
     match_grammar = Sequence(
+        Indent,
         Ref.keyword("AS", optional=True),
-        Ref("SingleIdentifierGrammar"),
+        OneOf(
+            Ref("SingleIdentifierGrammar"),
+            Ref("QuotedLiteralSegment"),
+        ),
+        Dedent,
     )
 
 
@@ -286,16 +312,20 @@ class ColumnDefinitionSegment(BaseSegment):
                     optional=True,
                 ),
                 Sequence(Sequence("NOT", optional=True), "NULL", optional=True),
-                Sequence("DEFAULT", optional=True),
-                OneOf(
-                    Sequence(
-                        OneOf("CURRENT_TIMESTAMP", "NOW"),
-                        Bracketed(
-                            Ref("NumericLiteralSegment", optional=True), optional=True
+                Sequence(
+                    "DEFAULT",
+                    OneOf(
+                        Sequence(
+                            OneOf("CURRENT_TIMESTAMP", "NOW"),
+                            Bracketed(
+                                Ref("NumericLiteralSegment", optional=True),
+                                optional=True,
+                            ),
                         ),
+                        Ref("NumericLiteralSegment"),
+                        Ref("QuotedLiteralSegment"),
+                        optional=True,
                     ),
-                    Ref("NumericLiteralSegment"),
-                    Ref("QuotedLiteralSegment"),
                     optional=True,
                 ),
                 Sequence(
@@ -628,6 +658,16 @@ class DeleteStatementSegment(BaseSegment):
     )
 
 
+class ColumnConstraintSegment(ansi.ColumnConstraintSegment):
+    """A column option; each CREATE TABLE column can have 0 or more."""
+
+    match_grammar: Matchable = OneOf(
+        ansi.ColumnConstraintSegment.match_grammar,
+        Sequence("CHARACTER", "SET", Ref("NakedIdentifierSegment")),
+        Sequence("COLLATE", Ref("NakedIdentifierSegment")),
+    )
+
+
 class IndexTypeGrammar(BaseSegment):
     """index_type in table_constraint."""
 
@@ -682,7 +722,9 @@ class TableConstraintSegment(BaseSegment):
     match_grammar = OneOf(
         Sequence(
             Sequence(  # [ CONSTRAINT <Constraint name> ]
-                "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True
+                "CONSTRAINT",
+                Ref("ObjectReferenceSegment", optional=True),
+                optional=True,
             ),
             OneOf(
                 # UNIQUE [INDEX | KEY] [index_name] [index_type] (key_part,...)
@@ -763,6 +805,37 @@ class TableConstraintSegment(BaseSegment):
     )
 
 
+class CreateIndexStatementSegment(ansi.CreateIndexStatementSegment):
+    """A `CREATE INDEX` statement.
+
+    https://dev.mysql.com/doc/refman/8.0/en/create-index.html
+    """
+
+    match_grammar = Sequence(
+        "CREATE",
+        OneOf("UNIQUE", "FULLTEXT", "SPATIAL", optional=True),
+        "INDEX",
+        Ref("IndexReferenceSegment"),
+        Ref("IndexTypeGrammar", optional=True),
+        "ON",
+        Ref("TableReferenceSegment"),
+        Ref("BracketedKeyPartListGrammar"),
+        Ref("IndexOptionsSegment", optional=True),
+        AnySetOf(
+            Sequence(
+                "ALGORITHM",
+                Ref("EqualsSegment", optional=True),
+                OneOf("DEFAULT", "INPLACE", "COPY"),
+            ),
+            Sequence(
+                "LOCK",
+                Ref("EqualsSegment", optional=True),
+                OneOf("DEFAULT", "NONE", "SHARED", "EXCLUSIVE"),
+            ),
+        ),
+    )
+
+
 class IntervalExpressionSegment(BaseSegment):
     """An interval expression segment.
 
@@ -772,15 +845,8 @@ class IntervalExpressionSegment(BaseSegment):
     type = "interval_expression"
     match_grammar = Sequence(
         "INTERVAL",
-        OneOf(
-            # The Numeric Version
-            Sequence(
-                Ref("ExpressionSegment"),
-                OneOf(Ref("QuotedLiteralSegment"), Ref("DatetimeUnitSegment")),
-            ),
-            # The String version
-            Ref("QuotedLiteralSegment"),
-        ),
+        Ref("ExpressionSegment"),
+        Ref("DatetimeUnitSegment"),
     )
 
 
@@ -839,6 +905,7 @@ mysql_dialect.add(
     # key_part: {col_name [(length)] | (expr)} [ASC | DESC]
     # https://dev.mysql.com/doc/refman/8.0/en/create-table.html
     # https://dev.mysql.com/doc/refman/8.0/en/alter-table.html
+    # https://dev.mysql.com/doc/refman/8.0/en/create-index.html
     BracketedKeyPartListGrammar=Bracketed(
         Delimited(
             Sequence(
@@ -848,6 +915,7 @@ mysql_dialect.add(
                         Ref("ColumnReferenceSegment"),
                         Bracketed(Ref("NumericLiteralSegment")),
                     ),
+                    Bracketed(Ref("ExpressionSegment")),
                 ),
                 OneOf("ASC", "DESC", optional=True),
             ),
@@ -1040,6 +1108,12 @@ class StatementSegment(ansi.StatementSegment):
             Ref("FlushStatementSegment"),
             Ref("LoadDataSegment"),
             Ref("ReplaceSegment"),
+            Ref("AlterDatabaseStatementSegment"),
+            Ref("ReturnStatementSegment"),
+        ],
+        remove=[
+            # handle CREATE SCHEMA in CreateDatabaseStatementSegment
+            Ref("CreateSchemaStatementSegment"),
         ],
     )
 
@@ -1286,7 +1360,12 @@ class AlterViewStatementSegment(BaseSegment):
         Ref("TableReferenceSegment"),
         Ref("BracketedColumnReferenceListGrammar", optional=True),
         "AS",
-        Ref("SelectStatementSegment"),
+        OptionallyBracketed(
+            OneOf(
+                Ref("SelectStatementSegment"),
+                Ref("SetExpressionSegment"),
+            )
+        ),
         Ref("WithCheckOptionSegment", optional=True),
     )
 
@@ -1314,7 +1393,12 @@ class CreateViewStatementSegment(BaseSegment):
         Ref("TableReferenceSegment"),
         Ref("BracketedColumnReferenceListGrammar", optional=True),
         "AS",
-        Ref("SelectStatementSegment"),
+        OptionallyBracketed(
+            OneOf(
+                Ref("SelectStatementSegment"),
+                Ref("SetExpressionSegment"),
+            )
+        ),
         Ref("WithCheckOptionSegment", optional=True),
     )
 
@@ -2099,7 +2183,7 @@ class PurgeBinaryLogsStatementSegment(BaseSegment):
             Sequence(
                 "BEFORE",
                 OneOf(
-                    Ref("DateTimeLiteralGrammar"),
+                    Ref("ExpressionSegment"),
                 ),
             ),
         ),
@@ -2490,3 +2574,114 @@ class ColumnReferenceSegment(ansi.ColumnReferenceSegment):
             ),
         ]
     )
+
+
+class CreateDatabaseStatementSegment(ansi.CreateDatabaseStatementSegment):
+    """A `CREATE DATABASE` statement.
+
+    As specified in https://dev.mysql.com/doc/refman/8.0/en/create-database.html
+    """
+
+    match_grammar: Matchable = Sequence(
+        "CREATE",
+        OneOf("DATABASE", "SCHEMA"),
+        Ref("IfNotExistsGrammar", optional=True),
+        Ref("DatabaseReferenceSegment"),
+        AnyNumberOf(Ref("CreateOptionSegment")),
+    )
+
+
+class CreateOptionSegment(BaseSegment):
+    """A database characteristic.
+
+    As specified in https://dev.mysql.com/doc/refman/8.0/en/create-database.html
+    """
+
+    type = "create_option_segment"
+    match_grammar = Sequence(
+        Ref.keyword("DEFAULT", optional=True),
+        OneOf(
+            Sequence(
+                "CHARACTER",
+                "SET",
+                Ref("EqualsSegment", optional=True),
+                Ref("NakedIdentifierSegment"),
+            ),
+            Sequence(
+                "COLLATE",
+                Ref("EqualsSegment", optional=True),
+                Ref("NakedIdentifierSegment"),
+            ),
+            Sequence(
+                "ENCRYPTION",
+                Ref("EqualsSegment", optional=True),
+                Ref("QuotedLiteralSegment"),
+            ),
+        ),
+    )
+
+
+class AlterDatabaseStatementSegment(BaseSegment):
+    """An `ALTER DATABASE` statement.
+
+    As specified in https://dev.mysql.com/doc/refman/8.0/en/alter-database.html
+    """
+
+    type = "alter_database_statement"
+    match_grammar: Matchable = Sequence(
+        "ALTER",
+        OneOf("DATABASE", "SCHEMA"),
+        Ref("DatabaseReferenceSegment", optional=True),
+        AnyNumberOf(Ref("AlterOptionSegment")),
+    )
+
+
+class AlterOptionSegment(BaseSegment):
+    """A database characteristic.
+
+    As specified in https://dev.mysql.com/doc/refman/8.0/en/alter-database.html
+    """
+
+    type = "alter_option_segment"
+    match_grammar = Sequence(
+        OneOf(
+            Sequence(
+                Ref.keyword("DEFAULT", optional=True),
+                "CHARACTER",
+                "SET",
+                Ref("EqualsSegment", optional=True),
+                Ref("NakedIdentifierSegment"),
+            ),
+            Sequence(
+                Ref.keyword("DEFAULT", optional=True),
+                "COLLATE",
+                Ref("EqualsSegment", optional=True),
+                Ref("NakedIdentifierSegment"),
+            ),
+            Sequence(
+                Ref.keyword("DEFAULT", optional=True),
+                "ENCRYPTION",
+                Ref("EqualsSegment", optional=True),
+                Ref("QuotedLiteralSegment"),
+            ),
+            Sequence(
+                "READ",
+                "ONLY",
+                Ref("EqualsSegment", optional=True),
+                OneOf("DEFAULT", Ref("NumericLiteralSegment")),
+            ),
+        ),
+    )
+
+
+class ReturnStatementSegment(BaseSegment):
+    """A RETURN statement.
+
+    As specified in https://dev.mysql.com/doc/refman/8.0/en/return.html
+    """
+
+    type = "return_statement"
+    match_grammar = Sequence(
+        "RETURN",
+        Ref("ExpressionSegment"),
+    )
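
Note: two of the MySQL changes are worth exercising together: CREATE INDEX gains the ALGORITHM/LOCK options, and INTERVAL now parses as an expression followed by a unit from the rebuilt datetime_units set. A sketch with illustrative statements:

    import sqlfluff

    for sql in (
        # New ALGORITHM / LOCK index options:
        "CREATE INDEX idx_a ON t (a) ALGORITHM = INPLACE LOCK = NONE;",
        # INTERVAL is now <expression> <datetime unit>:
        "SELECT dt + INTERVAL '1:30' MINUTE_SECOND FROM t;",
    ):
        print(sqlfluff.lint(sql, dialect="mysql"))
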
diff --git a/src/sqlfluff/dialects/dialect_postgres.py b/src/sqlfluff/dialects/dialect_postgres.py
index 703363a..19f154d 100644
--- a/src/sqlfluff/dialects/dialect_postgres.py
+++ b/src/sqlfluff/dialects/dialect_postgres.py
@@ -111,6 +111,14 @@ postgres_dialect.insert_lexer_matchers(
             segment_kwargs={"type": "json_operator"},
         ),
         StringLexer("at", "@", CodeSegment),
+        # https://www.postgresql.org/docs/current/sql-syntax-lexical.html
+        RegexLexer(
+            "bit_string_literal",
+            # binary (e.g. b'1001') or hex (e.g. X'1FF')
+            r"[bBxX]'[0-9a-fA-F]*'",
+            CodeSegment,
+            segment_kwargs={"type": "bit_string_literal"},
+        ),
     ],
     before="like_operator",
 )
@@ -166,7 +174,12 @@ postgres_dialect.patch_lexer_matchers(
             CodeSegment,
             segment_kwargs={"type": "double_quote"},
         ),
-        RegexLexer("code", r"[0-9a-zA-Z_]+[0-9a-zA-Z_$]*", CodeSegment),
+        RegexLexer(
+            "code",
+            r"[a-zA-Z_][0-9a-zA-Z_$]*",
+            CodeSegment,
+            segment_kwargs={"type": "code"},
+        ),
     ]
 )
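
Note: the new bit_string_literal lexer rule covers Postgres's binary (b'...') and hex (x'...') bit strings, and the tightened "code" regex stops identifiers from starting with a digit. Illustrative check:

    import sqlfluff

    # Binary and hex bit-string literals now lex as a dedicated type.
    print(sqlfluff.lint("SELECT B'1001', X'1FF';", dialect="postgres"))
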
 
@@ -234,18 +247,44 @@ postgres_dialect.add(
         Ref("MultilineConcatenateNewline"), min_times=1, allow_gaps=False
     ),
     # Add a Full equivalent which also allow keywords
-    NakedIdentifierFullSegment=RegexParser(
-        r"[A-Z_][A-Z0-9_]*",
+    NakedIdentifierFullSegment=TypedParser(
+        "code",
         ansi.IdentifierSegment,
         type="naked_identifier_all",
     ),
+    PropertiesNakedIdentifierSegment=TypedParser(  # allows reserved keywords
+        "code",
+        CodeSegment,
+        type="properties_naked_identifier",
+    ),
     SingleIdentifierFullGrammar=OneOf(
         Ref("NakedIdentifierSegment"),
         Ref("QuotedIdentifierSegment"),
         Ref("NakedIdentifierFullSegment"),
     ),
+    DefinitionArgumentValueGrammar=OneOf(
+        # This comes from def_arg:
+        # https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L6331
+        # TODO: this list is incomplete
+        Ref("LiteralGrammar"),
+        # This is a gross simplification of the grammar, which seems overly
+        # permissive for the actual use cases here.  Grammar says this matches
+        # reserved keywords.  Plus also unreserved keywords and IDENT:  func_type -->
+        #     Typename --> SimpleTypename --> GenericType --> type_function_name -->
+        #     { unreserved_keyword | type_func_name_keyword | IDENT }
+        # We'll just match any normal code/keyword string here to keep it simple.
+        Ref("PropertiesNakedIdentifierSegment"),
+    ),
     CascadeRestrictGrammar=OneOf("CASCADE", "RESTRICT"),
+    ExtendedTableReferenceGrammar=OneOf(
+        Ref("TableReferenceSegment"),
+        Sequence("ONLY", OptionallyBracketed(Ref("TableReferenceSegment"))),
+        Sequence(Ref("TableReferenceSegment"), Ref("StarSegment")),
+    ),
     RightArrowSegment=StringParser("=>", SymbolSegment, type="right_arrow"),
+    OnKeywordAsIdentifierSegment=StringParser(
+        "ON", ansi.IdentifierSegment, type="naked_identifier"
+    ),
 )
 
 postgres_dialect.replace(
@@ -310,6 +349,21 @@ postgres_dialect.replace(
                 ),
             ),
         ),
+        Sequence(
+            TypedParser(
+                "bit_string_literal",
+                ansi.LiteralSegment,
+                type="quoted_literal",
+            ),
+            AnyNumberOf(
+                Ref("MultilineConcatenateDelimiterGrammar"),
+                TypedParser(
+                    "bit_string_literal",
+                    ansi.LiteralSegment,
+                    type="quoted_literal",
+                ),
+            ),
+        ),
         Delimited(
             TypedParser(
                 "unicode_single_quote",
@@ -421,7 +475,11 @@ postgres_dialect.replace(
         ],
         before=Ref("ArrayLiteralSegment"),
     ),
-    SimpleArrayTypeGrammar=Ref.keyword("ARRAY"),
+    FromClauseTerminatorGrammar=ansi_dialect.get_grammar(
+        "FromClauseTerminatorGrammar"
+    ).copy(
+        insert=[Ref("ForClauseSegment")],
+    ),
     WhereClauseTerminatorGrammar=OneOf(
         "LIMIT",
         Sequence("GROUP", "BY"),
@@ -432,6 +490,7 @@ postgres_dialect.replace(
         "OVERLAPS",
         "RETURNING",
         Sequence("ON", "CONFLICT"),
+        Ref("ForClauseSegment"),
     ),
     OrderByClauseTerminators=OneOf(
         "LIMIT",
@@ -442,12 +501,25 @@ postgres_dialect.replace(
         Ref("FrameClauseUnitGrammar"),
         "SEPARATOR",
         Sequence("WITH", "DATA"),
+        Ref("ForClauseSegment"),
     ),
     Accessor_Grammar=AnyNumberOf(
         Ref("ArrayAccessorSegment"),
         # Add in semi structured expressions
         Ref("SemiStructuredAccessorSegment"),
     ),
+    # PostgreSQL supports the non-standard "RETURNING" keyword, and therefore the
+    # INSERT/UPDATE/DELETE statements can also be used in subqueries.
+    NonWithSelectableGrammar=OneOf(
+        Ref("SetExpressionSegment"),
+        OptionallyBracketed(Ref("SelectStatementSegment")),
+        Ref("NonSetSelectableGrammar"),
+        # moved from NonWithNonSelectableGrammar:
+        Ref("UpdateStatementSegment"),
+        Ref("InsertStatementSegment"),
+        Ref("DeleteStatementSegment"),
+    ),
+    NonWithNonSelectableGrammar=OneOf(),
 )
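
Note: the reworked NonWithSelectableGrammar is what lets data-modifying statements with RETURNING act as selectables inside WITH. Illustrative sketch (table and column names invented):

    import sqlfluff

    # DELETE ... RETURNING used as a CTE body, then selected from.
    sql = (
        "WITH moved AS (DELETE FROM queue WHERE done RETURNING *) "
        "SELECT count(*) FROM moved;"
    )
    print(sqlfluff.lint(sql, dialect="postgres"))
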
 
 
@@ -514,44 +586,30 @@ class ArrayAccessorSegment(ansi.ArrayAccessorSegment):
     numbers on either side of the slice segment are optional.
     """
 
-    match_grammar = Sequence(
-        AnyNumberOf(
-            Bracketed(
-                Sequence(
-                    OneOf(
-                        OneOf(
-                            Ref("QualifiedNumericLiteralSegment"),
-                            Ref("NumericLiteralSegment"),
-                        ),
-                        Sequence(
-                            OneOf(
-                                Ref("QualifiedNumericLiteralSegment"),
-                                Ref("NumericLiteralSegment"),
-                                optional=True,
-                            ),
-                            Ref("SliceSegment"),
-                            OneOf(
-                                Ref("QualifiedNumericLiteralSegment"),
-                                Ref("NumericLiteralSegment"),
-                            ),
-                        ),
-                        Sequence(
-                            OneOf(
-                                Ref("QualifiedNumericLiteralSegment"),
-                                Ref("NumericLiteralSegment"),
-                            ),
-                            Ref("SliceSegment"),
-                            OneOf(
-                                Ref("QualifiedNumericLiteralSegment"),
-                                Ref("NumericLiteralSegment"),
-                                optional=True,
-                            ),
-                        ),
-                    ),
+    match_grammar = Bracketed(
+        OneOf(
+            # These three are for a single element access: [n]
+            Ref("QualifiedNumericLiteralSegment"),
+            Ref("NumericLiteralSegment"),
+            Ref("ExpressionSegment"),
+            # This is for slice access: [n:m], [:m], [n:], and [:]
+            Sequence(
+                OneOf(
+                    Ref("QualifiedNumericLiteralSegment"),
+                    Ref("NumericLiteralSegment"),
+                    Ref("ExpressionSegment"),
+                    optional=True,
                 ),
-                bracket_type="square",
-            )
-        )
+                Ref("SliceSegment"),
+                OneOf(
+                    Ref("QualifiedNumericLiteralSegment"),
+                    Ref("NumericLiteralSegment"),
+                    Ref("ExpressionSegment"),
+                    optional=True,
+                ),
+            ),
+        ),
+        bracket_type="square",
     )
 
 
@@ -578,7 +636,7 @@ class DateTimeLiteralGrammar(BaseSegment):
 
     type = "datetime_literal"
     match_grammar = Sequence(
-        Ref("DateTimeTypeIdentifier"),
+        Ref("DateTimeTypeIdentifier", optional=True),
         Ref("QuotedLiteralSegment"),
     )
 
@@ -623,15 +681,12 @@ class DatatypeSegment(ansi.DatatypeSegment):
                     # numeric types [(precision)]
                     Sequence(
                         OneOf("FLOAT"),
-                        Bracketed(Ref("NumericLiteralSegment"), optional=True),
+                        Ref("BracketedArguments", optional=True),
                     ),
                     # numeric types [precision ["," scale])]
                     Sequence(
                         OneOf("DECIMAL", "NUMERIC"),
-                        Bracketed(
-                            Delimited(Ref("NumericLiteralSegment")),
-                            optional=True,
-                        ),
+                        Ref("BracketedArguments", optional=True),
                     ),
                     # monetary type
                     "MONEY",
@@ -640,11 +695,15 @@ class DatatypeSegment(ansi.DatatypeSegment):
                         Sequence(
                             OneOf(
                                 "CHAR",
+                                # CHAR VARYING is not documented, but it's
+                                # in the real grammar:
+                                # https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L14262
+                                Sequence("CHAR", "VARYING"),
                                 "CHARACTER",
                                 Sequence("CHARACTER", "VARYING"),
                                 "VARCHAR",
                             ),
-                            Bracketed(Ref("NumericLiteralSegment"), optional=True),
+                            Ref("BracketedArguments", optional=True),
                         ),
                         "TEXT",
                     ),
@@ -662,10 +721,7 @@ class DatatypeSegment(ansi.DatatypeSegment):
                     Sequence(
                         "BIT",
                         OneOf("VARYING", optional=True),
-                        Bracketed(
-                            Ref("NumericLiteralSegment"),
-                            optional=True,
-                        ),
+                        Ref("BracketedArguments", optional=True),
                     ),
                     # uuid type
                     "UUID",
@@ -694,13 +750,158 @@ class DatatypeSegment(ansi.DatatypeSegment):
                     Ref("ExpressionSegment", optional=True), bracket_type="square"
                 )
             ),
-            Ref("SimpleArrayTypeGrammar"),
-            Sequence(Ref("SimpleArrayTypeGrammar"), Ref("ArrayLiteralSegment")),
+            Ref("ArrayTypeSegment"),
+            Ref("SizedArrayTypeSegment"),
+            optional=True,
+        ),
+    )
+
+
+class ArrayTypeSegment(ansi.ArrayTypeSegment):
+    """Prefix for array literals specifying the type."""
+
+    type = "array_type"
+    match_grammar = Ref.keyword("ARRAY")
+
+
+class IndexAccessMethodSegment(BaseSegment):
+    """Index access method (e.g. `USING gist`)."""
+
+    type = "index_access_method"
+    match_grammar = Ref("SingleIdentifierGrammar")
+
+
+class OperatorClassReferenceSegment(ObjectReferenceSegment):
+    """A reference to an operator class."""
+
+    type = "operator_class_reference"
+
+
+class DefinitionParameterSegment(BaseSegment):
+    """A single definition parameter.
+
+    https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L6320
+    """
+
+    type = "definition_parameter"
+    match_grammar: Matchable = Sequence(
+        Ref("PropertiesNakedIdentifierSegment"),
+        Sequence(
+            Ref("EqualsSegment"),
+            # could also contain ParameterNameSegment:
+            Ref("DefinitionArgumentValueGrammar"),
+            optional=True,
+        ),
+    )
+
+
+class DefinitionParametersSegment(BaseSegment):
+    """List of definition parameters.
+
+    https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L6313
+    """
+
+    type = "definition_parameters"
+    match_grammar: Matchable = Bracketed(
+        Delimited(
+            Ref("DefinitionParameterSegment"),
+        )
+    )
+
+
+class CreateCastStatementSegment(ansi.CreateCastStatementSegment):
+    """A `CREATE CAST` statement.
+
+    https://www.postgresql.org/docs/15/sql-createcast.html
+    https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L8951
+    """
+
+    match_grammar: Matchable = Sequence(
+        "CREATE",
+        "CAST",
+        Bracketed(
+            Ref("DatatypeSegment"),
+            "AS",
+            Ref("DatatypeSegment"),
+        ),
+        OneOf(
+            Sequence(
+                "WITH",
+                "FUNCTION",
+                Ref("FunctionNameSegment"),
+                Ref("FunctionParameterListGrammar", optional=True),
+            ),
+            Sequence("WITHOUT", "FUNCTION"),
+            Sequence("WITH", "INOUT"),
+        ),
+        OneOf(
+            Sequence("AS", "ASSIGNMENT", optional=True),
+            Sequence("AS", "IMPLICIT", optional=True),
             optional=True,
         ),
     )
 
 
+class DropCastStatementSegment(ansi.DropCastStatementSegment):
+    """A `DROP CAST` statement.
+
+    https://www.postgresql.org/docs/15/sql-dropcast.html
+    https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L8995
+    """
+
+    match_grammar: Matchable = Sequence(
+        "DROP",
+        "CAST",
+        Sequence("IF", "EXISTS", optional=True),
+        Bracketed(
+            Ref("DatatypeSegment"),
+            "AS",
+            Ref("DatatypeSegment"),
+        ),
+        Ref("DropBehaviorGrammar", optional=True),
+    )
+
+
+class RelationOptionSegment(BaseSegment):
+    """Relation option element from reloptions.
+
+    It is very similar to DefinitionParameterSegment except that it allows qualified
+    names (e.g. namespace.attr = 5).
+
+    https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L3016-L3035
+    """
+
+    type = "relation_option"
+    match_grammar: Matchable = Sequence(
+        Ref("PropertiesNakedIdentifierSegment"),
+        Sequence(
+            Ref("DotSegment"),
+            Ref("PropertiesNakedIdentifierSegment"),
+            optional=True,
+        ),
+        Sequence(
+            Ref("EqualsSegment"),
+            # could also contain ParameterNameSegment:
+            Ref("DefinitionArgumentValueGrammar"),
+            optional=True,
+        ),
+    )
+
+
+class RelationOptionsSegment(BaseSegment):
+    """List of relation options.
+
+    https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L3003-L3014
+    """
+
+    type = "relation_options"
+    match_grammar: Matchable = Bracketed(
+        Delimited(
+            Ref("RelationOptionSegment"),
+        )
+    )
+
+
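
Note: RelationOptionsSegment, defined above, later replaces the inline WITH (...) and SET/RESET grammars on CREATE TABLE and ALTER TABLE, and unlike those it accepts qualified namespace.attr keys. Illustrative:

    import sqlfluff

    # Qualified storage parameters (namespace.attr = value) now parse.
    sql = "ALTER TABLE t SET (fillfactor = 70, toast.autovacuum_enabled = off);"
    print(sqlfluff.lint(sql, dialect="postgres"))
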
 class CreateFunctionStatementSegment(ansi.CreateFunctionStatementSegment):
     """A `CREATE FUNCTION` statement.
 
@@ -1139,14 +1340,44 @@ class IntoClauseSegment(BaseSegment):
     )
 
 
+class ForClauseSegment(BaseSegment):
+    """`FOR ...` clause in `SELECT` statements.
+
+    As specified in
+    https://www.postgresql.org/docs/current/sql-select.html#SQL-FOR-UPDATE-SHARE.
+    """
+
+    type = "for_clause"
+
+    match_grammar = Sequence(
+        "FOR",
+        OneOf(
+            "UPDATE",
+            Sequence("NO", "KEY", "UPDATE"),
+            "SHARE",
+            Sequence("KEY", "SHARE"),
+        ),
+        Sequence(
+            "OF",
+            Delimited(
+                Ref("TableReferenceSegment"),
+            ),
+            optional=True,
+        ),
+        OneOf(
+            "NOWAIT",
+            Sequence("SKIP", "LOCKED"),
+            optional=True,
+        ),
+    )
+
+
 class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment):
     """Overrides ANSI Statement, to allow for SELECT INTO statements."""
 
     match_grammar = ansi.UnorderedSelectStatementSegment.match_grammar.copy()
     match_grammar.terminator = match_grammar.terminator.copy(  # type: ignore
-        insert=[
-            Sequence("ON", "CONFLICT"),
-        ],
+        insert=[Sequence("ON", "CONFLICT"), Ref("WithCheckOptionSegment")],
     )
     parse_grammar = ansi.UnorderedSelectStatementSegment.parse_grammar.copy(
         insert=[
@@ -1161,9 +1392,7 @@ class SelectStatementSegment(ansi.SelectStatementSegment):
 
     match_grammar = ansi.SelectStatementSegment.match_grammar.copy()
     match_grammar.terminator = match_grammar.terminator.copy(  # type: ignore
-        insert=[
-            Sequence("ON", "CONFLICT"),
-        ],
+        insert=[Sequence("ON", "CONFLICT"), Ref("WithCheckOptionSegment")],
     )
     parse_grammar = UnorderedSelectStatementSegment.parse_grammar.copy(
         insert=[
@@ -1171,6 +1400,9 @@ class SelectStatementSegment(ansi.SelectStatementSegment):
             Ref("LimitClauseSegment", optional=True),
             Ref("NamedWindowSegment", optional=True),
         ]
+    ).copy(
+        insert=[Ref("ForClauseSegment", optional=True)],
+        before=Ref("LimitClauseSegment", optional=True),
     )
 
 
@@ -1409,21 +1641,24 @@ class AlterRoleStatementSegment(BaseSegment):
                 Sequence(
                     "IN",
                     "DATABASE",
-                    Ref("ObjectReferenceSegment"),
+                    Ref("DatabaseReferenceSegment"),
                     optional=True,
                 ),
                 OneOf(
                     Sequence(
                         "SET",
-                        Ref("ObjectReferenceSegment"),
+                        Ref("ParameterNameSegment"),
                         OneOf(
                             Sequence(
                                 OneOf("TO", Ref("EqualsSegment")),
                                 OneOf(
-                                    Ref("QuotedLiteralSegment"),
                                     "DEFAULT",
-                                    "ON",
-                                    "OFF",
+                                    Delimited(
+                                        Ref("LiteralGrammar"),
+                                        Ref("NakedIdentifierSegment"),
+                                        # https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L1810-L1815
+                                        Ref("OnKeywordAsIdentifierSegment"),
+                                    ),
                                 ),
                             ),
                             Sequence(
@@ -1432,7 +1667,7 @@ class AlterRoleStatementSegment(BaseSegment):
                             ),
                         ),
                     ),
-                    Sequence("RESET", OneOf(Ref("QuotedLiteralSegment"), "ALL")),
+                    Sequence("RESET", OneOf(Ref("ParameterNameSegment"), "ALL")),
                 ),
                 optional=True,
             ),
@@ -1507,6 +1742,29 @@ class ExplainOptionSegment(BaseSegment):
     )
 
 
+class CreateSchemaStatementSegment(ansi.CreateSchemaStatementSegment):
+    """A `CREATE SCHEMA` statement.
+
+    https://www.postgresql.org/docs/15/sql-createschema.html
+    https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L1493
+    """
+
+    match_grammar: Matchable = Sequence(
+        "CREATE",
+        "SCHEMA",
+        Ref("IfNotExistsGrammar", optional=True),
+        OneOf(
+            Sequence(
+                # schema name defaults to role if not provided
+                Ref("SchemaReferenceSegment", optional=True),
+                "AUTHORIZATION",
+                Ref("RoleReferenceSegment"),
+            ),
+            Ref("SchemaReferenceSegment"),
+        ),
+    )
+
+
 class CreateTableStatementSegment(ansi.CreateTableStatementSegment):
     """A `CREATE TABLE` statement.
 
@@ -1628,21 +1886,7 @@ class CreateTableStatementSegment(ansi.CreateTableStatementSegment):
             ),
             Sequence("USING", Ref("ParameterNameSegment")),
             OneOf(
-                Sequence(
-                    "WITH",
-                    Bracketed(
-                        AnyNumberOf(
-                            Sequence(
-                                Ref("ParameterNameSegment"),
-                                Sequence(
-                                    Ref("EqualsSegment"),
-                                    Ref("LiteralGrammar"),
-                                    optional=True,
-                                ),
-                            )
-                        )
-                    ),
-                ),
+                Sequence("WITH", Ref("RelationOptionsSegment")),
                 Sequence("WITHOUT", "OIDS"),
             ),
             Sequence(
@@ -1688,12 +1932,15 @@ class CreateTableAsStatementSegment(BaseSegment):
                 Sequence(
                     "WITH",
                     Bracketed(
-                        AnyNumberOf(
+                        Delimited(
                             Sequence(
                                 Ref("ParameterNameSegment"),
                                 Sequence(
                                     Ref("EqualsSegment"),
-                                    Ref("LiteralGrammar"),
+                                    OneOf(
+                                        Ref("LiteralGrammar"),
+                                        Ref("NakedIdentifierSegment"),
+                                    ),
                                     optional=True,
                                 ),
                             )
@@ -1880,32 +2127,16 @@ class AlterTableActionSegment(BaseSegment):
                     Ref("IfExistsGrammar", optional=True),
                 ),
                 Sequence("SET", "STATISTICS", Ref("NumericLiteralSegment")),
-                Sequence(
-                    "SET",
-                    Bracketed(
-                        Delimited(
-                            Sequence(
-                                Ref("ParameterNameSegment"),
-                                Ref("EqualsSegment"),
-                                Ref("LiteralGrammar"),
-                            ),
-                        )
-                    ),
-                ),
-                Sequence(
-                    "RESET",
-                    Bracketed(Delimited(Ref("ParameterNameSegment"))),
-                ),
+                Sequence("SET", Ref("RelationOptionsSegment")),
+                # Documentation says you can only provide keys in RESET options, but the
+                # actual grammar lets you pass in values too.
+                Sequence("RESET", Ref("RelationOptionsSegment")),
                 Sequence(
                     "SET", "STORAGE", OneOf("PLAIN", "EXTERNAL", "EXTENDED", "MAIN")
                 ),
             ),
         ),
-        Sequence(
-            "ADD",
-            Ref("TableConstraintSegment"),
-            Sequence("NOT", "VALID", optional=True),
-        ),
+        Sequence("ADD", Ref("TableConstraintSegment")),
         Sequence("ADD", Ref("TableConstraintUsingIndexSegment")),
         Sequence(
             "ALTER",
@@ -1954,22 +2185,10 @@ class AlterTableActionSegment(BaseSegment):
         Sequence("SET", "WITHOUT", OneOf("CLUSTER", "OIDS")),
         Sequence("SET", "TABLESPACE", Ref("TablespaceReferenceSegment")),
         Sequence("SET", OneOf("LOGGED", "UNLOGGED")),
-        Sequence(
-            "SET",
-            Bracketed(
-                Delimited(
-                    Sequence(
-                        Ref("ParameterNameSegment"),
-                        Ref("EqualsSegment"),
-                        Ref("LiteralGrammar"),
-                    ),
-                )
-            ),
-        ),
-        Sequence(
-            "RESET",
-            Bracketed(Delimited(Ref("ParameterNameSegment"))),
-        ),
+        Sequence("SET", Ref("RelationOptionsSegment")),
+        # Documentation says you can only provide keys in RESET options, but the
+        # actual grammar lets you pass in values too.
+        Sequence("RESET", Ref("RelationOptionsSegment")),
         Sequence(
             Ref.keyword("NO", optional=True), "INHERIT", Ref("TableReferenceSegment")
         ),
@@ -1998,6 +2217,17 @@ class AlterTableActionSegment(BaseSegment):
     )
 
 
+class VersionIdentifierSegment(BaseSegment):
+    """A reference to an version."""
+
+    type = "version_identifier"
+    # match grammar (don't allow whitespace)
+    match_grammar: Matchable = OneOf(
+        Ref("QuotedLiteralSegment"),
+        Ref("NakedIdentifierSegment"),
+    )
+
+
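
The VersionIdentifierSegment matches either a quoted literal or a naked identifier, which covers the VERSION clause of CREATE EXTENSION below. A hedged sketch via sqlfluff.parse; the full extension grammar is elided from this hunk, so the statements are modeled on the Postgres syntax linked in the docstrings:

import sqlfluff

# VERSION accepts a quoted literal ('1.4') or a naked identifier.
sqlfluff.parse(
    "CREATE EXTENSION IF NOT EXISTS hstore WITH SCHEMA public VERSION '1.4';",
    dialect="postgres",
)
sqlfluff.parse("DROP EXTENSION IF EXISTS hstore CASCADE;", dialect="postgres")
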
 class CreateExtensionStatementSegment(BaseSegment):
     """A `CREATE EXTENSION` statement.
 
@@ -2029,7 +2259,133 @@ class DropExtensionStatementSegment(BaseSegment):
         "EXTENSION",
         Ref("IfExistsGrammar", optional=True),
         Ref("ExtensionReferenceSegment"),
-        Ref("CascadeRestrictGrammar", optional=True),
+        Ref("DropBehaviorGrammar", optional=True),
+    )
+
+
+class PublicationReferenceSegment(ObjectReferenceSegment):
+    """A reference to a publication."""
+
+    type = "publication_reference"
+    match_grammar: Matchable = Ref("SingleIdentifierGrammar")
+
+
+class PublicationTableSegment(BaseSegment):
+    """Specification for a single table object in a publication."""
+
+    type = "publication_table"
+    match_grammar: Matchable = Sequence(
+        Ref("ExtendedTableReferenceGrammar"),
+        Ref("BracketedColumnReferenceListGrammar", optional=True),
+        Sequence("WHERE", Bracketed(Ref("ExpressionSegment")), optional=True),
+    )
+
+
+class PublicationObjectsSegment(BaseSegment):
+    """Specification for one or more objects in a publication.
+
+    Unlike the underlying PG grammar, which has one object per PublicationObjSpec
+    and so requires tracking the previous object type when it's a "continuation
+    object type", this grammar groups the continuation objects together, e.g.
+    "TABLE a, b, TABLE c, d" results in two segments: one containing references
+    "a, b", and the other containing "c, d".
+
+    https://www.postgresql.org/docs/15/sql-createpublication.html
+    https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L10435-L10530
+    """
+
+    type = "publication_objects"
+    match_grammar: Matchable = OneOf(
+        Sequence(
+            "TABLE",
+            Delimited(
+                Ref("PublicationTableSegment"),
+                terminator=Sequence(Ref("CommaSegment"), OneOf("TABLE", "TABLES")),
+            ),
+        ),
+        Sequence(
+            "TABLES",
+            "IN",
+            "SCHEMA",
+            Delimited(
+                OneOf(Ref("SchemaReferenceSegment"), "CURRENT_SCHEMA"),
+                terminator=Sequence(Ref("CommaSegment"), OneOf("TABLE", "TABLES")),
+            ),
+        ),
+    )
+
+
+class CreatePublicationStatementSegment(BaseSegment):
+    """A `CREATE PUBLICATION` statement.
+
+    https://www.postgresql.org/docs/15/sql-createpublication.html
+    https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L10390-L10530
+    """
+
+    type = "create_publication_statement"
+    match_grammar: Matchable = Sequence(
+        "CREATE",
+        "PUBLICATION",
+        Ref("PublicationReferenceSegment"),
+        OneOf(
+            Sequence("FOR", "ALL", "TABLES"),
+            Sequence("FOR", Delimited(Ref("PublicationObjectsSegment"))),
+            optional=True,
+        ),
+        Sequence(
+            "WITH",
+            Ref("DefinitionParametersSegment"),
+            optional=True,
+        ),
+    )
+
+
+class AlterPublicationStatementSegment(BaseSegment):
+    """A `ALTER PUBLICATION` statement.
+
+    https://www.postgresql.org/docs/15/sql-alterpublication.html
+    https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L10549
+    """
+
+    type = "alter_publication_statement"
+    match_grammar: Matchable = Sequence(
+        "ALTER",
+        "PUBLICATION",
+        Ref("PublicationReferenceSegment"),
+        OneOf(
+            Sequence("SET", Ref("DefinitionParametersSegment")),
+            Sequence("ADD", Delimited(Ref("PublicationObjectsSegment"))),
+            Sequence("SET", Delimited(Ref("PublicationObjectsSegment"))),
+            Sequence("DROP", Delimited(Ref("PublicationObjectsSegment"))),
+            Sequence("RENAME", "TO", Ref("PublicationReferenceSegment")),
+            Sequence(
+                "OWNER",
+                "TO",
+                OneOf(
+                    "CURRENT_ROLE",
+                    "CURRENT_USER",
+                    "SESSION_USER",
+                    # must come last; CURRENT_USER isn't reserved:
+                    Ref("RoleReferenceSegment"),
+                ),
+            ),
+        ),
+    )
+
+
+class DropPublicationStatementSegment(BaseSegment):
+    """A `DROP PUBLICATION` statement.
+
+    https://www.postgresql.org/docs/15/sql-droppublication.html
+    """
+
+    type = "drop_publication_statement"
+    match_grammar: Matchable = Sequence(
+        "DROP",
+        "PUBLICATION",
+        Ref("IfExistsGrammar", optional=True),
+        Delimited(Ref("PublicationReferenceSegment")),
+        Ref("DropBehaviorGrammar", optional=True),
     )
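
Taken together, the publication segments above accept grouped object lists, as the PublicationObjectsSegment docstring describes. A minimal sketch assuming sqlfluff 2.x's simple sqlfluff.parse API (hypothetical publication and table names):

import sqlfluff

# Each TABLE / TABLES IN SCHEMA group becomes one publication_objects segment.
for sql in (
    "CREATE PUBLICATION pub_all FOR ALL TABLES;",
    "CREATE PUBLICATION pub_mixed FOR TABLE users, orders, TABLES IN SCHEMA archive;",
    "ALTER PUBLICATION pub_mixed OWNER TO CURRENT_USER;",
    "DROP PUBLICATION IF EXISTS pub_all, pub_mixed CASCADE;",
):
    sqlfluff.parse(sql, dialect="postgres")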
 
 
@@ -2049,30 +2405,9 @@ class CreateMaterializedViewStatementSegment(BaseSegment):
         Ref("IfNotExistsGrammar", optional=True),
         Ref("TableReferenceSegment"),
         Ref("BracketedColumnReferenceListGrammar", optional=True),
-        AnyNumberOf(
-            Sequence("USING", Ref("ParameterNameSegment"), optional=True),
-            Sequence("TABLESPACE", Ref("TablespaceReferenceSegment"), optional=True),
-            Sequence(
-                "WITH",
-                Bracketed(
-                    Delimited(
-                        Sequence(
-                            Ref("ParameterNameSegment"),
-                            Sequence(
-                                Ref("DotSegment"),
-                                Ref("ParameterNameSegment"),
-                                optional=True,
-                            ),
-                            Sequence(
-                                Ref("EqualsSegment"),
-                                Ref("LiteralGrammar"),
-                                optional=True,
-                            ),
-                        ),
-                    )
-                ),
-            ),
-        ),
+        Sequence("USING", Ref("ParameterNameSegment"), optional=True),
+        Sequence("WITH", Ref("RelationOptionsSegment"), optional=True),
+        Sequence("TABLESPACE", Ref("TablespaceReferenceSegment"), optional=True),
         "AS",
         OneOf(
             OptionallyBracketed(Ref("SelectableGrammar")),
@@ -2228,21 +2563,59 @@ class RefreshMaterializedViewStatementSegment(BaseSegment):
     )
 
 
-class DropMaterializedViewStatementSegment(BaseSegment):
-    """A `DROP MATERIALIZED VIEW` statement.
+class DropMaterializedViewStatementSegment(BaseSegment):
+    """A `DROP MATERIALIZED VIEW` statement.
+
+    As specified in https://www.postgresql.org/docs/14/sql-dropmaterializedview.html
+    """
+
+    type = "drop_materialized_view_statement"
+
+    match_grammar = Sequence(
+        "DROP",
+        "MATERIALIZED",
+        "VIEW",
+        Ref("IfExistsGrammar", optional=True),
+        Delimited(Ref("TableReferenceSegment")),
+        Ref("DropBehaviorGrammar", optional=True),
+    )
+
+
+class WithCheckOptionSegment(BaseSegment):
+    """WITH [ CASCADED | LOCAL ] CHECK OPTION for Postgres' CREATE VIEWS.
+
+    https://www.postgresql.org/docs/14/sql-createview.html
+    """
+
+    type = "with_check_option"
+    match_grammar: Matchable = Sequence(
+        "WITH", OneOf("CASCADED", "LOCAL"), Sequence("CHECK", "OPTION")
+    )
+
+
+class CreateViewStatementSegment(BaseSegment):
+    """An `Create VIEW` statement.
 
-    As specified in https://www.postgresql.org/docs/14/sql-dropmaterializedview.html
+    As specified in https://www.postgresql.org/docs/14/sql-createview.html
     """
 
-    type = "drop_materialized_view_statement"
+    type = "create_view_statement"
 
     match_grammar = Sequence(
-        "DROP",
-        "MATERIALIZED",
+        "CREATE",
+        Ref("OrReplaceGrammar", optional=True),
+        Ref("TemporaryGrammar", optional=True),
+        Ref.keyword("RECURSIVE", optional=True),
         "VIEW",
-        Ref("IfExistsGrammar", optional=True),
-        Delimited(Ref("TableReferenceSegment")),
-        Ref("DropBehaviorGrammar", optional=True),
+        Ref("TableReferenceSegment"),
+        Ref("BracketedColumnReferenceListGrammar", optional=True),
+        Sequence("WITH", Ref("RelationOptionsSegment"), optional=True),
+        "AS",
+        OneOf(
+            OptionallyBracketed(Ref("SelectableGrammar")),
+            Ref("ValuesClauseSegment"),
+        ),
+        Ref("WithCheckOptionSegment", optional=True),
     )
 
 
@@ -2320,6 +2693,22 @@ class AlterViewStatementSegment(BaseSegment):
     )
 
 
+class DropViewStatementSegment(ansi.DropViewStatementSegment):
+    """A `DROP VIEW` statement.
+
+    https://www.postgresql.org/docs/15/sql-dropview.html
+    https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L6698-L6719
+    """
+
+    match_grammar: Matchable = Sequence(
+        "DROP",
+        "VIEW",
+        Ref("IfExistsGrammar", optional=True),
+        Delimited(Ref("TableReferenceSegment")),
+        Ref("DropBehaviorGrammar", optional=True),
+    )
+
+
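
The view segments above add storage parameters and the check option to CREATE VIEW, and a dedicated DROP VIEW with a view list. A hypothetical smoke test via the simple sqlfluff.parse API:

import sqlfluff

# CASCADED/LOCAL is required by WithCheckOptionSegment as written;
# DROP VIEW takes a delimited list plus CASCADE/RESTRICT.
for sql in (
    "CREATE OR REPLACE VIEW active_users AS SELECT * FROM users WHERE active;",
    "CREATE VIEW checked AS SELECT * FROM users WITH CASCADED CHECK OPTION;",
    "DROP VIEW IF EXISTS active_users, checked RESTRICT;",
):
    sqlfluff.parse(sql, dialect="postgres")
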
 class CreateDatabaseStatementSegment(ansi.CreateDatabaseStatementSegment):
     """A `CREATE DATABASE` statement.
 
@@ -2467,6 +2856,61 @@ class DropDatabaseStatementSegment(ansi.DropDatabaseStatementSegment):
     )
 
 
+class VacuumStatementSegment(BaseSegment):
+    """A `VACUUM` statement.
+
+    https://www.postgresql.org/docs/15/sql-vacuum.html
+    https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L11658
+    """
+
+    type = "vacuum_statement"
+    match_grammar = Sequence(
+        "VACUUM",
+        OneOf(
+            Sequence(
+                Ref.keyword("FULL", optional=True),
+                Ref.keyword("FREEZE", optional=True),
+                Ref.keyword("VERBOSE", optional=True),
+                OneOf("ANALYZE", "ANALYSE", optional=True),
+            ),
+            Bracketed(
+                Delimited(
+                    Sequence(
+                        OneOf(
+                            "FULL",
+                            "FREEZE",
+                            "VERBOSE",
+                            "ANALYZE",
+                            "ANALYSE",
+                            "DISABLE_PAGE_SKIPPING",
+                            "SKIP_LOCKED",
+                            "INDEX_CLEANUP",
+                            "PROCESS_TOAST",
+                            "TRUNCATE",
+                            "PARALLEL",
+                        ),
+                        OneOf(
+                            Ref("LiteralGrammar"),
+                            Ref("NakedIdentifierSegment"),
+                            # https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L1810-L1815
+                            Ref("OnKeywordAsIdentifierSegment"),
+                            optional=True,
+                        ),
+                    ),
+                ),
+            ),
+            optional=True,
+        ),
+        Delimited(
+            Sequence(
+                Ref("TableReferenceSegment"),
+                Ref("BracketedColumnReferenceListGrammar", optional=True),
+            ),
+            optional=True,
+        ),
+    )
+
+
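
The new VacuumStatementSegment covers both the legacy keyword form and the parenthesised option list, including the ON-keyword-as-identifier case the inline comment links to. A sketch with hypothetical table names, assuming the simple sqlfluff.parse API:

import sqlfluff

# Option values are optional; "INDEX_CLEANUP ON" exercises
# OnKeywordAsIdentifierSegment, "PARALLEL 4" a literal value.
for sql in (
    "VACUUM FULL VERBOSE ANALYZE orders;",
    "VACUUM (FREEZE, INDEX_CLEANUP ON, PARALLEL 4) big_table;",
):
    sqlfluff.parse(sql, dialect="postgres")
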
 class LikeOptionSegment(BaseSegment):
     """Like Option Segment.
 
@@ -2515,11 +2959,11 @@ class ColumnConstraintSegment(ansi.ColumnConstraintSegment):
             Sequence(  # DEFAULT <value>
                 "DEFAULT",
                 OneOf(
+                    Ref("ShorthandCastSegment"),
                     Ref("LiteralGrammar"),
                     Ref("FunctionSegment"),
                     Ref("BareFunctionSegment"),
-                    Ref("ExpressionSegment")
-                    # ?? Ref('IntervalExpressionSegment')
+                    Ref("ExpressionSegment"),
                 ),
             ),
             Sequence("GENERATED", "ALWAYS", "AS", Ref("ExpressionSegment"), "STORED"),
@@ -2532,8 +2976,35 @@ class ColumnConstraintSegment(ansi.ColumnConstraintSegment):
                     AnyNumberOf(Ref("AlterSequenceOptionsSegment")), optional=True
                 ),
             ),
-            "UNIQUE",
-            Ref("PrimaryKeyGrammar"),
+            Sequence(
+                "UNIQUE",
+                Sequence(
+                    "NULLS",
+                    Ref.keyword("NOT", optional=True),
+                    "DISTINCT",
+                    optional=True,
+                ),
+                Sequence("WITH", Ref("DefinitionParametersSegment"), optional=True),
+                Sequence(
+                    "USING",
+                    "INDEX",
+                    "TABLESPACE",
+                    Ref("TablespaceReferenceSegment"),
+                    optional=True,
+                ),
+            ),
+            Sequence(
+                "PRIMARY",
+                "KEY",
+                Sequence("WITH", Ref("DefinitionParametersSegment"), optional=True),
+                Sequence(
+                    "USING",
+                    "INDEX",
+                    "TABLESPACE",
+                    Ref("TablespaceReferenceSegment"),
+                    optional=True,
+                ),
+            ),
             Ref("ReferenceDefinitionGrammar"),  # REFERENCES reftable [ ( refcolumn) ]
         ),
         OneOf("DEFERRABLE", Sequence("NOT", "DEFERRABLE"), optional=True),
@@ -2604,6 +3075,12 @@ class TableConstraintSegment(ansi.TableConstraintSegment):
             ),
             Sequence(  # UNIQUE ( column_name [, ... ] )
                 "UNIQUE",
+                Sequence(
+                    "NULLS",
+                    Ref.keyword("NOT", optional=True),
+                    "DISTINCT",
+                    optional=True,
+                ),
                 Ref("BracketedColumnReferenceListGrammar"),
                 Ref("IndexParametersSegment", optional=True),
             ),
@@ -2615,18 +3092,10 @@ class TableConstraintSegment(ansi.TableConstraintSegment):
             ),
             Sequence(
                 "EXCLUDE",
-                Sequence("USING", Ref("FunctionSegment"), optional=True),
-                Bracketed(
-                    Delimited(
-                        Sequence(
-                            Ref("ExcludeElementSegment"),
-                            "WITH",
-                            Ref("ComparisonOperatorGrammar"),
-                        )
-                    )
-                ),
+                Sequence("USING", Ref("IndexAccessMethodSegment"), optional=True),
+                Bracketed(Delimited(Ref("ExclusionConstraintElementSegment"))),
                 Ref("IndexParametersSegment", optional=True),
-                Sequence("WHERE", Ref("ExpressionSegment")),
+                Sequence("WHERE", Bracketed(Ref("ExpressionSegment")), optional=True),
             ),
             Sequence(  # FOREIGN KEY ( column_name [, ... ] )
                 # REFERENCES reftable [ ( refcolumn [, ... ] ) ]
@@ -2638,12 +3107,14 @@ class TableConstraintSegment(ansi.TableConstraintSegment):
                     "ReferenceDefinitionGrammar"
                 ),  # REFERENCES reftable [ ( refcolumn) ]
             ),
-            OneOf("DEFERRABLE", Sequence("NOT", "DEFERRABLE"), optional=True),
+        ),
+        AnyNumberOf(
+            OneOf("DEFERRABLE", Sequence("NOT", "DEFERRABLE")),
             OneOf(
-                Sequence("INITIALLY", "DEFERRED"),
-                Sequence("INITIALLY", "IMMEDIATE"),
-                optional=True,
+                Sequence("INITIALLY", "DEFERRED"), Sequence("INITIALLY", "IMMEDIATE")
             ),
+            Sequence("NOT", "VALID"),
+            Sequence("NO", "INHERIT"),
         ),
     )
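
With the changes above, exclusion constraints parse via IndexElementSegment, and the trailing options (DEFERRABLE, INITIALLY ..., NOT VALID, NO INHERIT) may appear in any order after any constraint. A sketch under those assumptions, via the simple sqlfluff.parse API:

import sqlfluff

# EXCLUDE element = index element + WITH + comparison operator;
# the WHERE predicate must be bracketed, per the grammar above.
sqlfluff.parse(
    "ALTER TABLE bookings ADD CONSTRAINT no_dup_room"
    " EXCLUDE USING gist (room WITH =)"
    " WHERE (active) DEFERRABLE INITIALLY DEFERRED;",
    dialect="postgres",
)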
 
@@ -2684,19 +3155,7 @@ class IndexParametersSegment(BaseSegment):
 
     match_grammar = Sequence(
         Sequence("INCLUDE", Ref("BracketedColumnReferenceListGrammar"), optional=True),
-        Sequence(
-            "WITH",
-            Bracketed(
-                Delimited(
-                    Sequence(
-                        Ref("ParameterNameSegment"),
-                        Ref("EqualsSegment"),
-                        Ref("LiteralGrammar"),
-                    ),
-                )
-            ),
-            optional=True,
-        ),
+        Sequence("WITH", Ref("DefinitionParametersSegment"), optional=True),
         Sequence(
             "USING",
             "INDEX",
@@ -2724,20 +3183,65 @@ class ReferentialActionSegment(BaseSegment):
     )
 
 
-class ExcludeElementSegment(BaseSegment):
-    """Exclude element segment.
+class IndexElementOptionsSegment(BaseSegment):
+    """Index element options segment.
 
-    As found in https://www.postgresql.org/docs/13/sql-altertable.html.
+    https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L8057
     """
 
+    type = "index_element_options"
+
     match_grammar = Sequence(
-        OneOf(Ref("ColumnReferenceSegment"), Bracketed(Ref("ExpressionSegment"))),
-        Ref("ParameterNameSegment", optional=True),
+        Sequence("COLLATE", Ref("CollationReferenceSegment"), optional=True),
+        Sequence(
+            Ref(
+                "OperatorClassReferenceSegment",
+                exclude=Sequence("NULLS", OneOf("FIRST", "LAST")),
+            ),
+            Ref("RelationOptionsSegment", optional=True),  # args for opclass
+            optional=True,
+        ),
         OneOf("ASC", "DESC", optional=True),
         Sequence("NULLS", OneOf("FIRST", "LAST"), optional=True),
     )
 
 
+class IndexElementSegment(BaseSegment):
+    """Index element segment.
+
+    As found in https://www.postgresql.org/docs/15/sql-altertable.html.
+    https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L8089
+    """
+
+    type = "index_element"
+    match_grammar = Sequence(
+        OneOf(
+            Ref("ColumnReferenceSegment"),
+            # TODO: This is still not perfect.  This corresponds to
+            # func_expr_windowless in the grammar and we don't currently
+            # implement everything it provides.
+            Ref("FunctionSegment"),
+            Bracketed(Ref("ExpressionSegment")),
+        ),
+        Ref("IndexElementOptionsSegment", optional=True),
+    )
+
+
+class ExclusionConstraintElementSegment(BaseSegment):
+    """Exclusion constraint element segment.
+
+    As found in https://www.postgresql.org/docs/15/sql-altertable.html.
+    https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L4277
+    """
+
+    type = "exclusion_constraint_element"
+    match_grammar = Sequence(
+        Ref("IndexElementSegment"),
+        "WITH",
+        Ref("ComparisonOperatorGrammar"),
+    )
+
+
 class AlterDefaultPrivilegesStatementSegment(BaseSegment):
     """`ALTER DEFAULT PRIVILEGES` statement.
 
@@ -2884,6 +3388,65 @@ class AlterDefaultPrivilegesRevokeSegment(BaseSegment):
     )
 
 
+class DropOwnedStatementSegment(BaseSegment):
+    """A `DROP OWNED` statement.
+
+    https://www.postgresql.org/docs/15/sql-drop-owned.html
+    https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L6667
+    """
+
+    type = "drop_owned_statement"
+
+    match_grammar = Sequence(
+        "DROP",
+        "OWNED",
+        "BY",
+        Delimited(
+            OneOf(
+                "CURRENT_ROLE",
+                "CURRENT_USER",
+                "SESSION_USER",
+                # must come last; CURRENT_USER isn't reserved:
+                Ref("RoleReferenceSegment"),
+            ),
+        ),
+        Ref("DropBehaviorGrammar", optional=True),
+    )
+
+
+class ReassignOwnedStatementSegment(BaseSegment):
+    """A `REASSIGN OWNED` statement.
+
+    https://www.postgresql.org/docs/15/sql-reassign-owned.html
+    https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L6678
+    """
+
+    type = "reassign_owned_statement"
+
+    match_grammar = Sequence(
+        "REASSIGN",
+        "OWNED",
+        "BY",
+        Delimited(
+            OneOf(
+                "CURRENT_ROLE",
+                "CURRENT_USER",
+                "SESSION_USER",
+                # must come last; CURRENT_USER isn't reserved:
+                Ref("RoleReferenceSegment"),
+            ),
+        ),
+        "TO",
+        OneOf(
+            "CURRENT_ROLE",
+            "CURRENT_USER",
+            "SESSION_USER",
+            # must come last; CURRENT_USER isn't reserved:
+            Ref("RoleReferenceSegment"),
+        ),
+    )
+
+
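
Both ownership statements above accept a delimited role list mixing the special role keywords with ordinary role names. A hypothetical smoke test via the simple sqlfluff.parse API:

import sqlfluff

# Special role keywords and plain role references can be mixed freely.
sqlfluff.parse("DROP OWNED BY alice, CURRENT_USER CASCADE;", dialect="postgres")
sqlfluff.parse("REASSIGN OWNED BY alice, bob TO SESSION_USER;", dialect="postgres")
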
 class CommentOnStatementSegment(BaseSegment):
     """`COMMENT ON` statement.
 
@@ -3012,76 +3575,25 @@ class CreateIndexStatementSegment(ansi.CreateIndexStatementSegment):
     match_grammar = Sequence(
         "CREATE",
         Ref.keyword("UNIQUE", optional=True),
-        Ref("OrReplaceGrammar", optional=True),
         "INDEX",
         Ref.keyword("CONCURRENTLY", optional=True),
-        Ref("IfNotExistsGrammar", optional=True),
-        Ref("IndexReferenceSegment", optional=True),
+        Sequence(
+            Ref("IfNotExistsGrammar", optional=True),
+            Ref("IndexReferenceSegment"),
+            optional=True,
+        ),
         "ON",
         Ref.keyword("ONLY", optional=True),
         Ref("TableReferenceSegment"),
-        OneOf(
-            Sequence("USING", Ref("FunctionSegment"), optional=True),
-            Bracketed(
-                Delimited(
-                    Sequence(
-                        OneOf(
-                            Ref("ColumnReferenceSegment"),
-                            OptionallyBracketed(Ref("FunctionSegment")),
-                            Bracketed(Ref("ExpressionSegment")),
-                        ),
-                        AnyNumberOf(
-                            Sequence(
-                                "COLLATE",
-                                OneOf(
-                                    Ref("LiteralGrammar"),
-                                    Ref("QuotedIdentifierSegment"),
-                                ),
-                            ),
-                            Sequence(
-                                Ref("ParameterNameSegment"),
-                                Bracketed(
-                                    Delimited(
-                                        Sequence(
-                                            Ref("ParameterNameSegment"),
-                                            Ref("EqualsSegment"),
-                                            OneOf(
-                                                Ref("LiteralGrammar"),
-                                                Ref("QuotedIdentifierSegment"),
-                                            ),
-                                        ),
-                                    ),
-                                ),
-                            ),
-                            OneOf("ASC", "DESC"),
-                            OneOf(
-                                Sequence("NULLS", "FIRST"), Sequence("NULLS", "LAST")
-                            ),
-                        ),
-                    ),
-                )
-            ),
-        ),
-        AnyNumberOf(
-            Sequence(
-                "INCLUDE",
-                Bracketed(Delimited(Ref("ColumnReferenceSegment"))),
-            ),
-            Sequence(
-                "WITH",
-                Bracketed(
-                    Delimited(
-                        Sequence(
-                            Ref("ParameterNameSegment"),
-                            Ref("EqualsSegment"),
-                            Ref("LiteralGrammar"),
-                        ),
-                    )
-                ),
-            ),
-            Sequence("TABLESPACE", Ref("TableReferenceSegment")),
-            Sequence("WHERE", Ref("ExpressionSegment")),
+        Sequence("USING", Ref("IndexAccessMethodSegment"), optional=True),
+        Bracketed(Delimited(Ref("IndexElementSegment"))),
+        Sequence(
+            "INCLUDE", Bracketed(Delimited(Ref("IndexElementSegment"))), optional=True
         ),
+        Sequence("NULLS", Ref.keyword("NOT", optional=True), "DISTINCT", optional=True),
+        Sequence("WITH", Ref("RelationOptionsSegment"), optional=True),
+        Sequence("TABLESPACE", Ref("TablespaceReferenceSegment"), optional=True),
+        Sequence("WHERE", Ref("ExpressionSegment"), optional=True),
     )
 
 
@@ -3202,6 +3714,24 @@ class ReindexStatementSegment(BaseSegment):
     )
 
 
+class DropIndexStatementSegment(ansi.DropIndexStatementSegment):
+    """A `DROP INDEX` statement.
+
+    https://www.postgresql.org/docs/15/sql-dropindex.html
+    https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L6698-L6719
+    https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L6808-L6829
+    """
+
+    match_grammar: Matchable = Sequence(
+        "DROP",
+        "INDEX",
+        Ref.keyword("CONCURRENTLY", optional=True),
+        Ref("IfExistsGrammar", optional=True),
+        Delimited(Ref("IndexReferenceSegment")),
+        Ref("DropBehaviorGrammar", optional=True),
+    )
+
+
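
The rewritten CREATE INDEX grammar above shares IndexElementSegment with INCLUDE lists and exclusion constraints, and DROP INDEX gains CONCURRENTLY and a reference list. A sketch with hypothetical names, assuming the simple sqlfluff.parse API:

import sqlfluff

# Expression element with DESC NULLS LAST, INCLUDE list, storage
# parameters, tablespace and partial-index predicate in one statement.
sqlfluff.parse(
    "CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS idx_users_email"
    " ON ONLY users USING btree (lower(email) DESC NULLS LAST)"
    " INCLUDE (id) WITH (fillfactor = 90)"
    " TABLESPACE fast_ts WHERE deleted_at IS NULL;",
    dialect="postgres",
)
sqlfluff.parse(
    "DROP INDEX CONCURRENTLY IF EXISTS idx_users_email;", dialect="postgres"
)
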
 class FrameClauseSegment(ansi.FrameClauseSegment):
     """A frame clause for window functions.
 
@@ -3374,6 +3904,8 @@ class StatementSegment(ansi.StatementSegment):
     parse_grammar = ansi.StatementSegment.parse_grammar.copy(
         insert=[
             Ref("AlterDefaultPrivilegesStatementSegment"),
+            Ref("DropOwnedStatementSegment"),
+            Ref("ReassignOwnedStatementSegment"),
             Ref("CommentOnStatementSegment"),
             Ref("AnalyzeStatementSegment"),
             Ref("CreateTableAsStatementSegment"),
@@ -3390,7 +3922,9 @@ class StatementSegment(ansi.StatementSegment):
             Ref("RefreshMaterializedViewStatementSegment"),
             Ref("AlterDatabaseStatementSegment"),
             Ref("DropDatabaseStatementSegment"),
+            Ref("VacuumStatementSegment"),
             Ref("AlterFunctionStatementSegment"),
+            Ref("CreateViewStatementSegment"),
             Ref("AlterViewStatementSegment"),
             Ref("ListenStatementSegment"),
             Ref("NotifyStatementSegment"),
@@ -3408,6 +3942,9 @@ class StatementSegment(ansi.StatementSegment):
             Ref("AlterRoleStatementSegment"),
             Ref("CreateExtensionStatementSegment"),
             Ref("DropExtensionStatementSegment"),
+            Ref("CreatePublicationStatementSegment"),
+            Ref("AlterPublicationStatementSegment"),
+            Ref("DropPublicationStatementSegment"),
             Ref("CreateTypeStatementSegment"),
             Ref("AlterTypeStatementSegment"),
             Ref("AlterSchemaStatementSegment"),
@@ -3570,8 +4107,10 @@ class AsAliasExpressionSegment(BaseSegment):
 
     type = "alias_expression"
     match_grammar = Sequence(
+        Indent,
         "AS",
         Ref("SingleIdentifierGrammar"),
+        Dedent,
     )
 
 
@@ -3715,6 +4254,8 @@ class SetStatementSegment(BaseSegment):
     """Set Statement.
 
     As specified in https://www.postgresql.org/docs/14/sql-set.html
+    Also: https://www.postgresql.org/docs/15/sql-set-role.html (still a VariableSetStmt)
+    https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L1584
     """
 
     type = "set_statement"
@@ -3727,13 +4268,20 @@ class SetStatementSegment(BaseSegment):
                 Ref("ParameterNameSegment"),
                 OneOf("TO", Ref("EqualsSegment")),
                 OneOf(
-                    Delimited(Ref("LiteralGrammar"), Ref("NakedIdentifierSegment")),
                     "DEFAULT",
+                    Delimited(
+                        Ref("LiteralGrammar"),
+                        Ref("NakedIdentifierSegment"),
+                        # https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L1810-L1815
+                        Ref("OnKeywordAsIdentifierSegment"),
+                    ),
                 ),
             ),
             Sequence(
                 "TIME", "ZONE", OneOf(Ref("QuotedLiteralSegment"), "LOCAL", "DEFAULT")
             ),
+            Sequence("SCHEMA", Ref("QuotedLiteralSegment")),
+            Sequence("ROLE", OneOf("NONE", Ref("RoleReferenceSegment"))),
         ),
     )
 
@@ -3947,12 +4495,13 @@ class ResetStatementSegment(BaseSegment):
     """A `RESET` statement.
 
     As Specified in https://www.postgresql.org/docs/14/sql-reset.html
+    Also, RESET ROLE from: https://www.postgresql.org/docs/15/sql-set-role.html
     """
 
     type = "reset_statement"
     match_grammar = Sequence(
         "RESET",
-        OneOf("ALL", Ref("ParameterNameSegment")),
+        OneOf("ALL", "ROLE", Ref("ParameterNameSegment")),
     )
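
SET ROLE and SET SCHEMA are VariableSetStmts in the PG grammar, as the updated docstrings note, and RESET now accepts ROLE alongside ALL and parameter names. A hypothetical smoke test via the simple sqlfluff.parse API:

import sqlfluff

# "= ON" exercises the ON-keyword-as-identifier branch added above.
for sql in (
    "SET ROLE report_role;",
    "SET SESSION search_path TO public, archive;",
    "SET standard_conforming_strings = ON;",
    "SET SCHEMA 'public';",
    "RESET ROLE;",
):
    sqlfluff.parse(sql, dialect="postgres")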
 
 
@@ -4177,7 +4726,7 @@ class CTEDefinitionSegment(ansi.CTEDefinitionSegment):
         Ref("SingleIdentifierGrammar"),
         Ref("CTEColumnList", optional=True),
         "AS",
-        Sequence("NOT", "MATERIALIZED", optional=True),
+        Sequence(Ref.keyword("NOT", optional=True), "MATERIALIZED", optional=True),
         Bracketed(
             # Ephemeral here to subdivide the query.
             Ref("SelectableGrammar", ephemeral_name="SelectableGrammar")
@@ -4651,3 +5200,23 @@ class NamedArgumentSegment(BaseSegment):
         Ref("RightArrowSegment"),
         Ref("ExpressionSegment"),
     )
+
+
+class TableExpressionSegment(ansi.TableExpressionSegment):
+    """The main table expression e.g. within a FROM clause.
+
+    Override from ANSI to allow optional WITH ORDINALITY clause
+    """
+
+    match_grammar: Matchable = OneOf(
+        Ref("ValuesClauseSegment"),
+        Ref("BareFunctionSegment"),
+        Sequence(
+            Ref("FunctionSegment"),
+            Sequence("WITH", "ORDINALITY", optional="True"),
+        ),
+        Ref("TableReferenceSegment"),
+        # Nested Selects
+        Bracketed(Ref("SelectableGrammar")),
+        Bracketed(Ref("MergeStatementSegment")),
+    )
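
The TableExpressionSegment override above lets set-returning functions in FROM carry WITH ORDINALITY. A minimal sketch via the simple sqlfluff.parse API:

import sqlfluff

# A function table expression followed by the optional WITH ORDINALITY.
sqlfluff.parse(
    "SELECT * FROM generate_series(1, 3) WITH ORDINALITY;", dialect="postgres"
)
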
diff --git a/src/sqlfluff/dialects/dialect_postgres_keywords.py b/src/sqlfluff/dialects/dialect_postgres_keywords.py
index b423b52..20e87b3 100644
--- a/src/sqlfluff/dialects/dialect_postgres_keywords.py
+++ b/src/sqlfluff/dialects/dialect_postgres_keywords.py
@@ -97,7 +97,7 @@ postgres_docs_keywords = [
     ("BEGIN", "non-reserved"),
     ("BEGIN_FRAME", "not-keyword"),
     ("BEGIN_PARTITION", "not-keyword"),
-    ("BERNOULLI", "not-keyword"),
+    ("BERNOULLI", "non-reserved"),
     ("BETWEEN", "non-reserved-(cannot-be-function-or-type)"),
     ("BIGINT", "non-reserved-(cannot-be-function-or-type)"),
     ("BIGSERIAL", "non-reserved-(cannot-be-function-or-type)"),
@@ -926,6 +926,8 @@ postgres_nondocs_keywords = [
     ("DEPTH", "non-reserved"),
     ("DESCRIBE", "non-reserved"),
     ("DETERMINISTIC", "non-reserved"),
+    ("DISABLE_PAGE_SKIPPING", "non-reserved"),
+    ("EXECUTION", "not-keyword"),
     ("EXTENDED", "non-reserved"),
     ("FILE", "non-reserved"),
     ("FORCE_NOT_NULL", "non-reserved"),
@@ -935,6 +937,7 @@ postgres_nondocs_keywords = [
     ("HASH", "non-reserved"),
     ("ICU", "non-reserved"),
     ("IGNORE", "non-reserved"),
+    ("INDEX_CLEANUP", "non-reserved"),
     ("IS_TEMPLATE", "non-reserved"),
     ("JSON", "non-reserved"),
     ("LC_COLLATE", "non-reserved"),
@@ -953,6 +956,7 @@ postgres_nondocs_keywords = [
     ("NOREPLICATION", "non-reserved"),
     ("NOSUPERUSER", "non-reserved"),
     ("PLAIN", "non-reserved"),
+    ("PROCESS_TOAST", "non-reserved"),
     ("PROVIDER", "non-reserved"),
     ("PUBLIC", "non-reserved"),
     ("REMAINDER", "non-reserved"),
@@ -963,7 +967,7 @@ postgres_nondocs_keywords = [
     ("SETTINGS", "non-reserved"),
     ("SKIP_LOCKED", "non-reserved"),
     ("SUMMARY", "non-reserved"),
-    ("SUPERUSER", "non-reserverd"),
+    ("SUPERUSER", "non-reserved"),
     ("TIMETZ", "non-reserved"),
     ("TIMESTAMPTZ", "non-reserved"),
     ("TIMING", "non-reserved"),
diff --git a/src/sqlfluff/dialects/dialect_redshift.py b/src/sqlfluff/dialects/dialect_redshift.py
index 0752d90..ca19bbb 100644
--- a/src/sqlfluff/dialects/dialect_redshift.py
+++ b/src/sqlfluff/dialects/dialect_redshift.py
@@ -35,13 +35,13 @@ redshift_dialect = postgres_dialect.copy_as("redshift")
 
 # Set Keywords
 redshift_dialect.sets("unreserved_keywords").clear()
-redshift_dialect.sets("unreserved_keywords").update(
-    [n.strip().upper() for n in redshift_unreserved_keywords.split("\n")]
+redshift_dialect.update_keywords_set_from_multiline_string(
+    "unreserved_keywords", redshift_unreserved_keywords
 )
 
 redshift_dialect.sets("reserved_keywords").clear()
-redshift_dialect.sets("reserved_keywords").update(
-    [n.strip().upper() for n in redshift_reserved_keywords.split("\n")]
+redshift_dialect.update_keywords_set_from_multiline_string(
+    "reserved_keywords", redshift_reserved_keywords
 )
 
 redshift_dialect.sets("bare_functions").clear()
@@ -192,7 +192,12 @@ redshift_dialect.replace(
 redshift_dialect.patch_lexer_matchers(
     [
         # add optional leading # to code for temporary tables
-        RegexLexer("code", r"#?[0-9a-zA-Z_]+[0-9a-zA-Z_$]*", CodeSegment),
+        RegexLexer(
+            "code",
+            r"#?[0-9a-zA-Z_]+[0-9a-zA-Z_$]*",
+            CodeSegment,
+            segment_kwargs={"type": "code"},
+        ),
     ]
 )
 
@@ -332,6 +337,26 @@ class DateTimeTypeIdentifier(BaseSegment):
     )
 
 
+class BracketedArguments(ansi.BracketedArguments):
+    """A series of bracketed arguments.
+
+    e.g. the bracketed part of numeric(1, 3)
+    """
+
+    match_grammar = Bracketed(
+        # The brackets might be empty for some cases...
+        Delimited(
+            OneOf(
+                Ref("LiteralGrammar"),
+                # In Redshift, character types offer an optional MAX
+                # keyword in their parameters.
+                "MAX",
+            ),
+            optional=True,
+        ),
+    )
+
+
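
The shared BracketedArguments above lets MAX stand in for a length across Redshift's character and binary types. A hypothetical smoke test via the simple sqlfluff.parse API:

import sqlfluff

# MAX in place of a length, plus ordinary numeric precision/scale.
sqlfluff.parse(
    "CREATE TABLE t (c VARCHAR(MAX), n NUMERIC(10, 2));", dialect="redshift"
)
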
 class DatatypeSegment(BaseSegment):
     """A data type segment.
 
@@ -358,10 +383,7 @@ class DatatypeSegment(BaseSegment):
         # numeric types [precision ["," scale])]
         Sequence(
             OneOf("DECIMAL", "NUMERIC"),
-            Bracketed(
-                Delimited(Ref("NumericLiteralSegment")),
-                optional=True,
-            ),
+            Ref("BracketedArguments", optional=True),
         ),
         # character types
         OneOf(
@@ -374,13 +396,7 @@ class DatatypeSegment(BaseSegment):
                     Sequence("CHARACTER", "VARYING"),
                     "NVARCHAR",
                 ),
-                Bracketed(
-                    OneOf(
-                        Ref("NumericLiteralSegment"),
-                        "MAX",
-                    ),
-                    optional=True,
-                ),
+                Ref("BracketedArguments", optional=True),
             ),
             "BPCHAR",
             "TEXT",
@@ -404,10 +420,7 @@ class DatatypeSegment(BaseSegment):
                 "VARBINARY",
                 Sequence("BINARY", "VARYING"),
             ),
-            Bracketed(
-                Ref("NumericLiteralSegment"),
-                optional=True,
-            ),
+            Ref("BracketedArguments", optional=True),
         ),
         "ANYELEMENT",
     )
@@ -1482,7 +1495,7 @@ class CreateSchemaStatementSegment(BaseSegment):
     """A `CREATE SCHEMA` statement.
 
     https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_SCHEMA.html
-    TODO: support optional SCHEMA_ELEMENT
+    TODO: support optional SCHEMA_ELEMENT (should mostly be provided by ansi)
     """
 
     type = "create_schema_statement"
@@ -1495,13 +1508,13 @@ class CreateSchemaStatementSegment(BaseSegment):
                 Ref("SchemaReferenceSegment"),
                 Sequence(
                     "AUTHORIZATION",
-                    Ref("ObjectReferenceSegment"),
+                    Ref("RoleReferenceSegment"),
                     optional=True,
                 ),
             ),
             Sequence(
                 "AUTHORIZATION",
-                Ref("ObjectReferenceSegment"),
+                Ref("RoleReferenceSegment"),
             ),
         ),
         Ref("QuotaGrammar", optional=True),
@@ -1939,13 +1952,12 @@ class AnalyzeCompressionStatementSegment(BaseSegment):
     )
 
 
-class VacuumStatementSegment(BaseSegment):
+class VacuumStatementSegment(postgres.VacuumStatementSegment):
     """A `VACUUM` statement.
 
     https://docs.aws.amazon.com/redshift/latest/dg/r_VACUUM_command.html
     """
 
-    type = "vacuum_statement"
     match_grammar = Sequence(
         "VACUUM",
         OneOf(
@@ -2001,7 +2013,6 @@ class StatementSegment(postgres.StatementSegment):
             Ref("FetchStatementSegment"),
             Ref("CloseStatementSegment"),
             Ref("AnalyzeCompressionStatementSegment"),
-            Ref("VacuumStatementSegment"),
             Ref("AlterProcedureStatementSegment"),
             Ref("CallStatementSegment"),
             Ref("CreateRlsPolicyStatementSegment"),
diff --git a/src/sqlfluff/dialects/dialect_snowflake.py b/src/sqlfluff/dialects/dialect_snowflake.py
index 6fd4408..b3a5424 100644
--- a/src/sqlfluff/dialects/dialect_snowflake.py
+++ b/src/sqlfluff/dialects/dialect_snowflake.py
@@ -108,6 +108,19 @@ snowflake_dialect.sets("bracket_pairs").add(
     ("exclude", "StartExcludeBracketSegment", "EndExcludeBracketSegment", True)
 )
 
+# Set the bare functions
+snowflake_dialect.sets("bare_functions").clear()
+snowflake_dialect.sets("bare_functions").update(
+    [
+        "CURRENT_DATE",
+        "CURRENT_TIME",
+        "CURRENT_TIMESTAMP",
+        "CURRENT_USER",
+        "LOCALTIME",
+        "LOCALTIMESTAMP",
+    ]
+)
+
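
Registering these names in the bare_functions set lets them parse without parentheses. A minimal sketch via the simple sqlfluff.parse API:

import sqlfluff

# Bare functions: no parentheses required once in the set above.
sqlfluff.parse("SELECT CURRENT_TIMESTAMP, LOCALTIME;", dialect="snowflake")
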
 # Add all Snowflake compression types
 snowflake_dialect.sets("compression_types").clear()
 snowflake_dialect.sets("compression_types").update(
@@ -509,7 +522,7 @@ snowflake_dialect.replace(
             Ref("ConnectByClauseSegment"),
             Ref("FromBeforeExpressionSegment"),
             Ref("FromPivotExpressionSegment"),
-            Ref("FromUnpivotExpressionSegment"),
+            AnyNumberOf(Ref("FromUnpivotExpressionSegment")),
             Ref("SamplingExpressionSegment"),
             min_times=1,
         ),
@@ -636,13 +649,13 @@ snowflake_dialect.replace(
 
 # Add all Snowflake keywords
 snowflake_dialect.sets("unreserved_keywords").clear()
-snowflake_dialect.sets("unreserved_keywords").update(
-    [n.strip().upper() for n in snowflake_unreserved_keywords.split("\n")]
+snowflake_dialect.update_keywords_set_from_multiline_string(
+    "unreserved_keywords", snowflake_unreserved_keywords
 )
 
 snowflake_dialect.sets("reserved_keywords").clear()
-snowflake_dialect.sets("reserved_keywords").update(
-    [n.strip().upper() for n in snowflake_reserved_keywords.split("\n")]
+snowflake_dialect.update_keywords_set_from_multiline_string(
+    "reserved_keywords", snowflake_reserved_keywords
 )
 
 # Add datetime units and their aliases from
@@ -996,6 +1009,9 @@ class StatementSegment(ansi.StatementSegment):
             Ref("PutStatementSegment"),
             Ref("RemoveStatementSegment"),
             Ref("CreateDatabaseFromShareStatementSegment"),
+            Ref("AlterRoleStatementSegment"),
+            Ref("AlterStorageIntegrationSegment"),
+            Ref("ExecuteTaskClauseSegment"),
         ],
         remove=[
             Ref("CreateIndexStatementSegment"),
@@ -1304,7 +1320,10 @@ class SamplingExpressionSegment(ansi.SamplingExpressionSegment):
     match_grammar = Sequence(
         OneOf("SAMPLE", "TABLESAMPLE"),
         OneOf("BERNOULLI", "ROW", "SYSTEM", "BLOCK", optional=True),
-        Bracketed(Ref("NumericLiteralSegment"), Ref.keyword("ROWS", optional=True)),
+        Bracketed(
+            OneOf(Ref("NumericLiteralSegment"), Ref("ReferencedVariableNameSegment")),
+            Ref.keyword("ROWS", optional=True),
+        ),
         Sequence(
             OneOf("REPEATABLE", "SEED"),
             Bracketed(Ref("NumericLiteralSegment")),
@@ -1338,7 +1357,7 @@ class SemiStructuredAccessorSegment(BaseSegment):
     https://docs.snowflake.com/en/user-guide/semistructured-considerations.html
     """
 
-    type = "snowflake_semi_structured_expression"
+    type = "semi_structured_expression"
     match_grammar = Sequence(
         OneOf(
             # If a field is already a VARIANT, this could
@@ -1424,6 +1443,23 @@ class SelectStatementSegment(ansi.SelectStatementSegment):
     )
 
 
+class SelectClauseElementSegment(ansi.SelectClauseElementSegment):
+    """Inherit from ansi but also allow for Snowflake System Functions.
+
+    https://docs.snowflake.com/en/sql-reference/functions-system
+    """
+
+    match_grammar = ansi.SelectClauseElementSegment.match_grammar.copy(
+        insert=[
+            Sequence(
+                Ref("SystemFunctionName"),
+                Bracketed(Ref("QuotedLiteralSegment")),
+            )
+        ],
+        before=Ref("WildcardExpressionSegment"),
+    )
+
+
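
The SelectClauseElementSegment override above admits Snowflake system functions taking a single quoted argument. A hedged sketch, assuming SystemFunctionName matches the SYSTEM$ naming pattern:

import sqlfluff

# A system function with one quoted argument as a select-clause element.
sqlfluff.parse(
    "SELECT SYSTEM$CLUSTERING_INFORMATION('my_table');", dialect="snowflake"
)
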
 class WildcardExpressionSegment(ansi.WildcardExpressionSegment):
     """An extension of the star expression for Snowflake."""
 
@@ -1715,7 +1751,22 @@ class AlterTableTableColumnActionSegment(BaseSegment):
                             "MASKING",
                             "POLICY",
                         ),
-                        # @TODO: Set/Unset TAG support
+                        Sequence(
+                            "COLUMN",
+                            Ref("ColumnReferenceSegment"),
+                            "SET",
+                            "TAG",
+                            Ref("TagReferenceSegment"),
+                            Ref("EqualsSegment"),
+                            Ref("QuotedLiteralSegment"),
+                        ),
+                        Sequence(
+                            "COLUMN",
+                            Ref("ColumnReferenceSegment"),
+                            "UNSET",
+                            "TAG",
+                            Ref("TagReferenceSegment"),
+                        ),
                     ),
                 ),
             ),
@@ -1878,7 +1929,7 @@ class AlterWarehouseStatementSegment(BaseSegment):
                 Ref("NakedIdentifierSegment"),
             ),
             Sequence(
-                Ref("NakedIdentifierSegment"),
+                Ref("NakedIdentifierSegment", optional=True),
                 "SET",
                 OneOf(
                     AnyNumberOf(
@@ -1954,6 +2005,104 @@ class AlterShareStatementSegment(BaseSegment):
     )
 
 
+class AlterStorageIntegrationSegment(BaseSegment):
+    """An `ALTER STORAGE INTEGRATION` statement.
+
+    https://docs.snowflake.com/en/sql-reference/sql/alter-storage-integration
+    """
+
+    type = "alter_storage_integration_statement"
+
+    match_grammar = Sequence(
+        "ALTER",
+        Ref.keyword("STORAGE", optional=True),
+        "INTEGRATION",
+        Ref("IfExistsGrammar", optional=True),
+        Ref("ObjectReferenceSegment"),
+        OneOf(
+            Sequence(
+                "SET",
+                OneOf(
+                    Ref("TagEqualsSegment", optional=True),
+                    AnySetOf(
+                        Sequence(
+                            "COMMENT", Ref("EqualsSegment"), Ref("QuotedLiteralSegment")
+                        ),
+                        Sequence(
+                            "ENABLED",
+                            Ref("EqualsSegment"),
+                            Ref("BooleanLiteralGrammar"),
+                        ),
+                        OneOf(
+                            AnySetOf(
+                                Sequence(
+                                    "STORAGE_AWS_ROLE_ARN",
+                                    Ref("EqualsSegment"),
+                                    Ref("QuotedLiteralSegment"),
+                                ),
+                                Sequence(
+                                    "STORAGE_AWS_OBJECT_ACL",
+                                    Ref("EqualsSegment"),
+                                    Ref("QuotedLiteralSegment"),
+                                ),
+                            ),
+                            AnySetOf(
+                                Sequence(
+                                    "AZURE_TENANT_ID",
+                                    Ref("EqualsSegment"),
+                                    Ref("QuotedLiteralSegment"),
+                                ),
+                            ),
+                        ),
+                        Sequence(
+                            "STORAGE_ALLOWED_LOCATIONS",
+                            Ref("EqualsSegment"),
+                            OneOf(
+                                Bracketed(
+                                    Delimited(
+                                        OneOf(
+                                            Ref("S3Path"),
+                                            Ref("GCSPath"),
+                                            Ref("AzureBlobStoragePath"),
+                                        )
+                                    )
+                                ),
+                                Bracketed(
+                                    Ref("QuotedStarSegment"),
+                                ),
+                            ),
+                        ),
+                        Sequence(
+                            "STORAGE_BLOCKED_LOCATIONS",
+                            Ref("EqualsSegment"),
+                            Bracketed(
+                                Delimited(
+                                    OneOf(
+                                        Ref("S3Path"),
+                                        Ref("GCSPath"),
+                                        Ref("AzureBlobStoragePath"),
+                                    )
+                                )
+                            ),
+                        ),
+                    ),
+                ),
+            ),
+            Sequence(
+                "UNSET",
+                OneOf(
+                    Sequence(
+                        "TAG", Delimited(Ref("TagReferenceSegment")), optional=True
+                    ),
+                    "COMMENT",
+                    "ENABLED",
+                    "STORAGE_BLOCKED_LOCATIONS",
+                ),
+            ),
+        ),
+    )
+
+
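
The AnySetOf structure above lets SET combine the provider-specific parameters in any order, while STORAGE itself is optional in the statement header. A sketch with hypothetical integration names and ARN, via the simple sqlfluff.parse API:

import sqlfluff

# SET with several AWS parameters at once, then UNSET of a single property.
sqlfluff.parse(
    "ALTER STORAGE INTEGRATION s3_int SET"
    " ENABLED = TRUE"
    " STORAGE_AWS_ROLE_ARN = 'arn:aws:iam::000000000000:role/my_role'"
    " STORAGE_ALLOWED_LOCATIONS = ('s3://my_bucket/path/');",
    dialect="snowflake",
)
sqlfluff.parse("ALTER INTEGRATION my_int UNSET COMMENT;", dialect="snowflake")
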
 class AlterExternalTableStatementSegment(BaseSegment):
     """An `ALTER EXTERNAL TABLE` statement.
 
@@ -2111,7 +2260,13 @@ class AccessStatementSegment(BaseSegment):
         Sequence("ATTACH", "POLICY"),
         Sequence("EXECUTE", "TASK"),
         Sequence("IMPORT", "SHARE"),
-        Sequence("MANAGE", "GRANTS"),
+        Sequence(
+            "MANAGE",
+            OneOf(
+                "GRANTS",
+                Sequence(OneOf("ACCOUNT", "ORGANIZATION", "USER"), "SUPPORT", "CASES"),
+            ),
+        ),
         Sequence("MONITOR", OneOf("EXECUTION", "USAGE")),
         Sequence("OVERRIDE", "SHARE", "RESTRICTIONS"),
     )
@@ -2400,22 +2555,60 @@ class CreateFunctionStatementSegment(BaseSegment):
             Ref("DatatypeSegment"),
             Sequence("TABLE", Bracketed(Delimited(Ref("ColumnDefinitionSegment")))),
         ),
-        Sequence("NOT", "NULL", optional=True),
-        OneOf("VOLATILE", "IMMUTABLE", optional=True),
-        Sequence("LANGUAGE", OneOf("JAVASCRIPT", "SQL"), optional=True),
-        OneOf(
-            Sequence("CALLED", "ON", "NULL", "INPUT"),
-            Sequence("RETURNS", "NULL", "ON", "NULL", "INPUT"),
-            "STRICT",
+        AnySetOf(
+            Sequence("NOT", "NULL", optional=True),
+            Sequence(
+                "LANGUAGE", OneOf("JAVASCRIPT", "SQL", "PYTHON", "JAVA"), optional=True
+            ),
+            OneOf("VOLATILE", "IMMUTABLE", optional=True),
+            OneOf(
+                Sequence("CALLED", "ON", "NULL", "INPUT"),
+                Sequence("RETURNS", "NULL", "ON", "NULL", "INPUT"),
+                "STRICT",
+                optional=True,
+            ),
+            OneOf("VOLATILE", "IMMUTABLE", optional=True),
+            Sequence(
+                "RUNTIME_VERSION",
+                Ref("EqualsSegment"),
+                Ref("QuotedLiteralSegment"),
+                optional=True,
+            ),
+            Ref("CommentEqualsClauseSegment", optional=True),
+            Sequence(
+                "IMPORTS",
+                Ref("EqualsSegment"),
+                Bracketed(Delimited(Ref("QuotedLiteralSegment"))),
+                optional=True,
+            ),
+            Sequence(
+                "PACKAGES",
+                Ref("EqualsSegment"),
+                Bracketed(Delimited(Ref("QuotedLiteralSegment"))),
+                optional=True,
+            ),
+            Sequence(
+                "HANDLER",
+                Ref("EqualsSegment"),
+                Ref("QuotedLiteralSegment"),
+                optional=True,
+            ),
+            Sequence(
+                "TARGET_PATH",
+                Ref("EqualsSegment"),
+                Ref("QuotedLiteralSegment"),
+                optional=True,
+            ),
             optional=True,
         ),
-        OneOf("VOLATILE", "IMMUTABLE", optional=True),
-        Ref("CommentEqualsClauseSegment", optional=True),
-        "AS",
-        OneOf(
-            Ref("DoubleQuotedUDFBody"),
-            Ref("SingleQuotedUDFBody"),
-            Ref("DollarQuotedUDFBody"),
+        Sequence(
+            "AS",
+            OneOf(
+                Ref("DoubleQuotedUDFBody"),
+                Ref("SingleQuotedUDFBody"),
+                Ref("DollarQuotedUDFBody"),
+            ),
+            optional=True,
         ),
     )
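
The AnySetOf rework above lets the Python/Java UDF properties (LANGUAGE, RUNTIME_VERSION, PACKAGES, HANDLER, ...) appear in any order, with the AS body now optional. A hedged sketch; the CREATE FUNCTION header is only partially visible in this hunk, and the function itself is hypothetical:

import sqlfluff

# A Python UDF with out-of-order properties and a dollar-quoted body.
sqlfluff.parse(
    """
    CREATE OR REPLACE FUNCTION py_double(x NUMBER)
      RETURNS NUMBER
      LANGUAGE PYTHON
      RUNTIME_VERSION = '3.8'
      PACKAGES = ('numpy')
      HANDLER = 'py_double'
    AS $$
def py_double(x):
    return x * 2
$$;
    """,
    dialect="snowflake",
)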
 
@@ -2869,6 +3062,48 @@ class CreateSchemaStatementSegment(ansi.CreateSchemaStatementSegment):
     )
 
 
+class AlterRoleStatementSegment(BaseSegment):
+    """An `ALTER ROLE` statement.
+
+    https://docs.snowflake.com/en/sql-reference/sql/alter-role.html
+    """
+
+    type = "alter_role_statement"
+    match_grammar = Sequence(
+        "ALTER",
+        "ROLE",
+        Ref("IfExistsGrammar", optional=True),
+        Ref("RoleReferenceSegment"),
+        OneOf(
+            Sequence(
+                "SET",
+                OneOf(
+                    Ref("RoleReferenceSegment"),
+                    Ref("TagEqualsSegment"),
+                    Sequence(
+                        "COMMENT", Ref("EqualsSegment"), Ref("QuotedLiteralSegment")
+                    ),
+                ),
+            ),
+            Sequence(
+                "UNSET",
+                OneOf(
+                    Ref("RoleReferenceSegment"),
+                    Sequence("TAG", Delimited(Ref("TagReferenceSegment"))),
+                    Sequence("COMMENT"),
+                ),
+            ),
+            Sequence(
+                "RENAME",
+                "TO",
+                OneOf(
+                    Ref("RoleReferenceSegment"),
+                ),
+            ),
+        ),
+    )
+
+
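
The new Snowflake AlterRoleStatementSegment covers SET/UNSET of comments and tags plus renames. A hypothetical smoke test via the simple sqlfluff.parse API:

import sqlfluff

# SET COMMENT and RENAME TO branches of the grammar above.
for sql in (
    "ALTER ROLE IF EXISTS analyst SET COMMENT = 'read-only analysts';",
    "ALTER ROLE analyst RENAME TO analyst_ro;",
):
    sqlfluff.parse(sql, dialect="snowflake")
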
 class AlterSchemaStatementSegment(BaseSegment):
     """An `ALTER SCHEMA` statement.
 
@@ -3193,10 +3428,50 @@ class CreateStatementSegment(BaseSegment):
         AnySetOf(
             Sequence("TYPE", Ref("EqualsSegment"), "QUEUE"),
             Sequence("ENABLED", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar")),
+            Sequence(
+                "NOTIFICATION_PROVIDER",
+                Ref("EqualsSegment"),
+                OneOf("AWS_SNS", "AZURE_EVENT_GRID", "GCP_PUBSUB"),
+            ),
+            # AWS specific params:
+            Sequence(
+                "AWS_SNS_TOPIC_ARN",
+                Ref("EqualsSegment"),
+                Ref("QuotedLiteralSegment"),
+            ),
+            Sequence(
+                "AWS_SNS_ROLE_ARN",
+                Ref("EqualsSegment"),
+                Ref("QuotedLiteralSegment"),
+            ),
+            # Azure specific params:
+            Sequence(
+                "AZURE_TENANT_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment")
+            ),
+            OneOf(
+                Sequence(
+                    "AZURE_STORAGE_QUEUE_PRIMARY_URI",
+                    Ref("EqualsSegment"),
+                    Ref("QuotedLiteralSegment"),
+                ),
+                Sequence(
+                    "AZURE_EVENT_GRID_TOPIC_ENDPOINT",
+                    Ref("EqualsSegment"),
+                    Ref("QuotedLiteralSegment"),
+                ),
+            ),
+            # GCP specific params:
             OneOf(
-                Ref("S3NotificationIntegrationParameters"),
-                Ref("GCSNotificationIntegrationParameters"),
-                Ref("AzureNotificationIntegrationParameters"),
+                Sequence(
+                    "GCP_PUBSUB_SUBSCRIPTION_NAME",
+                    Ref("EqualsSegment"),
+                    Ref("QuotedLiteralSegment"),
+                ),
+                Sequence(
+                    "GCP_PUBSUB_TOPIC_NAME",
+                    Ref("EqualsSegment"),
+                    Ref("QuotedLiteralSegment"),
+                ),
             ),
             Sequence(
                 "DIRECTION",
@@ -3235,10 +3510,23 @@ class CreateStatementSegment(BaseSegment):
         AnySetOf(
             Sequence("TYPE", Ref("EqualsSegment"), "EXTERNAL_STAGE"),
             Sequence("ENABLED", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar")),
-            OneOf(
-                Ref("S3StorageIntegrationParameters"),
-                Ref("GCSStorageIntegrationParameters"),
-                Ref("AzureStorageIntegrationParameters"),
+            Sequence(
+                "STORAGE_PROVIDER", Ref("EqualsSegment"), OneOf("S3", "AZURE", "GCS")
+            ),
+            # Azure specific params:
+            Sequence(
+                "AZURE_TENANT_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment")
+            ),
+            # AWS specific params:
+            Sequence(
+                "STORAGE_AWS_ROLE_ARN",
+                Ref("EqualsSegment"),
+                Ref("QuotedLiteralSegment"),
+            ),
+            Sequence(
+                "STORAGE_AWS_OBJECT_ACL",
+                Ref("EqualsSegment"),
+                StringParser("'bucket-owner-full-control'", ansi.LiteralSegment),
             ),
             Sequence(
                 "STORAGE_ALLOWED_LOCATIONS",
@@ -3365,27 +3653,42 @@ class CreateUserSegment(BaseSegment):
             Sequence(
                 "LOGIN_NAME",
                 Ref("EqualsSegment"),
-                Ref("ObjectReferenceSegment"),
+                OneOf(
+                    Ref("ObjectReferenceSegment"),
+                    Ref("QuotedLiteralSegment"),
+                ),
             ),
             Sequence(
                 "DISPLAY_NAME",
                 Ref("EqualsSegment"),
-                Ref("ObjectReferenceSegment"),
+                OneOf(
+                    Ref("ObjectReferenceSegment"),
+                    Ref("QuotedLiteralSegment"),
+                ),
             ),
             Sequence(
                 "FIRST_NAME",
                 Ref("EqualsSegment"),
-                Ref("ObjectReferenceSegment"),
+                OneOf(
+                    Ref("ObjectReferenceSegment"),
+                    Ref("QuotedLiteralSegment"),
+                ),
             ),
             Sequence(
                 "MIDDLE_NAME",
                 Ref("EqualsSegment"),
-                Ref("ObjectReferenceSegment"),
+                OneOf(
+                    Ref("ObjectReferenceSegment"),
+                    Ref("QuotedLiteralSegment"),
+                ),
             ),
             Sequence(
                 "LAST_NAME",
                 Ref("EqualsSegment"),
-                Ref("ObjectReferenceSegment"),
+                OneOf(
+                    Ref("ObjectReferenceSegment"),
+                    Ref("QuotedLiteralSegment"),
+                ),
             ),
             Sequence(
                 "EMAIL",
@@ -3415,17 +3718,26 @@ class CreateUserSegment(BaseSegment):
             Sequence(
                 "DEFAULT_WAREHOUSE",
                 Ref("EqualsSegment"),
-                Ref("ObjectReferenceSegment"),
+                OneOf(
+                    Ref("ObjectReferenceSegment"),
+                    Ref("QuotedLiteralSegment"),
+                ),
             ),
             Sequence(
                 "DEFAULT_NAMESPACE",
                 Ref("EqualsSegment"),
-                Ref("ObjectReferenceSegment"),
+                OneOf(
+                    Ref("ObjectReferenceSegment"),
+                    Ref("QuotedLiteralSegment"),
+                ),
             ),
             Sequence(
                 "DEFAULT_ROLE",
                 Ref("EqualsSegment"),
-                Ref("ObjectReferenceSegment"),
+                OneOf(
+                    Ref("ObjectReferenceSegment"),
+                    Ref("QuotedLiteralSegment"),
+                ),
             ),
             Sequence(
                 "DEFAULT_SECONDARY_ROLES",
@@ -3449,6 +3761,7 @@ class CreateUserSegment(BaseSegment):
             ),
             Ref("CommentEqualsClauseSegment"),
         ),
+        Dedent,
     )
 
 
@@ -4436,7 +4749,10 @@ class CopyIntoTableStatementSegment(BaseSegment):
             Sequence(
                 "PATTERN",
                 Ref("EqualsSegment"),
-                Ref("QuotedLiteralSegment"),
+                OneOf(
+                    Ref("QuotedLiteralSegment"),
+                    Ref("ReferencedVariableNameSegment"),
+                ),
             ),
             Sequence(
                 "FILE_FORMAT",
@@ -4470,132 +4786,6 @@ class StorageLocation(BaseSegment):
     )
 
 
-class S3StorageIntegrationParameters(BaseSegment):
-    """Parameters for an S3 Storage Integration in Snowflake.
-
-    https://docs.snowflake.com/en/sql-reference/sql/create-storage-integration.html
-    """
-
-    name = "s3_storage_integration_parameters"
-    type = "storage_integration_parameters"
-
-    match_grammar = AnySetOf(
-        Sequence("STORAGE_PROVIDER", Ref("EqualsSegment"), "S3"),
-        Sequence(
-            "STORAGE_AWS_ROLE_ARN", Ref("EqualsSegment"), Ref("QuotedLiteralSegment")
-        ),
-        Sequence(
-            "STORAGE_AWS_OBJECT_ACL",
-            Ref("EqualsSegment"),
-            StringParser("'bucket-owner-full-control'", ansi.LiteralSegment),
-        ),
-    )
-
-
-class GCSStorageIntegrationParameters(BaseSegment):
-    """Parameters for a GCS Storage Integration in Snowflake.
-
-    https://docs.snowflake.com/en/sql-reference/sql/create-storage-integration.html
-    """
-
-    name = "gcs_storage_integration_parameters"
-    type = "storage_integration_parameters"
-
-    match_grammar = Sequence("STORAGE_PROVIDER", Ref("EqualsSegment"), "GCS")
-
-
-class AzureStorageIntegrationParameters(BaseSegment):
-    """Parameters for an Azure Storage Integration in Snowflake.
-
-    https://docs.snowflake.com/en/sql-reference/sql/create-storage-integration.html
-    """
-
-    name = "azure_storage_integration_parameters"
-    type = "storage_integration_parameters"
-
-    match_grammar = AnySetOf(
-        Sequence("STORAGE_PROVIDER", Ref("EqualsSegment"), "AZURE"),
-        Sequence("AZURE_TENANT_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment")),
-    )
-
-
-class S3NotificationIntegrationParameters(BaseSegment):
-    """Parameters for an S3 Notification Integration in Snowflake.
-
-    https://docs.snowflake.com/en/sql-reference/sql/create-notification-integration.html
-    """
-
-    name = "s3_notification_integration_parameters"
-    type = "notification_integration_parameters"
-
-    match_grammar = AnySetOf(
-        Sequence("NOTIFICATION_PROVIDER", Ref("EqualsSegment"), "AWS_SNS"),
-        Sequence(
-            "AWS_SNS_TOPIC_ARN",
-            Ref("EqualsSegment"),
-            Ref("QuotedLiteralSegment"),
-        ),
-        Sequence(
-            "AWS_SNS_ROLE_ARN",
-            Ref("EqualsSegment"),
-            Ref("QuotedLiteralSegment"),
-        ),
-    )
-
-
-class GCSNotificationIntegrationParameters(BaseSegment):
-    """Parameters for a GCS Notification Integration in Snowflake.
-
-    https://docs.snowflake.com/en/sql-reference/sql/create-notification-integration.html
-    """
-
-    name = "gcs_notification_integration_parameters"
-    type = "notification_integration_parameters"
-
-    match_grammar = AnySetOf(
-        Sequence("NOTIFICATION_PROVIDER", Ref("EqualsSegment"), "GCP_PUBSUB"),
-        OneOf(
-            Sequence(
-                "GCP_PUBSUB_SUBSCRIPTION_NAME",
-                Ref("EqualsSegment"),
-                Ref("QuotedLiteralSegment"),
-            ),
-            Sequence(
-                "GCP_PUBSUB_TOPIC_NAME",
-                Ref("EqualsSegment"),
-                Ref("QuotedLiteralSegment"),
-            ),
-        ),
-    )
-
-
-class AzureNotificationIntegrationParameters(BaseSegment):
-    """Parameters for an Azure Notification Integration in Snowflake.
-
-    https://docs.snowflake.com/en/sql-reference/sql/create-notification-integration.html
-    """
-
-    name = "azure_notification_integration_parameters"
-    type = "storage_notification_parameters"
-
-    match_grammar = AnySetOf(
-        Sequence("NOTIFICATION_PROVIDER", Ref("EqualsSegment"), "AZURE_EVENT_GRID"),
-        Sequence("AZURE_TENANT_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment")),
-        OneOf(
-            Sequence(
-                "AZURE_STORAGE_QUEUE_PRIMARY_URI",
-                Ref("EqualsSegment"),
-                Ref("QuotedLiteralSegment"),
-            ),
-            Sequence(
-                "AZURE_EVENT_GRID_TOPIC_ENDPOINT",
-                Ref("EqualsSegment"),
-                Ref("QuotedLiteralSegment"),
-            ),
-        ),
-    )
-
-
 class InternalStageParameters(BaseSegment):
     """Parameters for an internal stage in Snowflake.
 
@@ -5546,6 +5736,24 @@ class AlterTaskUnsetClauseSegment(BaseSegment):
     )
 
 
+class ExecuteTaskClauseSegment(BaseSegment):
+    """Snowflake's EXECUTE TASK clause.
+
+    ```
+        EXECUTE TASK <name>
+    ```
+
+    https://docs.snowflake.com/en/sql-reference/sql/execute-task
+    """
+
+    type = "execute_task_clause"
+    match_grammar = Sequence(
+        "EXECUTE",
+        "TASK",
+        Ref("ParameterNameSegment"),
+    )
+
+
 ############################
 # MERGE
 ############################
@@ -5573,16 +5781,10 @@ class MergeInsertClauseSegment(ansi.MergeInsertClauseSegment):
 
     match_grammar = Sequence(
         "INSERT",
+        Indent,
         Ref("BracketedColumnReferenceListGrammar", optional=True),
-        "VALUES",
-        Bracketed(
-            Delimited(
-                OneOf(
-                    "DEFAULT",
-                    Ref("ExpressionSegment"),
-                ),
-            )
-        ),
+        Dedent,
+        Ref("ValuesClauseSegment", optional=True),
         Ref("WhereClauseSegment", optional=True),
     )
 
diff --git a/src/sqlfluff/dialects/dialect_snowflake_keywords.py b/src/sqlfluff/dialects/dialect_snowflake_keywords.py
index eb0731e..502f0e0 100644
--- a/src/sqlfluff/dialects/dialect_snowflake_keywords.py
+++ b/src/sqlfluff/dialects/dialect_snowflake_keywords.py
@@ -44,7 +44,6 @@ INSERT_ONLY
 INTERSECT
 INTO
 IS
-ISSUE
 JOIN
 LATERAL
 LEFT
@@ -154,6 +153,7 @@ CALLED
 CALLER
 CASCADE
 CASE
+CASES
 CASE_INSENSITIVE
 CASE_SENSITIVE
 CHAIN
@@ -278,6 +278,7 @@ GRANTED
 GRANTS
 GROUPING
 GZIP
+HANDLER
 HEADER
 HEADERS
 HEX
@@ -290,6 +291,7 @@ IGNORE_UTF8_ERRORS
 IMMEDIATE
 IMMUTABLE
 IMPORT
+IMPORTS
 IMPORTED
 INCLUDE_QUERY_ID
 INDEX
@@ -300,6 +302,8 @@ INPUT
 INTEGRATION
 INTEGRATIONS
 INTERVAL
+ISSUE
+JAVA
 JAVASCRIPT
 JSON
 KEY
@@ -385,6 +389,7 @@ OVERRIDE
 OVERWRITE
 OWNER
 OWNERSHIP
+PACKAGES
 PARALLEL
 PARAMETERS
 PARQUET
@@ -413,6 +418,7 @@ PROCEDURES
 PUBLIC
 PURGE
 PUT
+PYTHON
 QUERIES
 QUEUE
 RANGE
@@ -457,6 +463,7 @@ ROW
 RSA_PUBLIC_KEY
 RSA_PUBLIC_KEY_2
 RUNNING
+RUNTIME_VERSION
 S3
 SCALING_POLICY
 SCHEDULE
@@ -514,6 +521,7 @@ STRIP_NULL_VALUES
 STRIP_OUTER_ARRAY
 STRIP_OUTER_ELEMENT
 SUBPATH
+SUPPORT
 SUSPEND
 SUSPENDED
 SWAP
@@ -523,6 +531,7 @@ TABLES
 TABLESPACE
 TABULAR
 TAG
+TARGET_PATH
 TASK
 TASKS
 TEMP
diff --git a/src/sqlfluff/dialects/dialect_sparksql.py b/src/sqlfluff/dialects/dialect_sparksql.py
index 3835828..a9103af 100644
--- a/src/sqlfluff/dialects/dialect_sparksql.py
+++ b/src/sqlfluff/dialects/dialect_sparksql.py
@@ -35,6 +35,7 @@ from sqlfluff.core.parser import (
     Matchable,
     MultiStringParser,
     StringLexer,
+    AnySetOf,
 )
 from sqlfluff.core.parser.segments.raw import CodeSegment, KeywordSegment
 from sqlfluff.dialects.dialect_sparksql_keywords import (
@@ -150,6 +151,21 @@ sparksql_dialect.insert_lexer_matchers(
     ],
     before="code",
 )
+sparksql_dialect.insert_lexer_matchers(
+    [
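+        # Matches bare file paths, e.g. /path/to/library.jar (illustrative).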
+        RegexLexer(
+            "file_literal",
+            (
+                r"[a-zA-Z0-9]*:?([a-zA-Z0-9\-_\.]*(\/|\\)){2,}"
+                r"((([a-zA-Z0-9\-_\.]*(:|\?|=|&)[a-zA-Z0-9\-_\.]*)+)"
+                r"|([a-zA-Z0-9\-_\.]*\.[a-z]+))"
+            ),
+            CodeSegment,
+            segment_kwargs={"type": "file_literal"},
+        ),
+    ],
+    before="newline",
+)
 
 # Set the bare functions
 sparksql_dialect.sets("bare_functions").clear()
@@ -216,6 +232,8 @@ sparksql_dialect.replace(
         Ref("LessThanOrEqualToSegment"),
         Ref("NotEqualToSegment"),
         Ref("LikeOperatorSegment"),
+        Sequence("IS", "DISTINCT", "FROM"),
+        Sequence("IS", "NOT", "DISTINCT", "FROM"),
     ),
     FromClauseTerminatorGrammar=OneOf(
         "WHERE",
@@ -339,6 +357,19 @@ sparksql_dialect.replace(
         "QUALIFY",
         "WINDOW",
     ),
+    ArithmeticBinaryOperatorGrammar=OneOf(
+        Ref("PlusSegment"),
+        Ref("MinusSegment"),
+        Ref("DivideSegment"),
+        Ref("MultiplySegment"),
+        Ref("ModuloSegment"),
+        Ref("BitwiseAndSegment"),
+        Ref("BitwiseOrSegment"),
+        Ref("BitwiseXorSegment"),
+        Ref("BitwiseLShiftSegment"),
+        Ref("BitwiseRShiftSegment"),
+        Ref("DivBinaryOperatorSegment"),
+    ),
     BinaryOperatorGrammar=OneOf(
         Ref("ArithmeticBinaryOperatorGrammar"),
         Ref("StringBinaryOperatorGrammar"),
@@ -370,6 +401,9 @@ sparksql_dialect.replace(
 )
 
 sparksql_dialect.add(
+    FileLiteralSegment=TypedParser(
+        "file_literal", ansi.LiteralSegment, type="file_literal"
+    ),
     BackQuotedIdentifierSegment=TypedParser(
         "back_quote",
         ansi.IdentifierSegment,
@@ -481,12 +515,6 @@ sparksql_dialect.add(
             Ref("QuotedLiteralSegment"),
         ),
     ),
-    DataSourceFormatGrammar=OneOf(
-        Ref("FileFormatGrammar"),
-        # NB: JDBC is part of DataSourceV2 but not included
-        # there since there are no significant syntax changes
-        "JDBC",
-    ),
     TimestampAsOfGrammar=Sequence(
         "TIMESTAMP",
         "AS",
@@ -522,9 +550,43 @@ sparksql_dialect.add(
                         Ref("LiteralGrammar", optional=True),
                         Ref("CommentGrammar", optional=True),
                     ),
+                    Ref("IcebergTransformationSegment", optional=True),
+                ),
+            ),
+        ),
+    ),
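+    # Iceberg partition evolution clause, e.g.
+    # ADD PARTITION FIELD days(event_ts) AS day_of_event (illustrative).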
+    PartitionFieldGrammar=Sequence(
+        "PARTITION",
+        "FIELD",
+        Delimited(
+            OneOf(
+                Ref("ColumnDefinitionSegment"),
+                Sequence(
+                    Ref("ColumnReferenceSegment"),
+                    Ref("EqualsSegment", optional=True),
+                    Ref("LiteralGrammar", optional=True),
+                    Ref("CommentGrammar", optional=True),
+                ),
+                Ref("IcebergTransformationSegment", optional=True),
+            ),
+        ),
+        Sequence(
+            Ref.keyword("WITH", optional=True),
+            Delimited(
+                OneOf(
+                    Ref("ColumnDefinitionSegment"),
+                    Sequence(
+                        Ref("ColumnReferenceSegment"),
+                        Ref("EqualsSegment", optional=True),
+                        Ref("LiteralGrammar", optional=True),
+                        Ref("CommentGrammar", optional=True),
+                    ),
+                    Ref("IcebergTransformationSegment", optional=True),
                 ),
             ),
+            optional=True,
         ),
+        Sequence("AS", Ref("NakedIdentifierSegment"), optional=True),
     ),
     # NB: Redefined from `NakedIdentifierSegment` which uses an anti-template to
     # not match keywords; however, SparkSQL allows keywords to be used in table
@@ -605,7 +667,8 @@ sparksql_dialect.add(
         trim_chars="@",
     ),
     # This is the same as QuotedLiteralSegment but
-    # is given a different `name` to stop L048 flagging
+    # is given a different `name` to stop LT01 flagging
+    # TODO: Work out how the LT01 change influences this.
     SignedQuotedLiteralSegment=OneOf(
         TypedParser(
             "single_quote",
@@ -630,6 +693,65 @@ sparksql_dialect.add(
         "DEFAULT",
         Ref("QuotedLiteralSegment"),
     ),
+    TableDefinitionSegment=Sequence(
+        OneOf(Ref("OrReplaceGrammar"), Ref("OrRefreshGrammar"), optional=True),
+        Ref("TemporaryGrammar", optional=True),
+        Ref.keyword("EXTERNAL", optional=True),
+        Ref.keyword("STREAMING", optional=True),
+        Ref.keyword("LIVE", optional=True),
+        "TABLE",
+        Ref("IfNotExistsGrammar", optional=True),
+        OneOf(
+            Ref("FileReferenceSegment"),
+            Ref("TableReferenceSegment"),
+        ),
+        OneOf(
+            # Columns and comment syntax:
+            Bracketed(
+                Delimited(
+                    Sequence(
+                        OneOf(
+                            Ref("ColumnDefinitionSegment"),
+                            Ref("GeneratedColumnDefinitionSegment"),
+                        ),
+                        Ref("CommentGrammar", optional=True),
+                    ),
+                ),
+            ),
+            # Like Syntax
+            Sequence(
+                "LIKE",
+                OneOf(
+                    Ref("FileReferenceSegment"),
+                    Ref("TableReferenceSegment"),
+                ),
+            ),
+            optional=True,
+        ),
+        Ref("UsingClauseSegment", optional=True),
+        AnySetOf(
+            Ref("RowFormatClauseSegment"),
+            Ref("StoredAsGrammar"),
+            Ref("CommentGrammar"),
+            Ref("OptionsGrammar"),
+            Ref("PartitionSpecGrammar"),
+            Ref("BucketSpecGrammar"),
+            optional=True,
+        ),
+        Indent,
+        AnyNumberOf(
+            Ref("LocationGrammar", optional=True),
+            Ref("CommentGrammar", optional=True),
+            Ref("TablePropertiesGrammar", optional=True),
+        ),
+        Dedent,
+        # Create AS syntax:
+        Sequence(
+            Ref.keyword("AS", optional=True),
+            OptionallyBracketed(Ref("SelectableGrammar")),
+            optional=True,
+        ),
+    ),
 )
 
 # Adding Hint related grammar before comment `block_comment` and
@@ -661,6 +783,13 @@ sparksql_dialect.insert_lexer_matchers(
 )
 
 
+class DivBinaryOperatorSegment(BaseSegment):
+    """DIV type binary_operator."""
+
+    type = "binary_operator"
+    match_grammar = Ref.keyword("DIV")
+
+
 class QualifyClauseSegment(BaseSegment):
     """A `QUALIFY` clause like in `SELECT`."""
 
@@ -707,6 +836,7 @@ class PrimitiveTypeSegment(BaseSegment):
         "TINYINT",
         # TODO : not currently supported; add segment - see NumericLiteralSegment
         # "SHORT",
+        "LONG",
         "SMALLINT",
         "INT",
         "INTEGER",
@@ -718,24 +848,33 @@ class PrimitiveTypeSegment(BaseSegment):
         "TIMESTAMP",
         "STRING",
         Sequence(
-            OneOf("CHAR", "CHARACTER", "VARCHAR"),
-            Bracketed(Ref("NumericLiteralSegment"), optional=True),
+            OneOf("CHAR", "CHARACTER", "VARCHAR", "DECIMAL", "DEC", "NUMERIC"),
+            Ref("BracketedArguments", optional=True),
         ),
         "BINARY",
-        Sequence(
-            OneOf("DECIMAL", "DEC", "NUMERIC"),
-            Bracketed(
-                Ref("NumericLiteralSegment"),
-                Ref("CommaSegment"),
-                Ref("NumericLiteralSegment"),
-                optional=True,
-            ),
-        ),
         "INTERVAL",
     )
 
 
-class DatatypeSegment(PrimitiveTypeSegment):
+class ArrayTypeSegment(hive.ArrayTypeSegment):
+    """ARRAY type as per hive."""
+
+    pass
+
+
+class StructTypeSegment(hive.StructTypeSegment):
+    """STRUCT type as per hive."""
+
+    pass
+
+
+class StructTypeSchemaSegment(hive.StructTypeSchemaSegment):
+    """STRUCT type schema as per hive."""
+
+    pass
+
+
+class DatatypeSegment(BaseSegment):
     """Spark SQL Data types.
 
     https://spark.apache.org/docs/latest/sql-ref-datatypes.html
@@ -744,19 +883,12 @@ class DatatypeSegment(PrimitiveTypeSegment):
     type = "data_type"
     match_grammar = OneOf(
         Ref("PrimitiveTypeSegment"),
-        Sequence(
-            "ARRAY",
-            Bracketed(
-                Ref("DatatypeSegment"),
-                bracket_pairs_set="angle_bracket_pairs",
-                bracket_type="angle",
-            ),
-        ),
+        Ref("ArrayTypeSegment"),
         Sequence(
             "MAP",
             Bracketed(
                 Sequence(
-                    Ref("PrimitiveTypeSegment"),
+                    Ref("DatatypeSegment"),
                     Ref("CommaSegment"),
                     Ref("DatatypeSegment"),
                 ),
@@ -764,23 +896,7 @@ class DatatypeSegment(PrimitiveTypeSegment):
                 bracket_type="angle",
             ),
         ),
-        Sequence(
-            "STRUCT",
-            Bracketed(
-                # CommentGrammar here is valid Spark SQL
-                # even though its not stored in Sparks Catalog
-                Delimited(
-                    Sequence(
-                        Ref("SingleIdentifierGrammar"),
-                        Ref("ColonSegment"),
-                        Ref("DatatypeSegment"),
-                        Ref("CommentGrammar", optional=True),
-                    ),
-                ),
-                bracket_pairs_set="angle_bracket_pairs",
-                bracket_type="angle",
-            ),
-        ),
+        Ref("StructTypeSegment"),
     )
 
 
@@ -831,15 +947,23 @@ class AlterTableStatementSegment(ansi.AlterTableStatementSegment):
                 "TO",
                 Ref("PartitionSpecGrammar"),
             ),
+            # ALTER TABLE - RENAME COLUMN
+            Sequence(
+                "RENAME",
+                "COLUMN",
+                Ref("ColumnReferenceSegment"),
+                "TO",
+                Ref("ColumnReferenceSegment"),
+            ),
             # ALTER TABLE - ADD COLUMNS
             Sequence(
                 "ADD",
-                "COLUMNS",
+                OneOf("COLUMNS", "COLUMN"),
                 Indent,
                 OptionallyBracketed(
                     Delimited(
                         Sequence(
-                            Ref("ColumnDefinitionSegment"),
+                            Ref("ColumnFieldDefinitionSegment"),
                             OneOf(
                                 "FIRST",
                                 Sequence(
@@ -921,15 +1045,26 @@ class AlterTableStatementSegment(ansi.AlterTableStatementSegment):
             Sequence(
                 "ADD",
                 Ref("IfNotExistsGrammar", optional=True),
-                AnyNumberOf(Ref("PartitionSpecGrammar"), min_times=1),
+                AnyNumberOf(
+                    Ref("PartitionSpecGrammar"),
+                    Ref("PartitionFieldGrammar"),
+                    min_times=1,
+                ),
             ),
             # ALTER TABLE - DROP PARTITION
             Sequence(
                 "DROP",
                 Ref("IfExistsGrammar", optional=True),
-                Ref("PartitionSpecGrammar"),
+                OneOf(
+                    Ref("PartitionSpecGrammar"),
+                    Ref("PartitionFieldGrammar"),
+                ),
                 Sequence("PURGE", optional=True),
             ),
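+            # ALTER TABLE - REPLACE PARTITION FIELD (Iceberg)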
+            Sequence(
+                "Replace",
+                Ref("PartitionFieldGrammar"),
+            ),
             # ALTER TABLE - REPAIR PARTITION
             Sequence("RECOVER", "PARTITIONS"),
             # ALTER TABLE - SET PROPERTIES
@@ -957,7 +1092,7 @@ class AlterTableStatementSegment(ansi.AlterTableStatementSegment):
                 Ref("PartitionSpecGrammar", optional=True),
                 "SET",
                 "FILEFORMAT",
-                Ref("DataSourceFormatGrammar"),
+                Ref("DataSourceFormatSegment"),
             ),
             # ALTER TABLE - CHANGE FILE LOCATION
             Sequence(
@@ -978,11 +1113,85 @@ class AlterTableStatementSegment(ansi.AlterTableStatementSegment):
                 Bracketed(Ref("ExpressionSegment"), optional=True),
                 Dedent,
             ),
+            # ALTER TABLE - ICEBERG WRITE ORDER / DISTRIBUTION
+            # https://iceberg.apache.org/docs/latest/spark-ddl/#alter-table--write-ordered-by
+            Sequence(
+                "WRITE",
+                AnyNumberOf(
+                    Sequence("DISTRIBUTED", "BY", "PARTITION", optional=True),
+                    Sequence(
+                        Ref.keyword("LOCALLY", optional=True),
+                        "ORDERED",
+                        "BY",
+                        Indent,
+                        Delimited(
+                            Sequence(
+                                Ref("ColumnReferenceSegment"),
+                                OneOf("ASC", "DESC", optional=True),
+                                # NB: This isn't really ANSI, and isn't supported
+                                # in MySQL, but is supported in enough other dialects
+                                # for it to make sense here for now.
+                                Sequence(
+                                    "NULLS", OneOf("FIRST", "LAST"), optional=True
+                                ),
+                            ),
+                            optional=True,
+                        ),
+                        Dedent,
+                        optional=True,
+                    ),
+                    min_times=1,
+                    max_times_per_element=1,
+                ),
+            ),
+            # ALTER TABLE - ICEBERG SET IDENTIFIER FIELDS
+            Sequence(
+                "SET",
+                "IDENTIFIER",
+                "FIELDS",
+                Indent,
+                Delimited(
+                    Sequence(
+                        Ref("ColumnReferenceSegment"),
+                    ),
+                ),
+                Dedent,
+            ),
+            # ALTER TABLE - ICEBERG DROP IDENTIFIER FIELDS
+            Sequence(
+                "DROP",
+                "IDENTIFIER",
+                "FIELDS",
+                Indent,
+                Delimited(
+                    Sequence(
+                        Ref("ColumnReferenceSegment"),
+                    ),
+                ),
+                Dedent,
+            ),
         ),
         Dedent,
     )
 
 
+class ColumnFieldDefinitionSegment(ansi.ColumnDefinitionSegment):
+    """A column field definition, e.g. for CREATE TABLE or ALTER TABLE.
+
+    This supports Iceberg syntax, allowing dotted field references
+    such as ADD COLUMN a.b.
+    """
+
+    match_grammar: Matchable = Sequence(
+        Ref("ColumnReferenceSegment"),  # Column name
+        Ref("DatatypeSegment"),  # Column type
+        Bracketed(Anything(), optional=True),  # For types like VARCHAR(100)
+        AnyNumberOf(
+            Ref("ColumnConstraintSegment", optional=True),
+        ),
+    )
+
+
 class AlterViewStatementSegment(BaseSegment):
     """A `ALTER VIEW` statement to change the view schema or properties.
 
@@ -1057,61 +1266,7 @@ class CreateTableStatementSegment(ansi.CreateTableStatementSegment):
     https://docs.delta.io/latest/delta-batch.html#create-a-table
     """
 
-    match_grammar = Sequence(
-        "CREATE",
-        OneOf(Ref("OrReplaceGrammar"), Ref("OrRefreshGrammar"), optional=True),
-        Ref("TemporaryGrammar", optional=True),
-        Ref.keyword("STREAMING", optional=True),
-        Ref.keyword("LIVE", optional=True),
-        "TABLE",
-        Ref("IfNotExistsGrammar", optional=True),
-        OneOf(
-            Ref("FileReferenceSegment"),
-            Ref("TableReferenceSegment"),
-        ),
-        OneOf(
-            # Columns and comment syntax:
-            Bracketed(
-                Delimited(
-                    Sequence(
-                        OneOf(
-                            Ref("ColumnDefinitionSegment"),
-                            Ref("GeneratedColumnDefinitionSegment"),
-                        ),
-                        Ref("CommentGrammar", optional=True),
-                    ),
-                ),
-            ),
-            # Like Syntax
-            Sequence(
-                "LIKE",
-                OneOf(
-                    Ref("FileReferenceSegment"),
-                    Ref("TableReferenceSegment"),
-                ),
-            ),
-            optional=True,
-        ),
-        Sequence("USING", Ref("DataSourceFormatGrammar"), optional=True),
-        Ref("RowFormatClauseSegment", optional=True),
-        Ref("StoredAsGrammar", optional=True),
-        Ref("OptionsGrammar", optional=True),
-        Ref("PartitionSpecGrammar", optional=True),
-        Ref("BucketSpecGrammar", optional=True),
-        Indent,
-        AnyNumberOf(
-            Ref("LocationGrammar", optional=True),
-            Ref("CommentGrammar", optional=True),
-            Ref("TablePropertiesGrammar", optional=True),
-        ),
-        Dedent,
-        # Create AS syntax:
-        Sequence(
-            Ref.keyword("AS", optional=True),
-            OptionallyBracketed(Ref("SelectableGrammar")),
-            optional=True,
-        ),
-    )
+    match_grammar = Sequence("CREATE", Ref("TableDefinitionSegment"))
 
 
 class CreateHiveFormatTableStatementSegment(hive.CreateTableStatementSegment):
@@ -1150,10 +1305,11 @@ class CreateViewStatementSegment(ansi.CreateViewStatementSegment):
             ),
             optional=True,
         ),
+        Sequence("USING", Ref("DataSourceFormatSegment"), optional=True),
+        Ref("OptionsGrammar", optional=True),
         Ref("CommentGrammar", optional=True),
         Ref("TablePropertiesGrammar", optional=True),
-        "AS",
-        OptionallyBracketed(Ref("SelectableGrammar")),
+        Sequence("AS", OptionallyBracketed(Ref("SelectableGrammar")), optional=True),
         Ref("WithNoSchemaBindingClauseSegment", optional=True),
     )
 
@@ -1183,6 +1339,16 @@ class CreateWidgetStatementSegment(BaseSegment):
     )
 
 
+class ReplaceTableStatementSegment(BaseSegment):
+    """A `REPLACE TABLE` statement using the iceberg table format.
+
+    https://iceberg.apache.org/docs/latest/spark-ddl/#replace-table--as-select
+    """
+
+    type = "replace_table_statement"
+    match_grammar = Sequence("REPLACE", Ref("TableDefinitionSegment"))
+
+
 class RemoveWidgetStatementSegment(BaseSegment):
     """A `REMOVE WIDGET` STATEMENT.
 
@@ -1198,6 +1364,22 @@ class RemoveWidgetStatementSegment(BaseSegment):
     )
 
 
+class DropDatabaseStatementSegment(ansi.DropDatabaseStatementSegment):
+    """A `DROP DATABASE` statement.
+
+    https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-drop-database.html
+    """
+
+    type = "drop_database_statement"
+    match_grammar: Matchable = Sequence(
+        "DROP",
+        OneOf("DATABASE", "SCHEMA"),
+        Ref("IfExistsGrammar", optional=True),
+        Ref("DatabaseReferenceSegment"),
+        Ref("DropBehaviorGrammar", optional=True),
+    )
+
+
 class DropFunctionStatementSegment(BaseSegment):
     """A `DROP FUNCTION` STATEMENT.
 
@@ -1313,7 +1495,7 @@ class InsertOverwriteDirectorySegment(BaseSegment):
         "DIRECTORY",
         Ref("QuotedLiteralSegment", optional=True),
         "USING",
-        Ref("DataSourceFormatGrammar"),
+        Ref("DataSourceFormatSegment"),
         Ref("OptionsGrammar", optional=True),
         OneOf(
             AnyNumberOf(
@@ -1432,6 +1614,7 @@ class DistributeByClauseSegment(BaseSegment):
     match_grammar = StartsWith(
         Sequence("DISTRIBUTE", "BY"),
         terminator=OneOf(
+            "SORT",
             "LIMIT",
             "HAVING",
             # For window functions
@@ -1459,6 +1642,7 @@ class DistributeByClauseSegment(BaseSegment):
                 "WINDOW",
                 "LIMIT",
                 Ref("FrameClauseUnitGrammar"),
+                "SORT",
             ),
         ),
         Dedent,
@@ -1480,6 +1664,8 @@ class HintFunctionSegment(BaseSegment):
                 AnyNumberOf(
                     Ref("SingleIdentifierGrammar"),
                     Ref("NumericLiteralSegment"),
+                    Ref("TableReferenceSegment"),
+                    Ref("ColumnReferenceSegment"),
                     min_times=1,
                 ),
             ),
@@ -1638,8 +1824,8 @@ class GroupByClauseSegment(ansi.GroupByClauseSegment):
         "GROUP",
         "BY",
         Indent,
-        Delimited(
-            OneOf(
+        OneOf(
+            Delimited(
                 Ref("ColumnReferenceSegment"),
                 # Can `GROUP BY 1`
                 Ref("NumericLiteralSegment"),
@@ -1648,16 +1834,42 @@ class GroupByClauseSegment(ansi.GroupByClauseSegment):
                 Ref("CubeRollupClauseSegment"),
                 Ref("GroupingSetsClauseSegment"),
             ),
-            terminator=Ref("GroupByClauseTerminatorGrammar"),
+            Sequence(
+                Delimited(
+                    Ref("ColumnReferenceSegment"),
+                    # Can `GROUP BY 1`
+                    Ref("NumericLiteralSegment"),
+                    # Can `GROUP BY coalesce(col, 1)`
+                    Ref("ExpressionSegment"),
+                ),
+                OneOf(
+                    Ref("WithCubeRollupClauseSegment"), Ref("GroupingSetsClauseSegment")
+                ),
+            ),
         ),
-        # TODO: New Rule
-        #  Warn if CubeRollupClauseSegment and
-        #  WithCubeRollupClauseSegment used in same query
-        Ref("WithCubeRollupClauseSegment", optional=True),
         Dedent,
     )
 
 
+class OrderByClauseSegment(ansi.OrderByClauseSegment):
+    """A `ORDER BY` clause like in `SELECT`."""
+
+    match_grammar = ansi.OrderByClauseSegment.match_grammar.copy()
+    match_grammar.terminator = OneOf(  # type: ignore
+        "CLUSTER",
+        "DISTRIBUTE",
+        "SORT",
+        "LIMIT",
+        "HAVING",
+        "QUALIFY",
+        # For window functions
+        "WINDOW",
+        Ref("FrameClauseUnitGrammar"),
+        "SEPARATOR",
+    )
+    parse_grammar = ansi.OrderByClauseSegment.parse_grammar
+
+
 class WithCubeRollupClauseSegment(BaseSegment):
     """A `[WITH CUBE | WITH ROLLUP]` clause after the `GROUP BY` clause.
 
@@ -1829,17 +2041,9 @@ class LateralViewClauseSegment(BaseSegment):
         "VIEW",
         Ref.keyword("OUTER", optional=True),
         Ref("FunctionSegment"),
-        # NB: AliasExpressionSegment is not used here for table
-        # or column alias because `AS` is optional within it
-        # (and in most scenarios). Here it's explicitly defined
-        # for when it is required and not allowed.
+        # This allows for a table name to precede the alias expression.
         Ref("SingleIdentifierGrammar", optional=True),
-        Sequence(
-            "AS",
-            Delimited(
-                Ref("SingleIdentifierGrammar"),
-            ),
-        ),
+        Ref("AliasExpressionSegment", optional=True),
         Dedent,
     )
 
@@ -1859,7 +2063,7 @@ class PivotClauseSegment(BaseSegment):
             Indent,
             Delimited(
                 Sequence(
-                    Ref("FunctionSegment"),
+                    Ref("BaseExpressionElementGrammar"),
                     Ref("AliasExpressionSegment", optional=True),
                 ),
             ),
@@ -1985,7 +2189,10 @@ class AddJarSegment(BaseSegment):
     match_grammar = Sequence(
         "ADD",
         Ref("JarKeywordSegment"),
-        AnyNumberOf(Ref("QuotedLiteralSegment")),
+        AnyNumberOf(
+            Ref("QuotedLiteralSegment"),
+            Ref("FileLiteralSegment"),
+        ),
     )
 
 
@@ -2103,7 +2310,7 @@ class DescribeStatementSegment(BaseSegment):
         OneOf("DESCRIBE", "DESC"),
         OneOf(
             Sequence(
-                "DATABASE",
+                OneOf("DATABASE", "SCHEMA"),
                 Ref.keyword("EXTENDED", optional=True),
                 Ref("DatabaseReferenceSegment"),
             ),
@@ -2459,6 +2666,7 @@ class StatementSegment(ansi.StatementSegment):
             # Databricks - widgets
             Ref("CreateWidgetStatementSegment"),
             Ref("RemoveWidgetStatementSegment"),
+            Ref("ReplaceTableStatementSegment"),
         ],
         remove=[
             Ref("TransactionStatementSegment"),
@@ -2485,38 +2693,36 @@ class JoinClauseSegment(ansi.JoinClauseSegment):
             Ref("JoinTypeKeywords", optional=True),
             Ref("JoinKeywordsGrammar"),
             Indent,
-            Sequence(
-                Ref("FromExpressionElementSegment"),
-                Conditional(Dedent, indented_using_on=False),
-                # NB: this is optional
-                OneOf(
-                    # ON clause
-                    Ref("JoinOnConditionSegment"),
-                    # USING clause
-                    Sequence(
-                        "USING",
-                        Indent,
-                        Bracketed(
-                            # NB: We don't use BracketedColumnReferenceListGrammar
-                            # here because we're just using SingleIdentifierGrammar,
-                            # rather than ObjectReferenceSegment or
-                            # ColumnReferenceSegment. This is a) so that we don't
-                            # lint it as a reference and b) because the column will
-                            # probably be returned anyway during parsing.
-                            Delimited(
-                                Ref("SingleIdentifierGrammar"),
-                                ephemeral_name="UsingClauseContents",
-                            )
-                        ),
-                        Dedent,
+            Ref("FromExpressionElementSegment"),
+            Dedent,
+            Conditional(Indent, indented_using_on=True),
+            # NB: this is optional
+            OneOf(
+                # ON clause
+                Ref("JoinOnConditionSegment"),
+                # USING clause
+                Sequence(
+                    "USING",
+                    Conditional(Indent, indented_using_on=False),
+                    Bracketed(
+                        # NB: We don't use BracketedColumnReferenceListGrammar
+                        # here because we're just using SingleIdentifierGrammar,
+                        # rather than ObjectReferenceSegment or
+                        # ColumnReferenceSegment. This is a) so that we don't
+                        # lint it as a reference and b) because the column will
+                        # probably be returned anyway during parsing.
+                        Delimited(
+                            Ref("SingleIdentifierGrammar"),
+                            ephemeral_name="UsingClauseContents",
+                        )
                     ),
-                    # Unqualified joins *are* allowed. They just might not
-                    # be a good idea.
-                    optional=True,
+                    Conditional(Dedent, indented_using_on=False),
                 ),
-                Conditional(Indent, indented_using_on=False),
+                # Unqualified joins *are* allowed. They just might not
+                # be a good idea.
+                optional=True,
             ),
-            Dedent,
+            Conditional(Dedent, indented_using_on=True),
         ),
         # Note NATURAL joins do not support Join conditions
         Sequence(
@@ -2768,9 +2974,7 @@ class MergeInsertClauseSegment(ansi.MergeInsertClauseSegment):
                 Indent,
                 Ref("BracketedColumnReferenceListGrammar"),
                 Dedent,
-                Indent,
                 Ref("ValuesClauseSegment"),
-                Dedent,
             ),
         ),
     )
@@ -3114,3 +3318,86 @@ class SelectClauseSegment(BaseSegment):
     )
 
     parse_grammar: Matchable = Ref("SelectClauseSegmentGrammar")
+
+
+class UsingClauseSegment(BaseSegment):
+    """`USING` clause segment."""
+
+    type = "using_clause"
+    match_grammar = Sequence("USING", Ref("DataSourceFormatSegment"))
+
+
+class DataSourceFormatSegment(BaseSegment):
+    """Data source format segment."""
+
+    type = "data_source_format"
+    match_grammar = OneOf(
+        Ref("FileFormatGrammar"),
+        # NB: JDBC is part of DataSourceV2 but not included
+        # there since there are no significant syntax changes
+        "JDBC",
+        Ref(
+            "ObjectReferenceSegment"
+        ),  # This allows for formats such as org.apache.spark.sql.jdbc
+    )
+
+
+class IcebergTransformationSegment(BaseSegment):
+    """A Transformation expressions used in PARTITIONED BY.
+
+    This segment is to be used in creating hidden partitions
+    in the iceberg table format.
+    https://iceberg.apache.org/docs/latest/spark-ddl/#partitioned-by
+    """
+
+    type = "iceberg_transformation"
+    match_grammar = OneOf(
+        Sequence(
+            OneOf(
+                "YEARS",
+                "MONTHS",
+                "DAYS",
+                "DATE",
+                "HOURS",
+                "DATE_HOUR",
+            ),
+            Bracketed(Ref("ColumnReferenceSegment")),
+        ),
+        Sequence(
+            OneOf("BUCKET", "TRUNCATE"),
+            Bracketed(
+                Sequence(
+                    Ref("NumericLiteralSegment"),
+                    Ref("CommaSegment"),
+                    Ref("ColumnReferenceSegment"),
+                )
+            ),
+        ),
+    )
+
+
+class FrameClauseSegment(ansi.FrameClauseSegment):
+    """A frame clause for window functions.
+
+    This overrides the ANSI dialect frame clause segment, as the SparkSQL
+    frame clause allows a more expressive frame syntax.
+    https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-window.html
+    """
+
+    type = "frame_clause"
+    _frame_extent = OneOf(
+        Sequence("CURRENT", "ROW"),
+        Sequence(
+            OneOf(
+                Ref("NumericLiteralSegment"),
+                "UNBOUNDED",
+                Ref("IntervalExpressionSegment"),
+            ),
+            OneOf("PRECEDING", "FOLLOWING"),
+        ),
+    )
+
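+    # e.g. RANGE BETWEEN INTERVAL 1 DAY PRECEDING AND CURRENT ROW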
+    match_grammar: Matchable = Sequence(
+        Ref("FrameClauseUnitGrammar"),
+        OneOf(_frame_extent, Sequence("BETWEEN", _frame_extent, "AND", _frame_extent)),
+    )
diff --git a/src/sqlfluff/dialects/dialect_sparksql_keywords.py b/src/sqlfluff/dialects/dialect_sparksql_keywords.py
index 079d013..97f9a6e 100644
--- a/src/sqlfluff/dialects/dialect_sparksql_keywords.py
+++ b/src/sqlfluff/dialects/dialect_sparksql_keywords.py
@@ -22,7 +22,6 @@ RESERVED_KEYWORDS = [
     "CURRENT_TIME",
     "CURRENT_TIMESTAMP",
     "CURRENT_USER",
-    "DISTINCT",
     "ELSE",
     "END",
     "ESCAPE",
@@ -62,7 +61,6 @@ RESERVED_KEYWORDS = [
     "SOME",
     "TABLE",
     "THEN",
-    "TIME",
     "TO",
     "TRAILING",
     "UNION",
@@ -109,9 +107,12 @@ UNRESERVED_KEYWORDS = [
     "CUBE",
     "CURRENT",
     "DATA",
+    "DATE",
+    "DATE_HOUR",
     "DATABASE",
     "DATABASES",
     "DAY",
+    "DAYS",
     "DBPROPERTIES",
     "DEFINED",
     "DELETE",
@@ -121,7 +122,9 @@ UNRESERVED_KEYWORDS = [
     "DFS",
     "DIRECTORIES",
     "DIRECTORY",
+    "DISTINCT",
     "DISTRIBUTE",
+    "DISTRIBUTED",
     "DIV",
     "DROP",
     "ESCAPED",
@@ -132,6 +135,7 @@ UNRESERVED_KEYWORDS = [
     "EXTENDED",
     "EXTERNAL",
     "EXTRACT",
+    "FIELD",
     "FIELDS",
     "FILEFORMAT",
     "FIRST",
@@ -143,6 +147,8 @@ UNRESERVED_KEYWORDS = [
     "GLOBAL",
     "GROUPING",
     "HOUR",
+    "HOURS",
+    "IDENTIFIER",
     "IF",
     "IGNORE",
     "ILIKE",
@@ -163,6 +169,7 @@ UNRESERVED_KEYWORDS = [
     "LIST",
     "LOAD",
     "LOCAL",
+    "LOCALLY",
     "LOCATION",
     "LOCK",
     "LOCKS",
@@ -173,6 +180,7 @@ UNRESERVED_KEYWORDS = [
     "MERGE",
     "MINUTE",
     "MONTH",
+    "MONTHS",
     "MSCK",
     "NAMESPACE",
     "NAMESPACES",
@@ -181,6 +189,7 @@ UNRESERVED_KEYWORDS = [
     "OF",
     "OPTION",
     "OPTIONS",
+    "ORDERED",
     "OUT",
     "OUTPUTFORMAT",
     "OVER",
@@ -248,6 +257,7 @@ UNRESERVED_KEYWORDS = [
     "TEMP",
     "TEMPORARY",
     "TERMINATED",
+    "TIME",
     "TOUCH",
     "TRANSACTION",
     "TRANSACTIONS",
@@ -267,8 +277,10 @@ UNRESERVED_KEYWORDS = [
     "VALUES",
     "VIEW",
     "VIEWS",
+    "WRITE",
     "WINDOW",
     "YEAR",
+    "YEARS",
     "ZONE",
     # Spark Core Data Sources
     # https://spark.apache.org/docs/latest/sql-data-sources.html
diff --git a/src/sqlfluff/dialects/dialect_sqlite.py b/src/sqlfluff/dialects/dialect_sqlite.py
index edd3c40..196b66c 100644
--- a/src/sqlfluff/dialects/dialect_sqlite.py
+++ b/src/sqlfluff/dialects/dialect_sqlite.py
@@ -12,15 +12,27 @@ from sqlfluff.core.parser import (
     OptionallyBracketed,
     Ref,
     Sequence,
+    Delimited,
+    TypedParser,
+    Nothing,
+    AnyNumberOf,
+    Anything,
+    StartsWith,
 )
 from sqlfluff.dialects import dialect_ansi as ansi
+from sqlfluff.dialects.dialect_sqlite_keywords import (
+    RESERVED_KEYWORDS,
+    UNRESERVED_KEYWORDS,
+)
 
 ansi_dialect = load_raw_dialect("ansi")
 
 sqlite_dialect = ansi_dialect.copy_as("sqlite")
 
-sqlite_dialect.sets("reserved_keywords").update(["AUTOINCREMENT"])
-sqlite_dialect.sets("unreserved_keywords").update(["FAIL"])
+sqlite_dialect.sets("reserved_keywords").clear()
+sqlite_dialect.sets("reserved_keywords").update(RESERVED_KEYWORDS)
+sqlite_dialect.sets("unreserved_keywords").clear()
+sqlite_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS)
 
 sqlite_dialect.replace(
     BooleanBinaryOperatorGrammar=OneOf(
@@ -29,9 +41,233 @@ sqlite_dialect.replace(
     PrimaryKeyGrammar=Sequence(
         "PRIMARY", "KEY", Sequence("AUTOINCREMENT", optional=True)
     ),
+    TemporaryTransientGrammar=Ref("TemporaryGrammar"),
+    DateTimeLiteralGrammar=Sequence(
+        OneOf("DATE", "DATETIME"),
+        TypedParser(
+            "single_quote", ansi.LiteralSegment, type="date_constructor_literal"
+        ),
+    ),
+    BaseExpressionElementGrammar=OneOf(
+        Ref("LiteralGrammar"),
+        Ref("BareFunctionSegment"),
+        Ref("FunctionSegment"),
+        Ref("ColumnReferenceSegment"),
+        Ref("ExpressionSegment"),
+        Sequence(
+            Ref("DatatypeSegment"),
+            Ref("LiteralGrammar"),
+        ),
+    ),
+    AutoIncrementGrammar=Nothing(),
+    CommentClauseSegment=Nothing(),
+    IntervalExpressionSegment=Nothing(),
+    TimeZoneGrammar=Nothing(),
+    FetchClauseSegment=Nothing(),
+    TrimParametersGrammar=Nothing(),
+    LikeGrammar=Sequence("LIKE"),
+    OverlapsClauseSegment=Nothing(),
+    MLTableExpressionSegment=Nothing(),
+    MergeIntoLiteralGrammar=Nothing(),
+    SamplingExpressionSegment=Nothing(),
+    OrderByClauseTerminators=OneOf(
+        "LIMIT",
+        # For window functions
+        "WINDOW",
+        Ref("FrameClauseUnitGrammar"),
+    ),
+    WhereClauseTerminatorGrammar=OneOf(
+        "LIMIT",
+        Sequence("GROUP", "BY"),
+        Sequence("ORDER", "BY"),
+        "WINDOW",
+    ),
+    FromClauseTerminatorGrammar=OneOf(
+        "WHERE",
+        "LIMIT",
+        Sequence("GROUP", "BY"),
+        Sequence("ORDER", "BY"),
+        "WINDOW",
+        Ref("SetOperatorSegment"),
+        Ref("WithNoSchemaBindingClauseSegment"),
+        Ref("WithDataClauseSegment"),
+    ),
+    SelectClauseElementTerminatorGrammar=OneOf(
+        "FROM",
+        "WHERE",
+        Sequence("ORDER", "BY"),
+        "LIMIT",
+        Ref("SetOperatorSegment"),
+    ),
+    FunctionContentsGrammar=AnyNumberOf(
+        Ref("ExpressionSegment"),
+        # A Cast-like function
+        Sequence(Ref("ExpressionSegment"), "AS", Ref("DatatypeSegment")),
+        # Trim function
+        Sequence(
+            Ref("TrimParametersGrammar"),
+            Ref("ExpressionSegment", optional=True, exclude=Ref.keyword("FROM")),
+            "FROM",
+            Ref("ExpressionSegment"),
+        ),
+        # An extract-like or substring-like function
+        Sequence(
+            OneOf(Ref("DatetimeUnitSegment"), Ref("ExpressionSegment")),
+            "FROM",
+            Ref("ExpressionSegment"),
+        ),
+        Sequence(
+            # Allow an optional distinct keyword here.
+            Ref.keyword("DISTINCT", optional=True),
+            OneOf(
+                # Most functions will be using the delimited route
+                # but for COUNT(*) or similar we allow the star segment
+                # here.
+                Ref("StarSegment"),
+                Delimited(Ref("FunctionContentsExpressionGrammar")),
+            ),
+        ),
+        Ref(
+            "OrderByClauseSegment"
+        ),  # used by string_agg (postgres), group_concat (exasol), listagg (snowflake)...
+        # like a function call: POSITION ( 'QL' IN 'SQL')
+        Sequence(
+            OneOf(
+                Ref("QuotedLiteralSegment"),
+                Ref("SingleIdentifierGrammar"),
+                Ref("ColumnReferenceSegment"),
+            ),
+            "IN",
+            OneOf(
+                Ref("QuotedLiteralSegment"),
+                Ref("SingleIdentifierGrammar"),
+                Ref("ColumnReferenceSegment"),
+            ),
+        ),
+        Ref("IndexColumnDefinitionSegment"),
+    ),
+    Expression_A_Grammar=Sequence(
+        OneOf(
+            Ref("Expression_C_Grammar"),
+            Sequence(
+                OneOf(
+                    Ref("SignedSegmentGrammar"),
+                    # Ref('TildeSegment'),
+                    Ref("NotOperatorGrammar"),
+                ),
+                Ref("Expression_C_Grammar"),
+            ),
+        ),
+        AnyNumberOf(
+            OneOf(
+                Sequence(
+                    OneOf(
+                        Sequence(
+                            Ref.keyword("NOT", optional=True),
+                            Ref("LikeGrammar"),
+                        ),
+                        Sequence(
+                            Ref("BinaryOperatorGrammar"),
+                            Ref.keyword("NOT", optional=True),
+                        ),
+                        # We need to add a lot more here...
+                    ),
+                    Ref("Expression_C_Grammar"),
+                    Sequence(
+                        Ref.keyword("ESCAPE"),
+                        Ref("Expression_C_Grammar"),
+                        optional=True,
+                    ),
+                ),
+                Sequence(
+                    Ref.keyword("NOT", optional=True),
+                    "IN",
+                    Bracketed(
+                        OneOf(
+                            Delimited(
+                                Ref("Expression_A_Grammar"),
+                            ),
+                            Ref("SelectableGrammar"),
+                            ephemeral_name="InExpression",
+                        )
+                    ),
+                ),
+                Sequence(
+                    Ref.keyword("NOT", optional=True),
+                    "IN",
+                    Ref("FunctionSegment"),  # E.g. UNNEST()
+                ),
+                Sequence(
+                    "IS",
+                    Ref.keyword("NOT", optional=True),
+                    Ref("IsClauseGrammar"),
+                ),
+                Ref("IsNullGrammar"),
+                Ref("NotNullGrammar"),
+                Ref("CollateGrammar"),
+                Sequence(
+                    # e.g. NOT EXISTS, but other expressions could be met as
+                    # well by inverting the condition with the NOT operator
+                    "NOT",
+                    Ref("Expression_C_Grammar"),
+                ),
+                Sequence(
+                    Ref.keyword("NOT", optional=True),
+                    "BETWEEN",
+                    Ref("Expression_B_Grammar"),
+                    "AND",
+                    Ref("Expression_A_Grammar"),
+                ),
+            )
+        ),
+    ),
 )
 
 
+class SetOperatorSegment(BaseSegment):
+    """A set operator such as Union, Minus, Except or Intersect."""
+
+    type = "set_operator"
+    match_grammar: Matchable = OneOf(
+        Sequence("UNION", OneOf("DISTINCT", "ALL", optional=True)),
+        Sequence(
+            OneOf(
+                "INTERSECT",
+                "EXCEPT",
+            ),
+            Ref.keyword("ALL", optional=True),
+        ),
+        exclude=Sequence("EXCEPT", Bracketed(Anything())),
+    )
+
+
+class DatatypeSegment(ansi.DatatypeSegment):
+    """A data type segment.
+
+    Supports SQLite's multi-word type names. Doesn't currently support intervals.
+    """
+
+    type = "data_type"
+    match_grammar: Matchable = OneOf(
+        Sequence(
+            "DOUBLE",
+            "PRECISION",
+        ),
+        Sequence("UNSIGNED", "BIG", "INT"),
+        Sequence(
+            OneOf(
+                Sequence(
+                    OneOf("VARYING", "NATIVE"),
+                    OneOf("CHARACTER"),
+                ),
+                Ref("DatatypeIdentifierSegment"),
+            ),
+            Ref("BracketedArguments", optional=True),
+        ),
+    )
+
+
 class TableEndClauseSegment(BaseSegment):
     """Support WITHOUT ROWID at end of tables.
 
@@ -42,6 +278,26 @@ class TableEndClauseSegment(BaseSegment):
     match_grammar: Matchable = Sequence("WITHOUT", "ROWID")
 
 
+class ValuesClauseSegment(ansi.ValuesClauseSegment):
+    """A `VALUES` clause like in `INSERT`."""
+
+    type = "values_clause"
+    match_grammar: Matchable = Sequence(
+        "VALUES",
+        Delimited(
+            Sequence(
+                Bracketed(
+                    Delimited(
+                        "DEFAULT",
+                        Ref("ExpressionSegment"),
+                        ephemeral_name="ValuesClauseElements",
+                    )
+                ),
+            ),
+        ),
+    )
+
+
 class IndexColumnDefinitionSegment(BaseSegment):
     """A column definition for CREATE INDEX.
 
@@ -111,6 +367,25 @@ class ColumnConstraintSegment(ansi.ColumnConstraintSegment):
     )
 
 
+class SelectClauseSegment(ansi.SelectClauseSegment):
+    """A group of elements in a select target statement."""
+
+    type = "select_clause"
+    match_grammar: Matchable = StartsWith(
+        "SELECT",
+        terminator=OneOf(
+            "FROM",
+            "WHERE",
+            Sequence("ORDER", "BY"),
+            "LIMIT",
+            Ref("SetOperatorSegment"),
+        ),
+        enforce_whitespace_preceding_terminator=True,
+    )
+
+    parse_grammar: Matchable = Ref("SelectClauseSegmentGrammar")
+
+
 class TableConstraintSegment(ansi.TableConstraintSegment):
     """Overriding TableConstraintSegment to allow for additional segment parsing."""
 
@@ -149,3 +424,93 @@ class TableConstraintSegment(ansi.TableConstraintSegment):
             optional=True,
         ),
     )
+
+
+class TransactionStatementSegment(ansi.TransactionStatementSegment):
+    """A `COMMIT`, `ROLLBACK` or `TRANSACTION` statement.
+
+    As per https://www.sqlite.org/lang_transaction.html
+    """
+
+    type = "transaction_statement"
+    match_grammar: Matchable = Sequence(
+        OneOf("BEGIN", "COMMIT", "ROLLBACK", "END"),
+        OneOf("TRANSACTION", optional=True),
+        Sequence("TO", "SAVEPOINT", Ref("ObjectReferenceSegment"), optional=True),
+    )
+
+
+class PragmaReferenceSegment(ansi.ObjectReferenceSegment):
+    """A Pragma object."""
+
+    type = "pragma_reference"
+
+
+class PragmaStatementSegment(BaseSegment):
+    """A Pragma Statement.
+
+    As per https://www.sqlite.org/pragma.html
+    """
+
+    type = "pragma_statement"
+
+    _pragma_value = OneOf(
+        Ref("LiteralGrammar"),
+        Ref("BooleanLiteralGrammar"),
+        "YES",
+        "NO",
+        "ON",
+        "OFF",
+        "NONE",
+        "FULL",
+        "INCREMENTAL",
+        "DELETE",
+        "TRUNCATE",
+        "PERSIST",
+        "MEMORY",
+        "WAL",
+        "NORMAL",
+        "EXCLUSIVE",
+        "FAST",
+        "EXTRA",
+        "DEFAULT",
+        "FILE",
+        "PASSIVE",
+        "RESTART",
+        "RESET",
+    )
+
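+    # e.g. `PRAGMA journal_mode = WAL` or `PRAGMA main.cache_size(2000)`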
+    match_grammar = Sequence(
+        "PRAGMA",
+        Ref("PragmaReferenceSegment"),
+        Bracketed(_pragma_value, optional=True),
+        Sequence(
+            Ref("EqualsSegment"), OptionallyBracketed(_pragma_value), optional=True
+        ),
+    )
+
+
+class StatementSegment(ansi.StatementSegment):
+    """Overriding StatementSegment to allow for additional segment parsing."""
+
+    match_grammar = ansi.StatementSegment.match_grammar
+
+    parse_grammar: Matchable = OneOf(
+        Ref("AlterTableStatementSegment"),
+        Ref("CreateIndexStatementSegment"),
+        Ref("CreateTableStatementSegment"),
+        Ref("CreateTriggerStatementSegment"),
+        Ref("CreateViewStatementSegment"),
+        Ref("DeleteStatementSegment"),
+        Ref("DropIndexStatementSegment"),
+        Ref("DropTableStatementSegment"),
+        Ref("DropTriggerStatementSegment"),
+        Ref("DropViewStatementSegment"),
+        Ref("ExplainStatementSegment"),
+        Ref("InsertStatementSegment"),
+        Ref("PragmaStatementSegment"),
+        Ref("SelectableGrammar"),
+        Ref("TransactionStatementSegment"),
+        Ref("UpdateStatementSegment"),
+        Bracketed(Ref("StatementSegment")),
+    )
diff --git a/src/sqlfluff/dialects/dialect_sqlite_keywords.py b/src/sqlfluff/dialects/dialect_sqlite_keywords.py
new file mode 100644
index 0000000..4cdfb92
--- /dev/null
+++ b/src/sqlfluff/dialects/dialect_sqlite_keywords.py
@@ -0,0 +1,203 @@
+"""A List of SQLite keywords.
+
+https://www.sqlite.org/lang_keywords.html
+
+Augmented with data types, and a couple of omitted keywords.
+"""
+
+RESERVED_KEYWORDS = [
+    "ABORT",
+    "ACTION",
+    "ADD",
+    "AFTER",
+    "ALL",
+    "ALTER",
+    "ALWAYS",
+    "ANALYZE",
+    "AND",
+    "AS",
+    "ASC",
+    "ATTACH",
+    "AUTOINCREMENT",
+    "BEFORE",
+    "BEGIN",
+    "BETWEEN",
+    "BY",
+    "CASCADE",
+    "CASE",
+    "CAST",
+    "CHECK",
+    "COLLATE",
+    "COLUMN",
+    "COMMIT",
+    "CONFLICT",
+    "CONSTRAINT",
+    "CREATE",
+    "CROSS",
+    "CURRENT",
+    "CURRENT_DATE",
+    "CURRENT_TIME",
+    "CURRENT_TIMESTAMP",
+    "DATABASE",
+    "DEFAULT",
+    "DEFERRABLE",
+    "DEFERRED",
+    "DELETE",
+    "DESC",
+    "DETACH",
+    "DISTINCT",
+    "DO",
+    "DROP",
+    "EACH",
+    "ELSE",
+    "END",
+    "ESCAPE",
+    "EXCEPT",
+    "EXCLUDE",
+    "EXCLUSIVE",
+    "EXISTS",
+    "EXPLAIN",
+    "FAIL",
+    "FILTER",
+    "FIRST",
+    "FOLLOWING",
+    "FOR",
+    "FOREIGN",
+    "FROM",
+    "FULL",
+    "GENERATED",
+    "GLOB",
+    "GROUP",
+    "GROUPS",
+    "HAVING",
+    "IF",
+    "IGNORE",
+    "IMMEDIATE",
+    "IN",
+    "INDEX",
+    "INDEXED",
+    "INITIALLY",
+    "INNER",
+    "INSERT",
+    "INSTEAD",
+    "INTERSECT",
+    "INTO",
+    "IS",
+    "ISNULL",
+    "JOIN",
+    "KEY",
+    "LAST",
+    "LEFT",
+    "LIKE",
+    "LIMIT",
+    "MATCH",
+    "MATERIALIZED",
+    "NATURAL",
+    "NO",
+    "NOT",
+    "NOTHING",
+    "NOTNULL",
+    "NULL",
+    "NULLS",
+    "OF",
+    "OFFSET",
+    "ON",
+    "OR",
+    "ORDER",
+    "OTHERS",
+    "OUTER",
+    "OVER",
+    "PARTITION",
+    "PLAN",
+    "PRAGMA",
+    "PRECEDING",
+    "PRIMARY",
+    "QUERY",
+    "RAISE",
+    "RANGE",
+    "RECURSIVE",
+    "REFERENCES",
+    "REGEXP",
+    "REINDEX",
+    "RELEASE",
+    "RENAME",
+    "REPLACE",
+    "RESTRICT",
+    "RETURNING",
+    "RIGHT",
+    "ROLLBACK",
+    "ROW",
+    "ROWS",
+    "SAVEPOINT",
+    "SELECT",
+    "SET",
+    "TABLE",
+    "TEMP",
+    "TEMPORARY",
+    "THEN",
+    "TIES",
+    "TO",
+    "TRANSACTION",
+    "TRIGGER",
+    "UNBOUNDED",
+    "UNION",
+    "UNIQUE",
+    "UPDATE",
+    "USING",
+    "VACUUM",
+    "VALUES",
+    "VIEW",
+    "VIRTUAL",
+    "WHEN",
+    "WHERE",
+    "WINDOW",
+    "WITH",
+    "WITHOUT",
+]
+
+UNRESERVED_KEYWORDS = [
+    "INT",
+    "INTEGER",
+    "TINYINT",
+    "SMALLINT",
+    "MEDIUMINT",
+    "BIGINT",
+    "UNSIGNED",
+    "INT2",
+    "INT8",
+    "CHARACTER",
+    "VARCHAR",
+    "VARYING",
+    "NCHAR",
+    "NATIVE",
+    "NVARCHAR",
+    "TEXT",
+    "CLOB",
+    "BLOB",
+    "REAL",
+    "BIG",
+    "DOUBLE",
+    "PRECISION",
+    "FLOAT",
+    "NUMERIC",
+    "DECIMAL",
+    "BOOLEAN",
+    "DATE",
+    "DATETIME",
+    "ROWID",
+    "YES",
+    "OFF",
+    "NONE",
+    "INCREMENTAL",
+    "TRUNCATE",
+    "PERSIST",
+    "MEMORY",
+    "WAL",
+    "NORMAL",
+    "FAST",
+    "EXTRA",
+    "FILE",
+    "PASSIVE",
+    "RESTART",
+    "RESET",
+]
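Reviewer note: the reserved/unreserved split matters for identifier parsing — unreserved keywords may still be used as object names, reserved ones may not. A sketch (assuming, per sqlfluff's API docs, that `sqlfluff.parse` raises on unparsable input; table names are placeholders):

import sqlfluff

# "TEXT" is unreserved, so it can double as a column name:
sqlfluff.parse("SELECT text FROM tbl", dialect="sqlite")

# "SELECT" is reserved, so the same trick should fail to parse:
try:
    sqlfluff.parse("SELECT select FROM tbl", dialect="sqlite")
except Exception:  # sqlfluff raises an APIParsingError here
    pass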
diff --git a/src/sqlfluff/dialects/dialect_teradata.py b/src/sqlfluff/dialects/dialect_teradata.py
index 09230eb..ff9282c 100644
--- a/src/sqlfluff/dialects/dialect_teradata.py
+++ b/src/sqlfluff/dialects/dialect_teradata.py
@@ -24,6 +24,7 @@ from sqlfluff.core.parser import (
     RegexLexer,
     Sequence,
     StartsWith,
+    StringParser,
 )
 
 from sqlfluff.core.dialects import load_raw_dialect
@@ -77,15 +78,19 @@ teradata_dialect.sets("unreserved_keywords").update(
         "MAXVALUELENGTH",
         "MEETS",
         "MERGEBLOCKRATIO",
+        "NONE",
         "PERCENT",
         "PROFILE",
         "PROTECTION",
+        "QUERY_BAND",
         "QUIT",
         "RUN",
         "SAMPLE",
         "SEL",
         "SS",
         "STAT",
+        "STATS",
+        "STATISTICS",
         "SUMMARY",
         "THRESHOLD",
         "UC",
@@ -97,6 +102,41 @@ teradata_dialect.sets("reserved_keywords").update(["UNION", "TIMESTAMP"])
 
 teradata_dialect.sets("bare_functions").update(["DATE"])
 
+teradata_dialect.replace(
+    # ANSI standard comparison operators plus Teradata extensions
+    ComparisonOperatorGrammar=OneOf(
+        Ref("EqualsSegment"),
+        Ref("EqualsSegment_a"),
+        Ref("GreaterThanSegment"),
+        Ref("GreaterThanSegment_a"),
+        Ref("LessThanSegment"),
+        Ref("LessThanSegment_a"),
+        Ref("GreaterThanOrEqualToSegment"),
+        Ref("GreaterThanOrEqualToSegment_a"),
+        Ref("LessThanOrEqualToSegment"),
+        Ref("LessThanOrEqualToSegment_a"),
+        Ref("NotEqualToSegment"),
+        Ref("NotEqualToSegment_a"),
+        Ref("NotEqualToSegment_b"),
+        Ref("NotEqualToSegment_c"),
+        Ref("LikeOperatorSegment"),
+        Sequence("IS", "DISTINCT", "FROM"),
+        Sequence("IS", "NOT", "DISTINCT", "FROM"),
+    )
+)
+
+teradata_dialect.add(
+    # Add Teradata comparison operator extensions
+    EqualsSegment_a=StringParser("EQ", ansi.ComparisonOperatorSegment),
+    GreaterThanSegment_a=StringParser("GT", ansi.ComparisonOperatorSegment),
+    LessThanSegment_a=StringParser("LT", ansi.ComparisonOperatorSegment),
+    GreaterThanOrEqualToSegment_a=StringParser("GE", ansi.ComparisonOperatorSegment),
+    LessThanOrEqualToSegment_a=StringParser("LE", ansi.ComparisonOperatorSegment),
+    NotEqualToSegment_a=StringParser("NE", ansi.ComparisonOperatorSegment),
+    NotEqualToSegment_b=StringParser("NOT=", ansi.ComparisonOperatorSegment),
+    NotEqualToSegment_c=StringParser("^=", ansi.ComparisonOperatorSegment),
+)
+
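Reviewer note: with these additions, Teradata's mnemonic comparison forms parse alongside the symbolic ones. A sketch via the simple API (placeholder table and columns):

import sqlfluff

sqlfluff.parse(
    "SELECT 1 FROM t WHERE a GT 1 AND b NE 2 AND c LE 3",
    dialect="teradata",
)

The `NOT=` and `^=` spellings are handled separately by the composite segments defined at the end of this file.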
 
 # BTEQ statement
 class BteqKeyWordSegment(BaseSegment):
@@ -213,7 +253,7 @@ class TdCollectStatisticsStatementSegment(BaseSegment):
     match_grammar = Sequence(
         "COLLECT",
         Ref.keyword("SUMMARY", optional=True),
-        OneOf("STATISTICS", "STAT"),
+        OneOf("STAT", "STATS", "STATISTICS"),
         Sequence(
             "USING",
             Delimited(
@@ -243,7 +283,7 @@ class TdCollectStatisticsStatementSegment(BaseSegment):
                 # COLUMN ...
                 Sequence(
                     "COLUMN",
-                    Bracketed(
+                    OptionallyBracketed(
                         Delimited(
                             OneOf(
                                 Ref("ColumnReferenceSegment"),
@@ -340,6 +380,7 @@ class DatatypeSegment(ansi.DatatypeSegment):
 
     match_grammar = Sequence(
         Ref("DatatypeIdentifierSegment"),
+        Ref("BracketedArguments", optional=True),
         Bracketed(
             OneOf(
                 Delimited(Ref("ExpressionSegment")),
@@ -574,6 +615,13 @@ class TdTableConstraints(BaseSegment):
         ),
         # WITH DATA
         Sequence("WITH", Sequence("NO", optional=True), "DATA"),
+        # AND STATISTICS
+        Sequence(
+            "AND",
+            Sequence("NO", optional=True),
+            OneOf("STAT", "STATS", "STATISTICS"),
+            optional=True,
+        ),
         # ON COMMIT PRESERVE ROWS
         Sequence("ON", "COMMIT", OneOf("PRESERVE", "DELETE"), "ROWS"),
     )
@@ -673,24 +721,14 @@ class StatementSegment(ansi.StatementSegment):
             Ref("TdCommentStatementSegment"),
             Ref("DatabaseStatementSegment"),
             Ref("SetSessionStatementSegment"),
+            Ref("SetQueryBandStatementSegment"),
         ],
     )
 
     match_grammar = ansi.StatementSegment.match_grammar.copy()
 
 
-teradata_dialect.add(
-    TdCastIdentifierGrammar=Sequence(
-        OneOf("DATE", "TIMESTAMP"), Ref("ExpressionSegment")
-    ),
-)
-
 teradata_dialect.replace(
-    SingleIdentifierGrammar=OneOf(
-        Ref("NakedIdentifierSegment"),
-        Ref("QuotedIdentifierSegment"),
-        Ref("TdCastIdentifierGrammar"),
-    ),
     SelectClauseSegmentGrammar=Sequence(
         OneOf("SELECT", "SEL"),
         Ref("SelectClauseModifierSegment", optional=True),
@@ -757,6 +795,12 @@ class SelectClauseModifierSegment(BaseSegment):
     match_grammar = OneOf(
         "DISTINCT",
         "ALL",
+        Sequence(
+            "TOP",
+            Ref("ExpressionSegment"),
+            Sequence("PERCENT", optional=True),
+            Sequence("WITH", "TIES", optional=True),
+        ),
         Sequence(
             "NORMALIZE",
             OneOf(
@@ -833,3 +877,46 @@ class SetSessionStatementSegment(BaseSegment):
         ),
         Ref("DatabaseStatementSegment"),
     )
+
+
+class SetQueryBandStatementSegment(BaseSegment):
+    """A `SET QUERY_BAND` statement.
+
+    SET QUERY_BAND = { 'band_specification [...]' | NONE } [ UPDATE ]
+    FOR { SESSION [VOLATILE] | TRANSACTION } [;]
+
+    https://docs.teradata.com/r/Teradata-VantageTM-SQL-Data-Definition-Language-Syntax-and-Examples/July-2021/Session-Statements/SET-QUERY_BAND
+    """
+
+    type = "set_query_band_statement"
+    match_grammar: Matchable = Sequence(
+        "SET",
+        "QUERY_BAND",
+        Ref("EqualsSegment"),
+        OneOf(Ref("QuotedLiteralSegment"), "NONE"),
+        Sequence("UPDATE", optional=True),
+        "FOR",
+        OneOf(Sequence("SESSION", Sequence("VOLATILE", optional=True)), "TRANSACTION"),
+    )
+
+
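Reviewer note: concrete statements matching this grammar, following the syntax block in the docstring (sketch via the simple API; the band specification string is illustrative):

import sqlfluff

sqlfluff.parse(
    "SET QUERY_BAND = 'org=Finance;report=Q1;' UPDATE FOR SESSION;",
    dialect="teradata",
)
sqlfluff.parse("SET QUERY_BAND = NONE FOR TRANSACTION;", dialect="teradata")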
+class NotEqualToSegment_b(ansi.CompositeComparisonOperatorSegment):
+    """The comparison operator extension NOT=.
+
+    https://www.docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Comparison-Operators-and-Functions/Comparison-Operators/Supported-Comparison-Operators
+    """
+
+    match_grammar = Sequence(
+        Ref("NotOperatorGrammar"), Ref("RawEqualsSegment"), allow_gaps=False
+    )
+
+
+class NotEqualToSegment_c(ansi.CompositeComparisonOperatorSegment):
+    """The comparison operator extension ^=.
+
+    https://www.docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Comparison-Operators-and-Functions/Comparison-Operators/Supported-Comparison-Operators
+    """
+
+    match_grammar = Sequence(
+        Ref("BitwiseXorSegment"), Ref("RawEqualsSegment"), allow_gaps=False
+    )
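Reviewer note: because both composites set allow_gaps=False, the two raw tokens must be adjacent — `a NOT= b` and `a ^= b` match, while `a NOT = b` would not be picked up as a single operator. A sketch (simple API, placeholder identifiers):

import sqlfluff

sqlfluff.parse("SELECT 1 FROM t WHERE a NOT= b", dialect="teradata")
sqlfluff.parse("SELECT 1 FROM t WHERE a ^= b", dialect="teradata")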
diff --git a/src/sqlfluff/dialects/dialect_tsql.py b/src/sqlfluff/dialects/dialect_tsql.py
index cd0e4f6..47384f9 100644
--- a/src/sqlfluff/dialects/dialect_tsql.py
+++ b/src/sqlfluff/dialects/dialect_tsql.py
@@ -15,8 +15,10 @@ from sqlfluff.core.parser import (
     Conditional,
     Dedent,
     Delimited,
+    ImplicitIndent,
     Indent,
     Matchable,
+    MultiStringParser,
     Nothing,
     OneOf,
     OptionallyBracketed,
@@ -48,12 +50,14 @@ tsql_dialect.sets("datetime_units").update(
     [
         "D",
         "DAY",
+        "DAYS",
         "DAYOFYEAR",
         "DD",
         "DW",
         "DY",
         "HH",
         "HOUR",
+        "INFINITE",
         "M",
         "MCS",
         "MI",
@@ -62,6 +66,7 @@ tsql_dialect.sets("datetime_units").update(
         "MINUTE",
         "MM",
         "MONTH",
+        "MONTHS",
         "MS",
         "N",
         "NANOSECOND",
@@ -74,10 +79,12 @@ tsql_dialect.sets("datetime_units").update(
         "SS",
         "W",
         "WEEK",
+        "WEEKS",
         "WEEKDAY",
         "WK",
         "WW",
         "YEAR",
+        "YEARS",
         "Y",
         "YY",
         "YYYY",
@@ -89,6 +96,38 @@ tsql_dialect.sets("date_part_function_name").update(
     ["DATEADD", "DATEDIFF", "DATEDIFF_BIG", "DATENAME", "DATEPART"]
 )
 
+tsql_dialect.sets("bare_functions").update(
+    ["system_user", "session_user", "current_user"]
+)
+
+tsql_dialect.sets("sqlcmd_operators").clear()
+tsql_dialect.sets("sqlcmd_operators").update(["r", "setvar"])
+
+tsql_dialect.sets("file_compression").clear()
+tsql_dialect.sets("file_compression").update(
+    [
+        "'org.apache.hadoop.io.compress.GzipCodec'",
+        "'org.apache.hadoop.io.compress.DefaultCodec'",
+        "'org.apache.hadoop.io.compress.SnappyCodec'",
+    ]
+)
+
+tsql_dialect.sets("file_encoding").clear()
+tsql_dialect.sets("file_encoding").update(
+    [
+        "'UTF8'",
+        "'UTF16'",
+    ]
+)
+
+tsql_dialect.sets("serde_method").clear()
+tsql_dialect.sets("serde_method").update(
+    [
+        "'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'",
+        "'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'",
+    ]
+)
+
 tsql_dialect.insert_lexer_matchers(
     [
         RegexLexer(
@@ -122,6 +161,13 @@ tsql_dialect.insert_lexer_matchers(
             CodeSegment,
             segment_kwargs={"type": "hash_prefix"},
         ),
+        RegexLexer(
+            "unquoted_relative_sql_file_path",
+            # currently there is no way to pass `regex.IGNORECASE` flag to `RegexLexer`
+            r"[.\w\\/#-]+\.[sS][qQ][lL]",
+            CodeSegment,
+            segment_kwargs={"type": "unquoted_relative_sql_file_path"},
+        ),
     ],
     before="back_quote",
 )
@@ -179,6 +225,7 @@ tsql_dialect.patch_lexer_matchers(
                 r"[^\S\r\n]+",
                 WhitespaceSegment,
             ),
+            segment_kwargs={"type": "block_comment"},
         ),
         RegexLexer(
             "code", r"[0-9a-zA-Z_#@]+", CodeSegment
@@ -237,6 +284,38 @@ tsql_dialect.add(
             anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$",
         )
     ),
+    SqlcmdOperatorSegment=SegmentGenerator(
+        lambda dialect: MultiStringParser(
+            dialect.sets("sqlcmd_operators"),
+            CodeSegment,
+            type="sqlcmd_operator",
+        )
+    ),
+    SqlcmdFilePathSegment=TypedParser(
+        "unquoted_relative_sql_file_path",
+        CodeSegment,
+    ),
+    FileCompressionSegment=SegmentGenerator(
+        lambda dialect: MultiStringParser(
+            dialect.sets("file_compression"),
+            CodeSegment,
+            type="file_compression",
+        )
+    ),
+    FileEncodingSegment=SegmentGenerator(
+        lambda dialect: MultiStringParser(
+            dialect.sets("file_encoding"),
+            CodeSegment,
+            type="file_encoding",
+        )
+    ),
+    SerdeMethodSegment=SegmentGenerator(
+        lambda dialect: MultiStringParser(
+            dialect.sets("serde_method"),
+            CodeSegment,
+            type="serde_method",
+        )
+    ),
 )
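Reviewer note: these generators turn the keyword sets above into single-token matchers, so only the registered strings are accepted where, say, a compression codec is expected. A sketch of the kind of DDL this enables, assuming the `CreateExternalFileFormat` segment added later in this diff follows Microsoft's documented syntax:

import sqlfluff

sqlfluff.parse(
    "CREATE EXTERNAL FILE FORMAT ff WITH ("
    " FORMAT_TYPE = DELIMITEDTEXT,"
    " DATA_COMPRESSION = 'org.apache.hadoop.io.compress.GzipCodec'"
    ");",
    dialect="tsql",
)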
 
 tsql_dialect.replace(
@@ -308,13 +387,18 @@ tsql_dialect.replace(
     ),
     DatatypeIdentifierSegment=SegmentGenerator(
         # Generate the anti template reserved keywords
-        lambda dialect: RegexParser(
-            r"[A-Z][A-Z0-9_]*|\[[A-Z][A-Z0-9_]*\]",
-            CodeSegment,
-            type="data_type_identifier",
-            # anti_template=r"^(NOT)$",
-            anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$",
-            # TODO - this is a stopgap until we implement explicit data types
+        lambda dialect: OneOf(
+            RegexParser(
+                r"[A-Z][A-Z0-9_]*|\[[A-Z][A-Z0-9_]*\]",
+                CodeSegment,
+                type="data_type_identifier",
+                # anti_template=r"^(NOT)$",
+                anti_template=r"^("
+                + r"|".join(dialect.sets("reserved_keywords"))
+                + r")$",
+                # TODO - this is a stopgap until we implement explicit data types
+            ),
+            Ref("SingleIdentifierGrammar", exclude=Ref("NakedIdentifierSegment")),
         ),
     ),
     PrimaryKeyGrammar=Sequence(
@@ -454,9 +538,8 @@ tsql_dialect.replace(
             Ref("SelectStatementSegment"),
             Ref("LiteralGrammar"),
             Ref("ColumnReferenceSegment"),
-            Sequence(
-                Ref("SimpleArrayTypeGrammar", optional=True), Ref("ArrayLiteralSegment")
-            ),
+            Ref("TypedArrayLiteralSegment"),
+            Ref("ArrayLiteralSegment"),
         ),
         Ref("Accessor_Grammar", optional=True),
         allow_gaps=True,
@@ -515,6 +598,13 @@ class StatementSegment(ansi.StatementSegment):
             Ref("CreateTypeStatementSegment"),
             Ref("CreateSynonymStatementSegment"),
             Ref("DropSynonymStatementSegment"),
+            Ref("BulkInsertStatementSegment"),
+            Ref("AlterIndexStatementSegment"),
+            Ref("CreateDatabaseScopedCredentialStatementSegment"),
+            Ref("CreateExternalDataSourceStatementSegment"),
+            Ref("SqlcmdCommandSegment"),
+            Ref("CreateExternalFileFormat"),
+            Ref("CreateExternalTableStatementSegment"),
         ],
         remove=[
             Ref("CreateModelStatementSegment"),
@@ -689,6 +779,86 @@ class InsertStatementSegment(BaseSegment):
     )
 
 
+class BulkInsertStatementSegment(BaseSegment):
+    """A `BULK INSERT` statement.
+
+    https://learn.microsoft.com/en-us/sql/t-sql/statements/bulk-insert-transact-sql?view=sql-server-ver16
+    """
+
+    type = "bulk_insert_statement"
+    match_grammar = Sequence(
+        "BULK",
+        "INSERT",
+        Ref("TableReferenceSegment"),
+        "FROM",
+        Ref("QuotedLiteralSegment"),
+        Ref("BulkInsertStatementWithSegment", optional=True),
+    )
+
+
+class BulkInsertStatementWithSegment(BaseSegment):
+    """A `WITH` segment in the BULK INSERT statement.
+
+    https://learn.microsoft.com/en-us/sql/t-sql/statements/bulk-insert-transact-sql?view=sql-server-ver16
+    """
+
+    type = "bulk_insert_with_segment"
+    match_grammar = Sequence(
+        "WITH",
+        Bracketed(
+            Delimited(
+                AnyNumberOf(
+                    Sequence(
+                        OneOf(
+                            "BATCHSIZE",
+                            "FIRSTROW",
+                            "KILOBYTES_PER_BATCH",
+                            "LASTROW",
+                            "MAXERRORS",
+                            "ROWS_PER_BATCH",
+                        ),
+                        Ref("EqualsSegment"),
+                        Ref("NumericLiteralSegment"),
+                    ),
+                    Sequence(
+                        OneOf(
+                            "CODEPAGE",
+                            "DATAFILETYPE",
+                            "DATA_SOURCE",
+                            "ERRORFILE",
+                            "ERRORFILE_DATA_SOURCE",
+                            "FORMATFILE_DATA_SOURCE",
+                            "ROWTERMINATOR",
+                            "FORMAT",
+                            "FIELDQUOTE",
+                            "FORMATFILE",
+                            "FIELDTERMINATOR",
+                        ),
+                        Ref("EqualsSegment"),
+                        Ref("QuotedLiteralSegment"),
+                    ),
+                    Sequence(
+                        "ORDER",
+                        Bracketed(
+                            Delimited(
+                                Sequence(
+                                    Ref("ColumnReferenceSegment"),
+                                    OneOf("ASC", "DESC", optional=True),
+                                ),
+                            ),
+                        ),
+                    ),
+                    "CHECK_CONSTRAINTS",
+                    "FIRE_TRIGGERS",
+                    "KEEPIDENTITY",
+                    "KEEPNULLS",
+                    "TABLOCK",
+                )
+            )
+        ),
+    )
+
+
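Reviewer note: a statement exercising both new segments (sketch via the simple API; table and file names are placeholders):

import sqlfluff

sqlfluff.parse(
    "BULK INSERT dbo.Sales FROM 'C:\\data\\sales.csv' "
    "WITH (FIRSTROW = 2, FIELDTERMINATOR = ',', TABLOCK);",
    dialect="tsql",
)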
 class WithCompoundStatementSegment(BaseSegment):
     """A `SELECT` statement preceded by a selection of `WITH` clauses.
 
@@ -734,7 +904,7 @@ class SelectStatementSegment(BaseSegment):
             Ref("OrderByClauseSegment", optional=True),
             Ref("OptionClauseSegment", optional=True),
             Ref("DelimiterGrammar", optional=True),
-            Ref("ForXmlSegment", optional=True),
+            Ref("ForClauseSegment", optional=True),
         ]
     )
 
@@ -760,7 +930,7 @@ class WhereClauseSegment(BaseSegment):
     type = "where_clause"
     match_grammar = Sequence(
         "WHERE",
-        Indent,
+        ImplicitIndent,
         OptionallyBracketed(Ref("ExpressionSegment")),
         Dedent,
     )
@@ -776,13 +946,13 @@ class CreateIndexStatementSegment(BaseSegment):
     type = "create_index_statement"
     match_grammar = Sequence(
         "CREATE",
-        Indent,
         Ref("OrReplaceGrammar", optional=True),
         Sequence("UNIQUE", optional=True),
         OneOf("CLUSTERED", "NONCLUSTERED", optional=True),
         OneOf("INDEX", "STATISTICS"),
         Ref("IfNotExistsGrammar", optional=True),
         Ref("IndexReferenceSegment"),
+        Indent,
         "ON",
         Ref("TableReferenceSegment"),
         Ref("BracketedIndexColumnListGrammar"),
@@ -800,310 +970,795 @@ class CreateIndexStatementSegment(BaseSegment):
     )
 
 
-class OnPartitionOrFilegroupOptionSegment(BaseSegment):
-    """ON partition scheme or filegroup option.
+class AlterIndexStatementSegment(BaseSegment):
+    """An ALTER INDEX statement.
 
-    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql?view=sql-server-ver15
-    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15
+    As per
+    https://learn.microsoft.com/en-us/sql/t-sql/statements/alter-index-transact-sql?view=sql-server-ver15
     """
 
-    type = "on_partition_or_filegroup_statement"
-    match_grammar = OneOf(
-        Ref("PartitionSchemeClause"),
-        Ref("FilegroupClause"),
-        Ref("LiteralGrammar"),  # for "default" value
-    )
-
-
-class FilestreamOnOptionSegment(BaseSegment):
-    """FILESTREAM_ON index option in `CREATE INDEX` and 'CREATE TABLE' statements.
-
-    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql?view=sql-server-ver15
-    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15
-    """
+    type = "alter_index_statement"
 
-    type = "filestream_on_option_statement"
-    match_grammar = Sequence(
-        "FILESTREAM_ON",
-        OneOf(
-            Ref("FilegroupNameSegment"),
-            Ref("PartitionSchemeNameSegment"),
-            OneOf(
-                "NULL",
-                Ref("LiteralGrammar"),  # for "default" value
+    _low_priority_lock_wait = Sequence(
+        "WAIT_AT_LOW_PRIORITY",
+        Bracketed(
+            Sequence(
+                "MAX_DURATION",
+                Ref("EqualsSegment"),
+                Ref("NumericLiteralSegment"),
+                Ref.keyword("MINUTES", optional=True),
+            ),
+            Ref("CommaSegment"),
+            Sequence(
+                "ABORT_AFTER_WAIT",
+                Ref("EqualsSegment"),
+                OneOf(
+                    "NONE",
+                    "SELF",
+                    "BLOCKERS",
+                ),
             ),
         ),
     )
 
-
-class TextimageOnOptionSegment(BaseSegment):
-    """TEXTIMAGE ON option in `CREATE TABLE` statement.
-
-    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15
-    """
-
-    type = "textimage_on_option_statement"
-    match_grammar = Sequence(
-        "TEXTIMAGE_ON",
-        OneOf(
-            Ref("FilegroupNameSegment"),
-            Ref("LiteralGrammar"),  # for "default" value
+    _on_partitions = Sequence(
+        Sequence(
+            "ON",
+            "PARTITIONS",
+        ),
+        Bracketed(
+            Delimited(
+                Ref("NumericLiteralSegment"),
+            ),
+            Sequence(
+                "TO",
+                Ref("NumericLiteralSegment"),
+                optional=True,
+            ),
         ),
+        optional=True,
     )
 
-
-class ReferencesConstraintGrammar(BaseSegment):
-    """REFERENCES constraint option in `CREATE TABLE` statement.
-
-    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15
-    """
-
-    type = "references_constraint_grammar"
-    match_grammar = Sequence(
-        # REFERENCES reftable [ ( refcolumn) ]
-        "REFERENCES",
-        Ref("TableReferenceSegment"),
-        # Foreign columns making up FOREIGN KEY constraint
-        Ref("BracketedColumnReferenceListGrammar", optional=True),
+    _rebuild_index_option = AnyNumberOf(
         Sequence(
-            "ON",
-            "DELETE",
             OneOf(
-                Sequence("NO", "ACTION"),
-                "CASCADE",
-                Sequence("SET", "NULL"),
-                Sequence("SET", "DEFAULT"),
+                "PAD_INDEX",
+                "SORT_IN_TEMPDB",
+                "IGNORE_DUP_KEY",
+                "STATISTICS_NORECOMPUTE",
+                "STATISTICS_INCREMENTAL",
+                "RESUMABLE",
+                "ALLOW_ROW_LOCKS",
+                "ALLOW_PAGE_LOCKS",
+            ),
+            Ref("EqualsSegment"),
+            OneOf(
+                "ON",
+                "OFF",
             ),
-            optional=True,
         ),
         Sequence(
-            "ON",
-            "UPDATE",
             OneOf(
-                Sequence("NO", "ACTION"),
-                "CASCADE",
-                Sequence("SET", "NULL"),
-                Sequence("SET", "DEFAULT"),
+                "MAXDOP",
+                "FILLFACTOR",
+                "MAX_DURATION",
             ),
-            optional=True,
+            Ref("EqualsSegment"),
+            Ref("NumericLiteralSegment"),
+            Ref.keyword("MINUTES", optional=True),
+        ),
+        Sequence(
+            "ONLINE",
+            Ref("EqualsSegment"),
+            OneOf(
+                Sequence(
+                    "ON",
+                    Bracketed(
+                        _low_priority_lock_wait,
+                        optional=True,
+                    ),
+                ),
+                "OFF",
+            ),
+        ),
+        Sequence(
+            "DATA_COMPRESSION",
+            Ref("EqualsSegment"),
+            OneOf(
+                "NONE",
+                "ROW",
+                "PAGE",
+                "COLUMNSTORE",
+                "COLUMNSTORE_ARCHIVE",
+            ),
+            _on_partitions,
+        ),
+        Sequence(
+            "XML_COMPRESSION",
+            Ref("EqualsSegment"),
+            OneOf(
+                "ON",
+                "OFF",
+            ),
+            _on_partitions,
         ),
-        Sequence("NOT", "FOR", "REPLICATION", optional=True),
     )
 
-
-class CheckConstraintGrammar(BaseSegment):
-    """CHECK constraint option in `CREATE TABLE` statement.
-
-    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15
-    """
-
-    type = "check_constraint_grammar"
-    match_grammar = Sequence(
-        "CHECK",
-        Sequence("NOT", "FOR", "REPLICATION", optional=True),
-        Bracketed(
-            Ref("ExpressionSegment"),
+    _single_partition_rebuild_index_option = AnyNumberOf(
+        Sequence(
+            OneOf(
+                "XML_COMPRESSION",
+                "SORT_IN_TEMPDB",
+                "RESUMABLE",
+            ),
+            Ref("EqualsSegment"),
+            OneOf(
+                "ON",
+                "OFF",
+            ),
+        ),
+        Sequence(
+            OneOf(
+                "MAXDOP",
+                "MAX_DURATION",
+            ),
+            Ref("EqualsSegment"),
+            Ref("NumericLiteralSegment"),
+            Ref.keyword("MINUTES", optional=True),
+        ),
+        Sequence(
+            "DATA_COMPRESSION",
+            Ref("EqualsSegment"),
+            OneOf(
+                "NONE",
+                "ROW",
+                "PAGE",
+                "COLUMNSTORE",
+                "COLUMNSTORE_ARCHIVE",
+            ),
+        ),
+        Sequence(
+            "ONLINE",
+            Ref("EqualsSegment"),
+            OneOf(
+                Sequence(
+                    "ON",
+                    Bracketed(
+                        _low_priority_lock_wait,
+                        optional=True,
+                    ),
+                ),
+                "OFF",
+            ),
         ),
     )
 
-
-class RelationalIndexOptionsSegment(BaseSegment):
-    """A relational index options in `CREATE INDEX` statement.
-
-    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql?view=sql-server-ver15
-    """
-
-    type = "relational_index_options"
     match_grammar = Sequence(
-        "WITH",
-        OptionallyBracketed(
-            Delimited(
-                AnyNumberOf(
+        "ALTER",
+        "INDEX",
+        OneOf(
+            Ref("ObjectReferenceSegment"),
+            "ALL",
+        ),
+        "ON",
+        Ref("TableReferenceSegment"),
+        OneOf(
+            Sequence(
+                "REBUILD",
+                OneOf(
                     Sequence(
-                        OneOf(
-                            "PAD_INDEX",
-                            "FILLFACTOR",
-                            "SORT_IN_TEMPDB",
-                            "IGNORE_DUP_KEY",
-                            "STATISTICS_NORECOMPUTE",
-                            "STATISTICS_INCREMENTAL",
-                            "DROP_EXISTING",
-                            "RESUMABLE",
-                            "ALLOW_ROW_LOCKS",
-                            "ALLOW_PAGE_LOCKS",
-                            "OPTIMIZE_FOR_SEQUENTIAL_KEY",
-                            "MAXDOP",
+                        Sequence(
+                            "PARTITION",
+                            Ref("EqualsSegment"),
+                            "ALL",
+                            optional=True,
                         ),
-                        Ref("EqualsSegment"),
-                        OneOf(
-                            "ON",
-                            "OFF",
-                            Ref("LiteralGrammar"),
+                        Sequence(
+                            "WITH",
+                            Bracketed(
+                                Delimited(
+                                    _rebuild_index_option,
+                                )
+                            ),
+                            optional=True,
                         ),
                     ),
-                    Ref("MaxDurationSegment"),
                     Sequence(
-                        "ONLINE",
-                        Ref("EqualsSegment"),
-                        OneOf(
-                            "OFF",
-                            Sequence(
-                                "ON",
-                                Bracketed(
-                                    Sequence(
-                                        "WAIT_AT_LOW_PRIORITY",
-                                        Bracketed(
-                                            Delimited(
-                                                Ref("MaxDurationSegment"),
-                                                Sequence(
-                                                    "ABORT_AFTER_WAIT",
-                                                    Ref("EqualsSegment"),
-                                                    OneOf(
-                                                        "NONE",
-                                                        "SELF",
-                                                        "BLOCKERS",
-                                                    ),
-                                                ),
-                                            ),
-                                        ),
-                                    ),
-                                    optional=True,
+                        Sequence(
+                            "PARTITION",
+                            Ref("EqualsSegment"),
+                            Ref("NumericLiteralSegment"),
+                            optional=True,
+                        ),
+                        Sequence(
+                            "WITH",
+                            Bracketed(
+                                Delimited(
+                                    _single_partition_rebuild_index_option,
                                 ),
                             ),
+                            optional=True,
                         ),
                     ),
-                    # for table constrains
-                    Sequence(
-                        "COMPRESSION_DELAY",
-                        Ref("EqualsSegment"),
-                        Ref("NumericLiteralSegment"),
+                    optional=True,
+                ),
+            ),
+            "DISABLE",
+            Sequence(
+                "REORGANIZE",
+                Sequence(
+                    "PARTITION",
+                    Ref("EqualsSegment"),
+                    Ref("NumericLiteralSegment"),
+                    optional=True,
+                ),
+                Sequence(
+                    "WITH",
+                    Bracketed(
                         Sequence(
-                            "MINUTES",
-                            optional=True,
+                            OneOf(
+                                "LOB_COMPACTION",
+                                "COMPRESS_ALL_ROW_GROUPS",
+                            ),
+                            Ref("EqualsSegment"),
+                            OneOf(
+                                "ON",
+                                "OFF",
+                            ),
                         ),
                     ),
-                    Sequence(
-                        "DATA_COMPRESSION",
-                        Ref("EqualsSegment"),
-                        OneOf(
-                            "NONE",
-                            "ROW",
-                            "PAGE",
-                            "COLUMNSTORE",  # for table constrains
-                            "COLUMNSTORE_ARCHIVE",  # for table constrains
+                    optional=True,
+                ),
+            ),
+            Sequence(
+                "SET",
+                Bracketed(
+                    Delimited(
+                        AnyNumberOf(
+                            Sequence(
+                                OneOf(
+                                    "ALLOW_ROW_LOCKS",
+                                    "ALLOW_PAGE_LOCKS",
+                                    "OPTIMIZE_FOR_SEQUENTIAL_KEY",
+                                    "IGNORE_DUP_KEY",
+                                    "STATISTICS_NORECOMPUTE",
+                                ),
+                                Ref("EqualsSegment"),
+                                OneOf(
+                                    "ON",
+                                    "OFF",
+                                ),
+                            ),
+                            Sequence(
+                                "COMPRESSION_DELAY",
+                                Ref("EqualsSegment"),
+                                Ref("NumericLiteralSegment"),
+                                Ref.keyword("MINUTES", optional=True),
+                            ),
                         ),
-                        Ref("OnPartitionsSegment", optional=True),
                     ),
-                    min_times=1,
                 ),
             ),
+            Sequence(
+                "RESUME",
+                Sequence(
+                    "WITH",
+                    Bracketed(
+                        Delimited(
+                            Sequence(
+                                OneOf(
+                                    "MAX_DURATION",
+                                    "MAXDOP",
+                                ),
+                                Ref("EqualsSegment"),
+                                Ref("NumericLiteralSegment"),
+                                Ref.keyword("MINUTES", optional=True),
+                            ),
+                            _low_priority_lock_wait,
+                        ),
+                    ),
+                    optional=True,
+                ),
+            ),
+            "PAUSE",
+            "ABORT",
         ),
     )
 
 
-class MaxDurationSegment(BaseSegment):
-    """A `MAX DURATION` clause.
+class OnPartitionOrFilegroupOptionSegment(BaseSegment):
+    """ON partition scheme or filegroup option.
 
     https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql?view=sql-server-ver15
+    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15
     """
 
-    type = "max_duration"
-    match_grammar = Sequence(
-        "MAX_DURATION",
-        Ref("EqualsSegment"),
-        Ref("NumericLiteralSegment"),
-        Sequence(
-            "MINUTES",
-            optional=True,
-        ),
+    type = "on_partition_or_filegroup_statement"
+    match_grammar = OneOf(
+        Ref("PartitionSchemeClause"),
+        Ref("FilegroupClause"),
+        Ref("LiteralGrammar"),  # for "default" value
     )
 
 
-class DropIndexStatementSegment(ansi.DropIndexStatementSegment):
-    """A `DROP INDEX` statement.
+class FilestreamOnOptionSegment(BaseSegment):
+    """FILESTREAM_ON index option in `CREATE INDEX` and 'CREATE TABLE' statements.
 
-    Overriding ANSI to include required ON clause.
+    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql?view=sql-server-ver15
+    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15
     """
 
+    type = "filestream_on_option_statement"
     match_grammar = Sequence(
-        "DROP",
-        "INDEX",
-        Ref("IfExistsGrammar", optional=True),
-        Ref("IndexReferenceSegment"),
-        "ON",
-        Ref("TableReferenceSegment"),
-        Ref("DelimiterGrammar", optional=True),
+        "FILESTREAM_ON",
+        OneOf(
+            Ref("FilegroupNameSegment"),
+            Ref("PartitionSchemeNameSegment"),
+            OneOf(
+                "NULL",
+                Ref("LiteralGrammar"),  # for "default" value
+            ),
+        ),
     )
 
 
-class DropStatisticsStatementSegment(BaseSegment):
-    """A `DROP STATISTICS` statement."""
+class TextimageOnOptionSegment(BaseSegment):
+    """TEXTIMAGE ON option in `CREATE TABLE` statement.
 
-    type = "drop_statement"
-    # DROP INDEX <Index name> [CONCURRENTLY] [IF EXISTS] {RESTRICT | CASCADE}
+    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15
+    """
+
+    type = "textimage_on_option_statement"
     match_grammar = Sequence(
-        "DROP",
-        OneOf("STATISTICS"),
-        Ref("IndexReferenceSegment"),
-        Ref("DelimiterGrammar", optional=True),
+        "TEXTIMAGE_ON",
+        OneOf(
+            Ref("FilegroupNameSegment"),
+            Ref("LiteralGrammar"),  # for "default" value
+        ),
     )
 
 
-class UpdateStatisticsStatementSegment(BaseSegment):
-    """An `UPDATE STATISTICS` statement.
+class TableOptionSegment(BaseSegment):
+    """TABLE option in `CREATE TABLE` statement.
 
-    https://docs.microsoft.com/en-us/sql/t-sql/statements/update-statistics-transact-sql?view=sql-server-ver15
+    https://learn.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15
     """
 
-    type = "update_statistics_statement"
-    match_grammar = Sequence(
-        "UPDATE",
-        "STATISTICS",
-        Ref("ObjectReferenceSegment"),
-        OneOf(
-            Ref("SingleIdentifierGrammar"),
-            Bracketed(
-                Delimited(
-                    Ref("SingleIdentifierGrammar"),
-                ),
+    _ledger_view_option = Delimited(
+        Sequence(
+            OneOf(
+                "TRANSACTION_ID_COLUMN_NAME",
+                "SEQUENCE_NUMBER_COLUMN_NAME",
+                "OPERATION_TYPE_COLUMN_NAME",
+                "OPERATION_TYPE_DESC_COLUMN_NAME",
             ),
+            Ref("EqualsSegment"),
+            Ref("ColumnReferenceSegment"),
             optional=True,
         ),
-        Ref("DelimiterGrammar", optional=True),
-        Sequence("WITH", OneOf("FULLSCAN", "RESAMPLE"), optional=True),
     )
 
-
-class ObjectReferenceSegment(ansi.ObjectReferenceSegment):
-    """A reference to an object.
-
-    Update ObjectReferenceSegment to only allow dot separated SingleIdentifierGrammar
-    So Square Bracketed identifiers can be matched.
-    """
-
-    # match grammar (allow whitespace)
-    match_grammar: Matchable = Sequence(
-        Ref("SingleIdentifierGrammar"),
-        AnyNumberOf(
+    _on_partitions = Sequence(
+        Sequence(
+            "ON",
+            "PARTITIONS",
+        ),
+        Bracketed(
+            Delimited(
+                Ref("NumericLiteralSegment"),
+            ),
             Sequence(
-                Ref("DotSegment"),
-                Ref("SingleIdentifierGrammar", optional=True),
+                "TO",
+                Ref("NumericLiteralSegment"),
+                optional=True,
             ),
-            min_times=0,
-            max_times=3,
         ),
+        optional=True,
     )
 
+    type = "table_option_statement"
+
+    match_grammar = Sequence(
+        "WITH",
+        Bracketed(
+            Delimited(
+                AnyNumberOf(
+                    Sequence("MEMORY_OPTIMIZED", Ref("EqualsSegment"), "ON"),
+                    Sequence(
+                        "DURABILITY",
+                        Ref("EqualsSegment"),
+                        OneOf("SCHEMA_ONLY", "SCHEMA_AND_DATA"),
+                    ),
+                    Sequence(
+                        "SYSTEM_VERSIONING",
+                        Ref("EqualsSegment"),
+                        "ON",
+                        Sequence(
+                            Bracketed(
+                                "HISTORY_TABLE",
+                                Ref("EqualsSegment"),
+                                Ref("TableReferenceSegment"),
+                                Sequence(
+                                    Ref("CommaSegment"),
+                                    "DATA_CONSISTENCY_CHECK",
+                                    Ref("EqualsSegment"),
+                                    OneOf("ON", "OFF"),
+                                    optional=True,
+                                ),
+                            ),
+                            optional=True,
+                        ),
+                    ),
+                    Sequence(
+                        "DATA_COMPRESSION",
+                        Ref("EqualsSegment"),
+                        OneOf(
+                            "NONE",
+                            "ROW",
+                            "PAGE",
+                        ),
+                        _on_partitions,
+                    ),
+                    Sequence(
+                        "XML_COMPRESSION",
+                        Ref("EqualsSegment"),
+                        OneOf("ON", "OFF"),
+                        _on_partitions,
+                    ),
+                    Sequence(
+                        "FILETABLE_DIRECTORY",
+                        Ref("EqualsSegment"),
+                        Ref("LiteralGrammar"),
+                    ),
+                    Sequence(
+                        OneOf(
+                            "FILETABLE_COLLATE_FILENAME",
+                            "FILETABLE_PRIMARY_KEY_CONSTRAINT_NAME",
+                            "FILETABLE_STREAMID_UNIQUE_CONSTRAINT_NAME",
+                            "FILETABLE_FULLPATH_UNIQUE_CONSTRAINT_NAME",
+                        ),
+                        Ref("EqualsSegment"),
+                        Ref("ObjectReferenceSegment"),
+                    ),
+                    Sequence(
+                        "REMOTE_DATA_ARCHIVE",
+                        Ref("EqualsSegment"),
+                        OneOf(
+                            Sequence(
+                                "ON",
+                                Bracketed(
+                                    Delimited(
+                                        Sequence(
+                                            "FILTER_PREDICATE",
+                                            Ref("EqualsSegment"),
+                                            OneOf(
+                                                "NULL",
+                                                Ref("FunctionNameSegment"),
+                                            ),
+                                            optional=True,
+                                        ),
+                                        Sequence(
+                                            "MIGRATION_STATE",
+                                            Ref("EqualsSegment"),
+                                            OneOf("OUTBOUND", "INBOUND", "PAUSED"),
+                                        ),
+                                    ),
+                                    optional=True,
+                                ),
+                            ),
+                            Sequence(
+                                "OFF",
+                                Bracketed(
+                                    "MIGRATION_STATE",
+                                    Ref("EqualsSegment"),
+                                    "PAUSED",
+                                ),
+                            ),
+                        ),
+                    ),
+                    Sequence(
+                        "DATA_DELETION",
+                        Ref("EqualsSegment"),
+                        "ON",
+                        Bracketed(
+                            "FILTER_COLUMN",
+                            Ref("EqualsSegment"),
+                            Ref("ColumnReferenceSegment"),
+                            Ref("CommaSegment"),
+                            "RETENTION_PERIOD",
+                            Ref("EqualsSegment"),
+                            Ref("NumericLiteralSegment", optional=True),
+                            Ref("DatetimeUnitSegment"),
+                        ),
+                    ),
+                    Sequence(
+                        "LEDGER",
+                        Ref("EqualsSegment"),
+                        OneOf(
+                            Sequence(
+                                "ON",
+                                Bracketed(
+                                    Delimited(
+                                        Sequence(
+                                            "LEDGER_VIEW",
+                                            Ref("EqualsSegment"),
+                                            Ref("TableReferenceSegment"),
+                                            Bracketed(
+                                                _ledger_view_option, optional=True
+                                            ),
+                                            optional=True,
+                                        ),
+                                        Sequence(
+                                            "APPEND_ONLY",
+                                            Ref("EqualsSegment"),
+                                            OneOf("ON", "OFF"),
+                                            optional=True,
+                                        ),
+                                    ),
+                                    optional=True,
+                                ),
+                            ),
+                            "OFF",
+                        ),
+                    ),
+                )
+            )
+        ),
+    )
+
+
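Reviewer note: for example, the WITH clause of a memory-optimised table now parses (sketch via the simple API; the table definition is illustrative):

import sqlfluff

sqlfluff.parse(
    "CREATE TABLE dbo.SessionCache (id INT PRIMARY KEY NONCLUSTERED) "
    "WITH (MEMORY_OPTIMIZED = ON, DURABILITY = SCHEMA_ONLY);",
    dialect="tsql",
)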
+class ReferencesConstraintGrammar(BaseSegment):
+    """REFERENCES constraint option in `CREATE TABLE` statement.
+
+    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15
+    """
+
+    type = "references_constraint_grammar"
+    match_grammar = Sequence(
+        # REFERENCES reftable [ ( refcolumn) ]
+        "REFERENCES",
+        Ref("TableReferenceSegment"),
+        # Foreign columns making up FOREIGN KEY constraint
+        Ref("BracketedColumnReferenceListGrammar", optional=True),
+        Sequence(
+            "ON",
+            "DELETE",
+            OneOf(
+                Sequence("NO", "ACTION"),
+                "CASCADE",
+                Sequence("SET", "NULL"),
+                Sequence("SET", "DEFAULT"),
+            ),
+            optional=True,
+        ),
+        Sequence(
+            "ON",
+            "UPDATE",
+            OneOf(
+                Sequence("NO", "ACTION"),
+                "CASCADE",
+                Sequence("SET", "NULL"),
+                Sequence("SET", "DEFAULT"),
+            ),
+            optional=True,
+        ),
+        Sequence("NOT", "FOR", "REPLICATION", optional=True),
+    )
+
+
+class CheckConstraintGrammar(BaseSegment):
+    """CHECK constraint option in `CREATE TABLE` statement.
+
+    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15
+    """
+
+    type = "check_constraint_grammar"
+    match_grammar = Sequence(
+        "CHECK",
+        Sequence("NOT", "FOR", "REPLICATION", optional=True),
+        Bracketed(
+            Ref("ExpressionSegment"),
+        ),
+    )
+
+
+class RelationalIndexOptionsSegment(BaseSegment):
+    """A relational index options in `CREATE INDEX` statement.
+
+    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql?view=sql-server-ver15
+    """
+
+    type = "relational_index_options"
+    match_grammar = Sequence(
+        "WITH",
+        OptionallyBracketed(
+            Delimited(
+                AnyNumberOf(
+                    Sequence(
+                        OneOf(
+                            "PAD_INDEX",
+                            "FILLFACTOR",
+                            "SORT_IN_TEMPDB",
+                            "IGNORE_DUP_KEY",
+                            "STATISTICS_NORECOMPUTE",
+                            "STATISTICS_INCREMENTAL",
+                            "DROP_EXISTING",
+                            "RESUMABLE",
+                            "ALLOW_ROW_LOCKS",
+                            "ALLOW_PAGE_LOCKS",
+                            "OPTIMIZE_FOR_SEQUENTIAL_KEY",
+                            "MAXDOP",
+                        ),
+                        Ref("EqualsSegment"),
+                        OneOf(
+                            "ON",
+                            "OFF",
+                            Ref("LiteralGrammar"),
+                        ),
+                    ),
+                    Ref("MaxDurationSegment"),
+                    Sequence(
+                        "ONLINE",
+                        Ref("EqualsSegment"),
+                        OneOf(
+                            "OFF",
+                            Sequence(
+                                "ON",
+                                Bracketed(
+                                    Sequence(
+                                        "WAIT_AT_LOW_PRIORITY",
+                                        Bracketed(
+                                            Delimited(
+                                                Ref("MaxDurationSegment"),
+                                                Sequence(
+                                                    "ABORT_AFTER_WAIT",
+                                                    Ref("EqualsSegment"),
+                                                    OneOf(
+                                                        "NONE",
+                                                        "SELF",
+                                                        "BLOCKERS",
+                                                    ),
+                                                ),
+                                            ),
+                                        ),
+                                    ),
+                                    optional=True,
+                                ),
+                            ),
+                        ),
+                    ),
+                    # for table constraints
+                    Sequence(
+                        "COMPRESSION_DELAY",
+                        Ref("EqualsSegment"),
+                        Ref("NumericLiteralSegment"),
+                        Sequence(
+                            "MINUTES",
+                            optional=True,
+                        ),
+                    ),
+                    Sequence(
+                        "DATA_COMPRESSION",
+                        Ref("EqualsSegment"),
+                        OneOf(
+                            "NONE",
+                            "ROW",
+                            "PAGE",
+                            "COLUMNSTORE",  # for table constrains
+                            "COLUMNSTORE_ARCHIVE",  # for table constrains
+                        ),
+                        Ref("OnPartitionsSegment", optional=True),
+                    ),
+                    min_times=1,
+                ),
+            ),
+        ),
+    )
+
+
+class MaxDurationSegment(BaseSegment):
+    """A `MAX DURATION` clause.
+
+    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql?view=sql-server-ver15
+    """
+
+    type = "max_duration"
+    match_grammar = Sequence(
+        "MAX_DURATION",
+        Ref("EqualsSegment"),
+        Ref("NumericLiteralSegment"),
+        Sequence(
+            "MINUTES",
+            optional=True,
+        ),
+    )
+
+
+class DropIndexStatementSegment(ansi.DropIndexStatementSegment):
+    """A `DROP INDEX` statement.
+
+    Overriding ANSI to include required ON clause.
+    """
+
+    match_grammar = Sequence(
+        "DROP",
+        "INDEX",
+        Ref("IfExistsGrammar", optional=True),
+        Ref("IndexReferenceSegment"),
+        "ON",
+        Ref("TableReferenceSegment"),
+        Ref("DelimiterGrammar", optional=True),
+    )
+
+
+class DropStatisticsStatementSegment(BaseSegment):
+    """A `DROP STATISTICS` statement."""
+
+    type = "drop_statement"
+    # DROP INDEX <Index name> [CONCURRENTLY] [IF EXISTS] {RESTRICT | CASCADE}
+    match_grammar = Sequence(
+        "DROP",
+        OneOf("STATISTICS"),
+        Ref("IndexReferenceSegment"),
+        Ref("DelimiterGrammar", optional=True),
+    )
+
+
+class UpdateStatisticsStatementSegment(BaseSegment):
+    """An `UPDATE STATISTICS` statement.
+
+    https://docs.microsoft.com/en-us/sql/t-sql/statements/update-statistics-transact-sql?view=sql-server-ver15
+    """
+
+    type = "update_statistics_statement"
+    match_grammar = Sequence(
+        "UPDATE",
+        "STATISTICS",
+        Ref("ObjectReferenceSegment"),
+        OneOf(
+            Ref("SingleIdentifierGrammar"),
+            Bracketed(
+                Delimited(
+                    Ref("SingleIdentifierGrammar"),
+                ),
+            ),
+            optional=True,
+        ),
+        Ref("DelimiterGrammar", optional=True),
+        Sequence("WITH", OneOf("FULLSCAN", "RESAMPLE"), optional=True),
+    )
+
+
+class ObjectReferenceSegment(ansi.ObjectReferenceSegment):
+    """A reference to an object.
+
+    Update ObjectReferenceSegment to only allow dot separated SingleIdentifierGrammar
+    So Square Bracketed identifiers can be matched.
+    """
+
+    # match grammar (allow whitespace)
+    match_grammar: Matchable = Sequence(
+        Ref("SingleIdentifierGrammar"),
+        AnyNumberOf(
+            Sequence(
+                Ref("DotSegment"),
+                Ref("SingleIdentifierGrammar", optional=True),
+            ),
+            min_times=0,
+            max_times=3,
+        ),
+    )
+
+
+class TableReferenceSegment(ObjectReferenceSegment):
+    """A reference to an table, CTE, subquery or alias.
+
+    Overriding to capture TSQL's override of ObjectReferenceSegment
+    """
+
+    type = "table_reference"
+
 
-class TableReferenceSegment(ObjectReferenceSegment):
-    """A reference to an table, CTE, subquery or alias.
-
-    Overriding to capture TSQL's override of ObjectReferenceSegment
-    """
-
-    type = "table_reference"
-
-
 class SchemaReferenceSegment(ObjectReferenceSegment):
     """A reference to a schema.
 
@@ -1281,6 +1936,25 @@ class GoStatementSegment(BaseSegment):
     match_grammar = Ref.keyword("GO")
 
 
+class BracketedArguments(ansi.BracketedArguments):
+    """A series of bracketed arguments.
+
+    e.g. the bracketed part of numeric(1, 3)
+    """
+
+    match_grammar = Bracketed(
+        Delimited(
+            OneOf(
+                # TSQL allows optional MAX in some data types
+                "MAX",
+                Ref("ExpressionSegment"),
+            ),
+            # The brackets might be empty for some cases...
+            optional=True,
+        ),
+    )
+
+
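Reviewer note: both the MAX keyword and ordinary expressions are now accepted inside the type brackets (sketch via the simple API; table and column names are placeholders):

import sqlfluff

sqlfluff.parse(
    "CREATE TABLE dbo.t (doc VARCHAR(MAX), amount NUMERIC(18, 2));",
    dialect="tsql",
)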
 class DatatypeSegment(BaseSegment):
     """A data type segment.
 
@@ -1300,16 +1974,9 @@ class DatatypeSegment(BaseSegment):
             Ref("DatatypeIdentifierSegment"),
             Bracketed(Ref("DatatypeIdentifierSegment"), bracket_type="square"),
         ),
-        Bracketed(
-            OneOf(
-                "MAX",
-                Delimited(Ref("ExpressionSegment")),
-                # The brackets might be empty for some cases...
-                optional=True,
-            ),
-            # There may be no brackets for some data types
-            optional=True,
-        ),
+        # Stop-gap until explicit data types; only relevant for character types.
+        Ref.keyword("VARYING", optional=True),
+        Ref("BracketedArguments", optional=True),
         Ref("CharCharacterSetGrammar", optional=True),
     )
 
@@ -1557,7 +2224,10 @@ class FunctionParameterListGrammar(BaseSegment):
     # Function parameter list
     match_grammar = Bracketed(
         Delimited(
-            Ref("FunctionParameterGrammar"),
+            Sequence(
+                Ref("FunctionParameterGrammar"),
+                Sequence("READONLY", optional=True),
+            ),
             optional=True,
         ),
     )
@@ -1977,11 +2647,23 @@ class ReservedKeywordFunctionNameSegment(BaseSegment):
     type = "function_name"
     match_grammar = OneOf(
         "COALESCE",
-        "CURRENT_TIMESTAMP",
-        "CURRENT_USER",
         "LEFT",
         "NULLIF",
         "RIGHT",
+    )
+
+
+class ReservedKeywordBareFunctionNameSegment(BaseSegment):
+    """Reserved keywords that are functions without parentheses.
+
+    These need to be specified with type function_name so that linting rules
+    identify them properly.
+    """
+
+    type = "function_name"
+    match_grammar = OneOf(
+        "CURRENT_TIMESTAMP",
+        "CURRENT_USER",
         "SESSION_USER",
         "SYSTEM_USER",
     )
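+    # e.g. (illustrative): matches the bare CURRENT_TIMESTAMP in
+    # SELECT CURRENT_TIMESTAMP (no parentheses).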
@@ -2107,6 +2789,7 @@ class FunctionSegment(BaseSegment):
 
     type = "function"
     match_grammar = OneOf(
+        Ref("ReservedKeywordBareFunctionNameSegment"),
         Sequence(
             # Treat functions which take date parts separately
             # So those functions parse date parts as DatetimeUnitSegment
@@ -2219,6 +2902,7 @@ class CreateTableStatementSegment(BaseSegment):
                             Ref("TableConstraintSegment"),
                             Ref("ColumnDefinitionSegment"),
                             Ref("TableIndexSegment"),
+                            Ref("PeriodSegment"),
                         ),
                         allow_trailing=True,
                     )
@@ -2238,7 +2922,7 @@ class CreateTableStatementSegment(BaseSegment):
         Ref("OnPartitionOrFilegroupOptionSegment", optional=True),
         Ref("FilestreamOnOptionSegment", optional=True),
         Ref("TextimageOnOptionSegment", optional=True),
-        # need to add table options here
+        Ref("TableOptionSegment", optional=True),
         Ref("DelimiterGrammar", optional=True),
     )
 
@@ -2267,13 +2951,16 @@ class AlterTableStatementSegment(BaseSegment):
                     OneOf(Ref("LiteralGrammar"), Ref("NakedIdentifierSegment")),
                 ),
                 Sequence(
-                    OneOf(
-                        "ADD",
-                        "ALTER",
-                    ),
-                    Ref.keyword("COLUMN", optional=True),
+                    "ALTER",
+                    "COLUMN",
                     Ref("ColumnDefinitionSegment"),
                 ),
+                Sequence(
+                    "ADD",
+                    Delimited(
+                        Ref("ColumnDefinitionSegment"),
+                    ),
+                ),
                 Sequence(
                     "DROP",
                     "COLUMN",
@@ -2309,7 +2996,79 @@ class AlterTableStatementSegment(BaseSegment):
                     OneOf("AS", "TO", optional=True),
                     Ref("TableReferenceSegment"),
                 ),
-            ),
+                Sequence(
+                    "SET",
+                    OneOf(
+                        Bracketed(
+                            Sequence(
+                                "FILESTREAM_ON",
+                                Ref("EqualsSegment"),
+                                OneOf(
+                                    Ref("FilegroupNameSegment"),
+                                    Ref("PartitionSchemeNameSegment"),
+                                    OneOf(
+                                        "NULL",
+                                        Ref("LiteralGrammar"),  # for "default" value
+                                    ),
+                                ),
+                            )
+                        ),
+                        Bracketed(
+                            Sequence(
+                                "SYSTEM_VERSIONING",
+                                Ref("EqualsSegment"),
+                                OneOf("ON", "OFF"),
+                                Sequence(
+                                    Bracketed(
+                                        "HISTORY_TABLE",
+                                        Ref("EqualsSegment"),
+                                        Ref("TableReferenceSegment"),
+                                        Sequence(
+                                            Ref("CommaSegment"),
+                                            "DATA_CONSISTENCY_CHECK",
+                                            Ref("EqualsSegment"),
+                                            OneOf("ON", "OFF"),
+                                            optional=True,
+                                        ),
+                                        Sequence(
+                                            Ref("CommaSegment"),
+                                            "HISTORY_RETENTION_PERIOD",
+                                            Ref("EqualsSegment"),
+                                            Ref("NumericLiteralSegment", optional=True),
+                                            Ref("DatetimeUnitSegment"),
+                                            optional=True,
+                                        ),
+                                    ),
+                                    optional=True,
+                                ),
+                            )
+                        ),
+                        Bracketed(
+                            Sequence(
+                                "DATA_DELETION",
+                                Ref("EqualsSegment"),
+                                OneOf("ON", "OFF"),
+                                Sequence(
+                                    Bracketed(
+                                        "FILTER_COLUMN",
+                                        Ref("EqualsSegment"),
+                                        Ref("ColumnReferenceSegment"),
+                                        Sequence(
+                                            Ref("CommaSegment"),
+                                            "RETENTION_PERIOD",
+                                            Ref("EqualsSegment"),
+                                            Ref("NumericLiteralSegment", optional=True),
+                                            Ref("DatetimeUnitSegment"),
+                                            optional=True,
+                                        ),
+                                    ),
+                                    optional=True,
+                                ),
+                            ),
+                        ),
+                    ),
+                ),
+            )
         ),
     )
 
@@ -2556,7 +3315,7 @@ class TableLocationClause(BaseSegment):
         Ref("EqualsSegment"),
         OneOf(
             "USER_DB",  # Azure Synapse Analytics specific
-            Ref("QuotedLiteralSegment"),  # External Table
+            Ref("QuotedLiteralSegmentOptWithN"),  # External Table
         ),
     )
 
@@ -2918,1268 +3677,1906 @@ class DeleteStatementSegment(BaseSegment):
     )
 
 
-class FromClauseSegment(BaseSegment):
-    """A `FROM` clause like in `SELECT`.
+class FromClauseSegment(BaseSegment):
+    """A `FROM` clause like in `SELECT`.
+
+    NOTE: this is a delimited set of table expressions, with a variable
+    number of optional join clauses with those table expressions. The
+    delimited aspect is the higher of the two such that the following is
+    valid (albeit unusual):
+
+    ```
+    SELECT *
+    FROM a JOIN b, c JOIN d
+    ```
+
+    Overriding ANSI to remove Delimited logic which assumes statements have been
+    delimited
+    """
+
+    type = "from_clause"
+    match_grammar = Sequence(
+        "FROM",
+        Delimited(Ref("FromExpressionSegment")),
+        Ref("DelimiterGrammar", optional=True),
+    )
+
+    get_eventual_aliases = ansi.FromClauseSegment.get_eventual_aliases
+
+
+class FromExpressionElementSegment(ansi.FromExpressionElementSegment):
+    """FROM Expression Element Segment.
+
+    Overriding ANSI to add Temporal Query.
+    """
+
+    match_grammar = ansi.FromExpressionElementSegment.match_grammar.copy(
+        insert=[
+            Ref("TemporalQuerySegment", optional=True),
+        ],
+        before=Ref(
+            "AliasExpressionSegment",
+            exclude=OneOf(
+                Ref("SamplingExpressionSegment"),
+                Ref("JoinLikeClauseGrammar"),
+            ),
+            optional=True,
+        ),
+    )
+
+
+class TableExpressionSegment(BaseSegment):
+    """The main table expression e.g. within a FROM clause.
+
+    In the SQL standard, as well as in T-SQL, table expressions (`table reference`
+    in the standard) can also be joined tables, optionally bracketed, allowing for
+    nested joins.
+    """
+
+    type = "table_expression"
+    match_grammar: Matchable = OneOf(
+        Ref("ValuesClauseSegment"),
+        Ref("BareFunctionSegment"),
+        Ref("FunctionSegment"),
+        Ref("OpenRowSetSegment"),
+        Ref("OpenJsonSegment"),
+        Ref("TableReferenceSegment"),
+        # Nested Selects
+        Bracketed(Ref("SelectableGrammar")),
+        Bracketed(Ref("MergeStatementSegment")),
+        Bracketed(
+            Sequence(
+                Ref("TableExpressionSegment"),
+                # TODO: Revisit this to make sure it's sensible.
+                Conditional(Dedent, indented_joins=False),
+                Conditional(Indent, indented_joins=True),
+                OneOf(Ref("JoinClauseSegment"), Ref("JoinLikeClauseGrammar")),
+                Conditional(Dedent, indented_joins=True),
+                Conditional(Indent, indented_joins=True),
+            )
+        ),
+    )
+
+
+class GroupByClauseSegment(BaseSegment):
+    """A `GROUP BY` clause like in `SELECT`.
+
+    Overriding ANSI to remove Delimited logic which assumes statements have been
+    delimited
+    """
+
+    type = "groupby_clause"
+    match_grammar = Sequence(
+        "GROUP",
+        "BY",
+        Indent,
+        OneOf(
+            Ref("ColumnReferenceSegment"),
+            # Can `GROUP BY 1`
+            Ref("NumericLiteralSegment"),
+            # Can `GROUP BY coalesce(col, 1)`
+            Ref("ExpressionSegment"),
+        ),
+        AnyNumberOf(
+            Ref("CommaSegment"),
+            OneOf(
+                Ref("ColumnReferenceSegment"),
+                # Can `GROUP BY 1`
+                Ref("NumericLiteralSegment"),
+                # Can `GROUP BY coalesce(col, 1)`
+                Ref("ExpressionSegment"),
+            ),
+        ),
+        Dedent,
+    )
+
+
+class HavingClauseSegment(BaseSegment):
+    """A `HAVING` clause like in `SELECT`.
+
+    Overriding ANSI to remove the StartsWith logic with its greedy terminator.
+    """
+
+    type = "having_clause"
+    match_grammar = Sequence(
+        "HAVING",
+        Indent,
+        OptionallyBracketed(Ref("ExpressionSegment")),
+        Dedent,
+    )
+
+
+class OrderByClauseSegment(BaseSegment):
+    """A `ORDER BY` clause like in `SELECT`.
+
+    Overriding ANSI to remove StartsWith logic which assumes statements have been
+    delimited
+    """
+
+    type = "orderby_clause"
+    match_grammar = Sequence(
+        "ORDER",
+        "BY",
+        Indent,
+        Delimited(
+            Sequence(
+                OneOf(
+                    Ref("ColumnReferenceSegment"),
+                    # Can `ORDER BY 1`
+                    Ref("NumericLiteralSegment"),
+                    # Can order by an expression
+                    Ref("ExpressionSegment"),
+                ),
+                OneOf("ASC", "DESC", optional=True),
+            ),
+        ),
+        Dedent,
+    )
+
+
+class RenameStatementSegment(BaseSegment):
+    """`RENAME` statement.
+
+    https://docs.microsoft.com/en-us/sql/t-sql/statements/rename-transact-sql?view=aps-pdw-2016-au7
+    Azure Synapse Analytics-specific.
+    """
+
+    type = "rename_statement"
+    match_grammar = Sequence(
+        "RENAME",
+        "OBJECT",
+        Ref("ObjectReferenceSegment"),
+        "TO",
+        Ref("SingleIdentifierGrammar"),
+        Ref("DelimiterGrammar", optional=True),
+    )
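+    # e.g. (illustrative sketch; names invented):
+    #   RENAME OBJECT my_db.dbo.old_table TO new_table;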
+
+
+class DropTableStatementSegment(ansi.DropTableStatementSegment):
+    """A `DROP TABLE` statement.
 
-    NOTE: this is a delimited set of table expressions, with a variable
-    number of optional join clauses with those table expressions. The
-    delmited aspect is the higher of the two such that the following is
-    valid (albeit unusual):
+    Overriding ANSI to add optional delimiter.
+    """
 
-    ```
-    SELECT *
-    FROM a JOIN b, c JOIN d
-    ```
+    match_grammar = ansi.DropTableStatementSegment.match_grammar.copy(
+        insert=[
+            Ref("DelimiterGrammar", optional=True),
+        ],
+    )
 
-    Overriding ANSI to remove Delimited logic which assumes statements have been
-    delimited
+
+class DropViewStatementSegment(ansi.DropViewStatementSegment):
+    """A `DROP VIEW` statement.
+
+    Overriding ANSI to add optional delimiter.
     """
 
-    type = "from_clause"
-    match_grammar = Sequence(
-        "FROM",
-        Delimited(Ref("FromExpressionSegment")),
-        Ref("DelimiterGrammar", optional=True),
+    match_grammar = ansi.DropViewStatementSegment.match_grammar.copy(
+        insert=[
+            Ref("DelimiterGrammar", optional=True),
+        ],
     )
 
-    get_eventual_aliases = ansi.FromClauseSegment.get_eventual_aliases
 
+class DropUserStatementSegment(ansi.DropUserStatementSegment):
+    """A `DROP USER` statement.
 
-class TableExpressionSegment(BaseSegment):
-    """The main table expression e.g. within a FROM clause.
+    Overriding ANSI to add optional delimiter.
+    """
 
-    In SQL standard, as well as T-SQL, table expressions (`table reference` in SQL
-    standard) can also be join tables, optionally bracketed, allowing for nested joins.
+    match_grammar = ansi.DropUserStatementSegment.match_grammar.copy(
+        insert=[
+            Ref("DelimiterGrammar", optional=True),
+        ],
+    )
+
+
+class UpdateStatementSegment(BaseSegment):
+    """An `Update` statement.
+
+    UPDATE <table name> SET <set clause list> [ WHERE <search condition> ]
+    Overriding ANSI in order to allow for PostTableExpressionGrammar (table hints)
     """
 
-    type = "table_expression"
-    match_grammar: Matchable = OneOf(
-        Ref("ValuesClauseSegment"),
-        Ref("BareFunctionSegment"),
-        Ref("FunctionSegment"),
-        Ref("OpenRowSetSegment"),
-        Ref("TableReferenceSegment"),
-        # Nested Selects
-        Bracketed(Ref("SelectableGrammar")),
-        Bracketed(Ref("MergeStatementSegment")),
-        Bracketed(
-            Sequence(
-                Ref("TableExpressionSegment"),
-                Conditional(Dedent, indented_joins=False),
-                OneOf(Ref("JoinClauseSegment"), Ref("JoinLikeClauseGrammar")),
-                Conditional(Dedent, indented_joins=True),
-            )
-        ),
+    type = "update_statement"
+    match_grammar = Sequence(
+        "UPDATE",
+        OneOf(Ref("TableReferenceSegment"), Ref("AliasedTableReferenceGrammar")),
+        Ref("PostTableExpressionGrammar", optional=True),
+        Ref("SetClauseListSegment"),
+        Ref("OutputClauseSegment", optional=True),
+        Ref("FromClauseSegment", optional=True),
+        Ref("WhereClauseSegment", optional=True),
+        Ref("OptionClauseSegment", optional=True),
+        Ref("DelimiterGrammar", optional=True),
     )
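+    # e.g. (illustrative sketch; names invented):
+    #   UPDATE dbo.some_table WITH (TABLOCK)
+    #   SET col1 = col1 + 1
+    #   WHERE id = 42;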
 
 
-class GroupByClauseSegment(BaseSegment):
-    """A `GROUP BY` clause like in `SELECT`.
+class SetClauseListSegment(BaseSegment):
+    """set clause list.
 
-    Overriding ANSI to remove Delimited logic which assumes statements have been
-    delimited
+    Overriding ANSI to remove Delimited
     """
 
-    type = "groupby_clause"
+    type = "set_clause_list"
     match_grammar = Sequence(
-        "GROUP",
-        "BY",
+        "SET",
         Indent,
-        OneOf(
-            Ref("ColumnReferenceSegment"),
-            # Can `GROUP BY 1`
-            Ref("NumericLiteralSegment"),
-            # Can `GROUP BY coalesce(col, 1)`
-            Ref("ExpressionSegment"),
-        ),
+        Ref("SetClauseSegment"),
         AnyNumberOf(
             Ref("CommaSegment"),
-            OneOf(
-                Ref("ColumnReferenceSegment"),
-                # Can `GROUP BY 1`
-                Ref("NumericLiteralSegment"),
-                # Can `GROUP BY coalesce(col, 1)`
-                Ref("ExpressionSegment"),
-            ),
+            Ref("SetClauseSegment"),
         ),
         Dedent,
     )
 
 
-class HavingClauseSegment(BaseSegment):
-    """A `HAVING` clause like in `SELECT`.
+class SetClauseSegment(BaseSegment):
+    """Set clause.
 
-    Overriding ANSI to remove StartsWith with greedy terminator
+    Overriding ANSI to allow for ExpressionSegment on the right
     """
 
-    type = "having_clause"
+    type = "set_clause"
+
     match_grammar = Sequence(
-        "HAVING",
-        Indent,
-        OptionallyBracketed(Ref("ExpressionSegment")),
-        Dedent,
+        Ref("ColumnReferenceSegment"),
+        Ref("AssignmentOperatorSegment"),
+        Ref("ExpressionSegment"),
     )
 
 
-class OrderByClauseSegment(BaseSegment):
-    """A `ORDER BY` clause like in `SELECT`.
+class PrintStatementSegment(BaseSegment):
+    """PRINT statement segment."""
+
+    type = "print_statement"
+    match_grammar = Sequence(
+        "PRINT",
+        Ref("ExpressionSegment"),
+        Ref("DelimiterGrammar", optional=True),
+    )
+
+
+class OptionClauseSegment(BaseSegment):
+    """Query Hint clause.
+
+    https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-query?view=sql-server-ver15
+    """
+
+    type = "option_clause"
+    match_grammar = Sequence(
+        "OPTION",
+        Bracketed(
+            Delimited(Ref("QueryHintSegment")),
+        ),
+    )
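+    # e.g. (illustrative): OPTION (MAXDOP 2, RECOMPILE)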
+
+
+class QueryHintSegment(BaseSegment):
+    """Query Hint segment.
+
+    https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-query?view=sql-server-ver15
+    """
+
+    type = "query_hint_segment"
+    match_grammar = OneOf(
+        Sequence(  # Azure Synapse Analytics specific
+            "LABEL",
+            Ref("EqualsSegment"),
+            Ref("QuotedLiteralSegmentOptWithN"),
+        ),
+        Sequence(
+            OneOf("HASH", "ORDER"),
+            "GROUP",
+        ),
+        Sequence(OneOf("MERGE", "HASH", "CONCAT"), "UNION"),
+        Sequence(OneOf("LOOP", "MERGE", "HASH"), "JOIN"),
+        Sequence("EXPAND", "VIEWS"),
+        Sequence(
+            OneOf(
+                "FAST",
+                "MAXDOP",
+                "MAXRECURSION",
+                "QUERYTRACEON",
+                Sequence(
+                    OneOf(
+                        "MAX_GRANT_PERCENT",
+                        "MIN_GRANT_PERCENT",
+                    ),
+                    Ref("EqualsSegment"),
+                ),
+            ),
+            Ref("NumericLiteralSegment"),
+        ),
+        Sequence("FORCE", "ORDER"),
+        Sequence(
+            OneOf("FORCE", "DISABLE"),
+            OneOf("EXTERNALPUSHDOWN", "SCALEOUTEXECUTION"),
+        ),
+        Sequence(
+            OneOf(
+                "KEEP",
+                "KEEPFIXED",
+                "ROBUST",
+            ),
+            "PLAN",
+        ),
+        "IGNORE_NONCLUSTERED_COLUMNSTORE_INDEX",
+        "NO_PERFORMANCE_SPOOL",
+        Sequence(
+            "OPTIMIZE",
+            "FOR",
+            OneOf(
+                "UNKNOWN",
+                Bracketed(
+                    Ref("ParameterNameSegment"),
+                    OneOf(
+                        "UNKNOWN", Sequence(Ref("EqualsSegment"), Ref("LiteralGrammar"))
+                    ),
+                    AnyNumberOf(
+                        Ref("CommaSegment"),
+                        Ref("ParameterNameSegment"),
+                        OneOf(
+                            "UNKNOWN",
+                            Sequence(Ref("EqualsSegment"), Ref("LiteralGrammar")),
+                        ),
+                    ),
+                ),
+            ),
+        ),
+        Sequence("PARAMETERIZATION", OneOf("SIMPLE", "FORCED")),
+        "RECOMPILE",
+        Sequence(
+            "USE",
+            "HINT",
+            Bracketed(
+                Ref("QuotedLiteralSegment"),
+                AnyNumberOf(Ref("CommaSegment"), Ref("QuotedLiteralSegment")),
+            ),
+        ),
+        Sequence(
+            "USE",
+            "PLAN",
+            Ref("QuotedLiteralSegmentOptWithN"),
+        ),
+        Sequence(
+            "TABLE",
+            "HINT",
+            Ref("ObjectReferenceSegment"),
+            Delimited(Ref("TableHintSegment")),
+        ),
+    )
+
+
+class PostTableExpressionGrammar(BaseSegment):
+    """Table Hint clause.  Overloading the PostTableExpressionGrammar to implement.
 
-    Overriding ANSI to remove StartsWith logic which assumes statements have been
-    delimited
+    https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-table?view=sql-server-ver15
     """
 
-    type = "orderby_clause"
+    type = "post_table_expression"
     match_grammar = Sequence(
-        "ORDER",
-        "BY",
-        Indent,
-        Delimited(
-            Sequence(
-                OneOf(
-                    Ref("ColumnReferenceSegment"),
-                    # Can `ORDER BY 1`
-                    Ref("NumericLiteralSegment"),
-                    # Can order by an expression
-                    Ref("ExpressionSegment"),
-                ),
-                OneOf("ASC", "DESC", optional=True),
+        Sequence("WITH", optional=True),
+        Bracketed(
+            Ref("TableHintSegment"),
+            AnyNumberOf(
+                Ref("CommaSegment"),
+                Ref("TableHintSegment"),
             ),
         ),
-        Dedent,
     )
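+    # e.g. (illustrative sketch; names invented): WITH (NOLOCK, INDEX(ix_example))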
 
 
-class RenameStatementSegment(BaseSegment):
-    """`RENAME` statement.
+class TableHintSegment(BaseSegment):
+    """Table Hint segment.
 
-    https://docs.microsoft.com/en-us/sql/t-sql/statements/rename-transact-sql?view=aps-pdw-2016-au7
-    Azure Synapse Analytics-specific.
+    https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-table?view=sql-server-ver15
     """
 
-    type = "rename_statement"
-    match_grammar = Sequence(
-        "RENAME",
-        "OBJECT",
-        Ref("ObjectReferenceSegment"),
-        "TO",
-        Ref("SingleIdentifierGrammar"),
-        Ref("DelimiterGrammar", optional=True),
+    type = "query_hint_segment"
+    match_grammar = OneOf(
+        "NOEXPAND",
+        Sequence(
+            "INDEX",
+            Bracketed(
+                Delimited(
+                    OneOf(Ref("IndexReferenceSegment"), Ref("NumericLiteralSegment")),
+                ),
+            ),
+        ),
+        Sequence(
+            "INDEX",
+            Ref("EqualsSegment"),
+            Bracketed(
+                OneOf(Ref("IndexReferenceSegment"), Ref("NumericLiteralSegment")),
+            ),
+        ),
+        "KEEPIDENTITY",
+        "KEEPDEFAULTS",
+        Sequence(
+            "FORCESEEK",
+            Bracketed(
+                Ref("IndexReferenceSegment"),
+                Bracketed(
+                    Ref("SingleIdentifierGrammar"),
+                    AnyNumberOf(Ref("CommaSegment"), Ref("SingleIdentifierGrammar")),
+                ),
+                optional=True,
+            ),
+        ),
+        "FORCESCAN",
+        "HOLDLOCK",
+        "IGNORE_CONSTRAINTS",
+        "IGNORE_TRIGGERS",
+        "NOLOCK",
+        "NOWAIT",
+        "PAGLOCK",
+        "READCOMMITTED",
+        "READCOMMITTEDLOCK",
+        "READPAST",
+        "READUNCOMMITTED",
+        "REPEATABLEREAD",
+        "ROWLOCK",
+        "SERIALIZABLE",
+        "SNAPSHOT",
+        Sequence(
+            "SPATIAL_WINDOW_MAX_CELLS",
+            Ref("EqualsSegment"),
+            Ref("NumericLiteralSegment"),
+        ),
+        "TABLOCK",
+        "TABLOCKX",
+        "UPDLOCK",
+        "XLOCK",
     )
 
 
-class DropTableStatementSegment(ansi.DropTableStatementSegment):
-    """A `DROP TABLE` statement.
+class SetOperatorSegment(BaseSegment):
+    """A set operator such as Union, Except or Intersect.
 
-    Overriding ANSI to add optional delimiter.
+    Overriding ANSI to remove MINUS, which is not a keyword in TSQL.
     """
 
-    match_grammar = ansi.DropTableStatementSegment.match_grammar.copy(
-        insert=[
-            Ref("DelimiterGrammar", optional=True),
-        ],
+    type = "set_operator"
+    match_grammar = OneOf(
+        Sequence("UNION", OneOf("DISTINCT", "ALL", optional=True)),
+        "INTERSECT",
+        "EXCEPT",
     )
 
 
-class DropViewStatementSegment(ansi.DropViewStatementSegment):
-    """A `DROP VIEW` statement.
+class SetExpressionSegment(BaseSegment):
+    """A set expression with either Union, Minus, Except or Intersect.
 
-    Overriding ANSI to add optional delimiter.
+    Overriding ANSI to include OPTION clause.
     """
 
-    match_grammar = ansi.DropViewStatementSegment.match_grammar.copy(
-        insert=[
-            Ref("DelimiterGrammar", optional=True),
-        ],
+    type = "set_expression"
+    # match grammar
+    match_grammar = Sequence(
+        Ref("NonSetSelectableGrammar"),
+        AnyNumberOf(
+            Sequence(
+                Ref("SetOperatorSegment"),
+                Ref("NonSetSelectableGrammar"),
+            ),
+            min_times=1,
+        ),
+        Ref("OrderByClauseSegment", optional=True),
+        Ref("OptionClauseSegment", optional=True),
+        Ref("DelimiterGrammar", optional=True),
     )
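+    # e.g. (illustrative sketch; names invented):
+    #   SELECT a FROM t1 UNION ALL SELECT a FROM t2 ORDER BY a OPTION (MAXDOP 1);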
 
 
-class DropUserStatementSegment(ansi.DropUserStatementSegment):
-    """A `DROP USER` statement.
+class ForClauseSegment(BaseSegment):
+    """A For Clause segment for TSQL.
 
-    Overriding ANSI to add optional delimiter.
+    This is used to format results as XML or JSON.
     """
 
-    match_grammar = ansi.DropUserStatementSegment.match_grammar.copy(
-        insert=[
-            Ref("DelimiterGrammar", optional=True),
-        ],
+    type = "for_clause"
+
+    _common_directives_for_xml = Sequence(
+        Sequence(
+            "BINARY",
+            "BASE64",
+        ),
+        "TYPE",
+        Sequence(
+            "ROOT",
+            Bracketed(
+                Ref("LiteralGrammar"),
+                optional=True,
+            ),
+        ),
+        optional=True,
     )
 
+    _elements = Sequence("ELEMENTS", OneOf("XSINIL", "ABSENT", optional=True))
 
-class UpdateStatementSegment(BaseSegment):
-    """An `Update` statement.
+    match_grammar = Sequence(
+        "FOR",
+        OneOf(
+            "BROWSE",
+            Sequence(
+                "JSON",
+                Delimited(
+                    OneOf(
+                        "AUTO",
+                        "PATH",
+                    ),
+                    Sequence(
+                        "ROOT",
+                        Bracketed(
+                            Ref("LiteralGrammar"),
+                            optional=True,
+                        ),
+                        optional=True,
+                    ),
+                    Ref.keyword("INCLUDE_NULL_VALUES", optional=True),
+                    Ref.keyword("WITHOUT_ARRAY_WRAPPER", optional=True),
+                ),
+            ),
+            Sequence(
+                "XML",
+                OneOf(
+                    Delimited(
+                        Sequence(
+                            "PATH",
+                            Bracketed(
+                                Ref("LiteralGrammar"),
+                                optional=True,
+                            ),
+                        ),
+                        _common_directives_for_xml,
+                        _elements,
+                    ),
+                    Delimited(
+                        "EXPLICIT",
+                        _common_directives_for_xml,
+                        Ref.keyword("XMLDATA", optional=True),
+                    ),
+                    Delimited(
+                        OneOf(
+                            "AUTO",
+                            Sequence(
+                                "RAW",
+                                Bracketed(
+                                    Ref("LiteralGrammar"),
+                                    optional=True,
+                                ),
+                            ),
+                        ),
+                        _common_directives_for_xml,
+                        _elements,
+                        Sequence(
+                            OneOf(
+                                "XMLDATA",
+                                Sequence(
+                                    "XMLSCHEMA",
+                                    Bracketed(
+                                        Ref("LiteralGrammar"),
+                                        optional=True,
+                                    ),
+                                ),
+                            ),
+                            optional=True,
+                        ),
+                    ),
+                ),
+            ),
+        ),
+    )
 
-    UPDATE <table name> SET <set clause list> [ WHERE <search condition> ]
-    Overriding ANSI in order to allow for PostTableExpressionGrammar (table hints)
+
+class ExecuteScriptSegment(BaseSegment):
+    """`EXECUTE` statement.
+
+    Matching segment name and type from exasol.
+    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/execute-transact-sql?view=sql-server-ver15
     """
 
-    type = "update_statement"
+    type = "execute_script_statement"
     match_grammar = Sequence(
-        "UPDATE",
+        OneOf("EXEC", "EXECUTE"),
+        Sequence(Ref("ParameterNameSegment"), Ref("EqualsSegment"), optional=True),
+        OptionallyBracketed(
+            OneOf(Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"))
+        ),
         Indent,
-        OneOf(Ref("TableReferenceSegment"), Ref("AliasedTableReferenceGrammar")),
-        Ref("PostTableExpressionGrammar", optional=True),
-        Ref("SetClauseListSegment"),
+        Sequence(
+            Sequence(Ref("ParameterNameSegment"), Ref("EqualsSegment"), optional=True),
+            OneOf(
+                "DEFAULT",
+                Ref("LiteralGrammar"),
+                Ref("ParameterNameSegment"),
+                Ref("SingleIdentifierGrammar"),
+            ),
+            Sequence("OUTPUT", optional=True),
+            AnyNumberOf(
+                Ref("CommaSegment"),
+                Sequence(
+                    Ref("ParameterNameSegment"), Ref("EqualsSegment"), optional=True
+                ),
+                OneOf(
+                    "DEFAULT",
+                    Ref("LiteralGrammar"),
+                    Ref("ParameterNameSegment"),
+                    Ref("SingleIdentifierGrammar"),
+                ),
+                Sequence("OUTPUT", optional=True),
+            ),
+            optional=True,
+        ),
         Dedent,
-        Ref("OutputClauseSegment", optional=True),
-        Ref("FromClauseSegment", optional=True),
-        Ref("WhereClauseSegment", optional=True),
-        Ref("OptionClauseSegment", optional=True),
         Ref("DelimiterGrammar", optional=True),
     )
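+    # e.g. (illustrative sketch; names invented):
+    #   EXEC dbo.some_proc @arg1 = 1, @arg2 = @out_var OUTPUT;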
 
 
-class SetClauseListSegment(BaseSegment):
-    """set clause list.
+class CreateSchemaStatementSegment(BaseSegment):
+    """A `CREATE SCHEMA` statement.
 
-    Overriding ANSI to remove Delimited
+    Overriding ANSI to allow for AUTHORIZATION clause
+    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-schema-transact-sql?view=sql-server-ver15
+
+    Not yet implemented: proper schema_element parsing.
+    Once we have an AccessStatementSegment that works for TSQL, this definition should
+    be tweaked to include schema elements.
     """
 
-    type = "set_clause_list"
+    type = "create_schema_statement"
     match_grammar = Sequence(
-        "SET",
-        Indent,
-        Ref("SetClauseSegment"),
-        AnyNumberOf(
-            Ref("CommaSegment"),
-            Ref("SetClauseSegment"),
+        "CREATE",
+        "SCHEMA",
+        Ref("SchemaReferenceSegment"),
+        Sequence(
+            "AUTHORIZATION",
+            Ref("RoleReferenceSegment"),
+            optional=True,
+        ),
+        Ref(
+            "DelimiterGrammar",
+            optional=True,
         ),
-        Dedent,
     )
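+    # e.g. (illustrative): CREATE SCHEMA sales AUTHORIZATION dbo;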
 
 
-class SetClauseSegment(BaseSegment):
-    """Set clause.
-
-    Overriding ANSI to allow for ExpressionSegment on the right
-    """
+class MergeStatementSegment(ansi.MergeStatementSegment):
+    """Contains dialect specific `MERGE` statement."""
 
-    type = "set_clause"
+    type = "merge_statement"
 
     match_grammar = Sequence(
-        Ref("ColumnReferenceSegment"),
-        Ref("AssignmentOperatorSegment"),
-        Ref("ExpressionSegment"),
+        Ref("MergeIntoLiteralGrammar"),
+        Indent,
+        Ref("TableReferenceSegment"),
+        Sequence(
+            "WITH",
+            Bracketed(
+                Delimited(
+                    Ref("TableHintSegment", optional=True),
+                )
+            ),
+            optional=True,
+        ),
+        Ref("AliasExpressionSegment", optional=True),
+        Dedent,
+        "USING",
+        Indent,
+        OneOf(
+            Ref("TableReferenceSegment"),
+            Ref("AliasedTableReferenceGrammar"),
+            Sequence(
+                Bracketed(
+                    Ref("SelectableGrammar"),
+                ),
+                Ref("AliasExpressionSegment", optional=True),
+            ),
+        ),
+        Dedent,
+        Conditional(Indent, indented_using_on=True),
+        Ref("JoinOnConditionSegment"),
+        Conditional(Dedent, indented_using_on=True),
+        Ref("MergeMatchSegment"),
     )
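+    # e.g. (illustrative sketch; names invented):
+    #   MERGE INTO dbo.target AS t
+    #   USING dbo.source AS s ON t.id = s.id
+    #   WHEN MATCHED THEN UPDATE SET t.val = s.val
+    #   WHEN NOT MATCHED THEN INSERT (id, val) VALUES (s.id, s.val)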
 
 
-class PrintStatementSegment(BaseSegment):
-    """PRINT statement segment."""
+class MergeMatchSegment(BaseSegment):
+    """Contains dialect specific merge operations."""
 
-    type = "print_statement"
+    type = "merge_match"
     match_grammar = Sequence(
-        "PRINT",
-        Ref("ExpressionSegment"),
-        Ref("DelimiterGrammar", optional=True),
+        AnyNumberOf(
+            Ref("MergeMatchedClauseSegment"),
+            Ref("MergeNotMatchedClauseSegment"),
+            min_times=1,
+        ),
+        Ref("OutputClauseSegment", optional=True),
+        Ref("OptionClauseSegment", optional=True),
     )
 
 
-class OptionClauseSegment(BaseSegment):
-    """Query Hint clause.
+class MergeMatchedClauseSegment(BaseSegment):
+    """The `WHEN MATCHED` clause within a `MERGE` statement."""
 
-    https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-query?view=sql-server-ver15
-    """
+    type = "merge_when_matched_clause"
 
-    type = "option_clause"
     match_grammar = Sequence(
-        "OPTION",
-        Bracketed(
-            Delimited(Ref("QueryHintSegment")),
+        "WHEN",
+        "MATCHED",
+        Sequence(
+            "AND",
+            Ref("ExpressionSegment"),
+            optional=True,
+        ),
+        Indent,
+        "THEN",
+        OneOf(
+            Ref("MergeUpdateClauseSegment"),
+            Ref("MergeDeleteClauseSegment"),
         ),
+        Dedent,
     )
 
 
-class QueryHintSegment(BaseSegment):
-    """Query Hint segment.
+class MergeNotMatchedClauseSegment(BaseSegment):
+    """The `WHEN NOT MATCHED` clause within a `MERGE` statement."""
 
-    https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-query?view=sql-server-ver15
-    """
+    type = "merge_when_not_matched_clause"
 
-    type = "query_hint_segment"
     match_grammar = OneOf(
-        Sequence(  # Azure Synapse Analytics specific
-            "LABEL",
-            Ref("EqualsSegment"),
-            Ref("QuotedLiteralSegmentOptWithN"),
-        ),
-        Sequence(
-            OneOf("HASH", "ORDER"),
-            "GROUP",
-        ),
-        Sequence(OneOf("MERGE", "HASH", "CONCAT"), "UNION"),
-        Sequence(OneOf("LOOP", "MERGE", "HASH"), "JOIN"),
-        Sequence("EXPAND", "VIEWS"),
-        Sequence(
-            OneOf(
-                "FAST",
-                "MAXDOP",
-                "MAXRECURSION",
-                "QUERYTRACEON",
-                Sequence(
-                    OneOf(
-                        "MAX_GRANT_PERCENT",
-                        "MIN_GRANT_PERCENT",
-                    ),
-                    Ref("EqualsSegment"),
-                ),
-            ),
-            Ref("NumericLiteralSegment"),
-        ),
-        Sequence("FORCE", "ORDER"),
         Sequence(
-            OneOf("FORCE", "DISABLE"),
-            OneOf("EXTERNALPUSHDOWN", "SCALEOUTEXECUTION"),
+            "WHEN",
+            "NOT",
+            "MATCHED",
+            Sequence("BY", "TARGET", optional=True),
+            Sequence("AND", Ref("ExpressionSegment"), optional=True),
+            Indent,
+            "THEN",
+            Ref("MergeInsertClauseSegment"),
+            Dedent,
         ),
         Sequence(
+            "WHEN",
+            "NOT",
+            "MATCHED",
+            "BY",
+            "SOURCE",
+            Sequence("AND", Ref("ExpressionSegment"), optional=True),
+            Indent,
+            "THEN",
             OneOf(
-                "KEEP",
-                "KEEPFIXED",
-                "ROBUST",
+                Ref("MergeUpdateClauseSegment"),
+                Ref("MergeDeleteClauseSegment"),
             ),
-            "PLAN",
+            Dedent,
         ),
-        "IGNORE_NONCLUSTERED_COLUMNSTORE_INDEX",
-        "NO_PERFORMANCE_SPOOL",
-        Sequence(
-            "OPTIMIZE",
-            "FOR",
-            OneOf(
-                "UNKNOWN",
-                Bracketed(
-                    Ref("ParameterNameSegment"),
-                    OneOf(
-                        "UNKNOWN", Sequence(Ref("EqualsSegment"), Ref("LiteralGrammar"))
-                    ),
+    )
+
+
+class MergeInsertClauseSegment(BaseSegment):
+    """`INSERT` clause within the `MERGE` statement."""
+
+    type = "merge_insert_clause"
+    match_grammar = Sequence(
+        "INSERT",
+        Indent,
+        Ref("BracketedColumnReferenceListGrammar", optional=True),
+        Dedent,
+        "VALUES",
+        Indent,
+        OneOf(
+            Bracketed(
+                Delimited(
                     AnyNumberOf(
-                        Ref("CommaSegment"),
-                        Ref("ParameterNameSegment"),
-                        OneOf(
-                            "UNKNOWN",
-                            Sequence(Ref("EqualsSegment"), Ref("LiteralGrammar")),
-                        ),
+                        Ref("ExpressionSegment"),
                     ),
                 ),
             ),
-        ),
-        Sequence("PARAMETERIZATION", OneOf("SIMPLE", "FORCED")),
-        "RECOMPILE",
-        Sequence(
-            "USE",
-            "HINT",
-            Bracketed(
-                Ref("QuotedLiteralSegment"),
-                AnyNumberOf(Ref("CommaSegment"), Ref("QuotedLiteralSegment")),
+            Sequence(
+                "DEFAULT",
+                "VALUES",
             ),
         ),
-        Sequence(
-            "USE",
-            "PLAN",
-            Ref("QuotedLiteralSegmentOptWithN"),
-        ),
-        Sequence(
-            "TABLE",
-            "HINT",
-            Ref("ObjectReferenceSegment"),
-            Delimited(Ref("TableHintSegment")),
-        ),
+        Dedent,
     )
 
 
-class PostTableExpressionGrammar(BaseSegment):
-    """Table Hint clause.  Overloading the PostTableExpressionGrammar to implement.
+class OutputClauseSegment(BaseSegment):
+    """OUTPUT Clause used within DELETE, INSERT, UPDATE, MERGE.
 
-    https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-table?view=sql-server-ver15
+    https://docs.microsoft.com/en-us/sql/t-sql/queries/output-clause-transact-sql?view=sql-server-ver15
     """
 
-    type = "post_table_expression"
-    match_grammar = Sequence(
-        Sequence("WITH", optional=True),
-        Bracketed(
-            Ref("TableHintSegment"),
-            AnyNumberOf(
-                Ref("CommaSegment"),
-                Ref("TableHintSegment"),
+    type = "output_clause"
+    match_grammar = AnyNumberOf(
+        Sequence(
+            "OUTPUT",
+            Indent,
+            Delimited(
+                AnyNumberOf(
+                    Ref("WildcardExpressionSegment"),
+                    Sequence(
+                        Ref("BaseExpressionElementGrammar"),
+                        Ref("AliasExpressionSegment", optional=True),
+                    ),
+                    Ref("SingleIdentifierGrammar"),
+                ),
+            ),
+            Dedent,
+            Sequence(
+                "INTO",
+                Indent,
+                Ref("TableReferenceSegment"),
+                Bracketed(
+                    Delimited(
+                        Ref("ColumnReferenceSegment"),
+                    ),
+                    optional=True,
+                ),
+                Dedent,
+                optional=True,
             ),
         ),
     )
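+    # e.g. (illustrative sketch; names invented):
+    #   OUTPUT inserted.id INTO dbo.audit_log (id)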
 
 
-class TableHintSegment(BaseSegment):
-    """Table Hint segment.
+class ThrowStatementSegment(BaseSegment):
+    """A THROW statement.
 
-    https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-table?view=sql-server-ver15
+    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/throw-transact-sql?view=sql-server-ver15
     """
 
-    type = "query_hint_segment"
-    match_grammar = OneOf(
-        "NOEXPAND",
+    type = "throw_statement"
+    match_grammar = Sequence(
+        "THROW",
         Sequence(
-            "INDEX",
-            Bracketed(
-                Delimited(
-                    OneOf(Ref("IndexReferenceSegment"), Ref("NumericLiteralSegment")),
-                ),
+            OneOf(
+                # error_number
+                Ref("NumericLiteralSegment"),
+                Ref("ParameterNameSegment"),
             ),
-        ),
-        Sequence(
-            "INDEX",
-            Ref("EqualsSegment"),
-            Bracketed(
-                OneOf(Ref("IndexReferenceSegment"), Ref("NumericLiteralSegment")),
+            Ref("CommaSegment"),
+            OneOf(
+                # message
+                Ref("QuotedLiteralSegment"),
+                Ref("QuotedLiteralSegmentWithN"),
+                Ref("ParameterNameSegment"),
+            ),
+            Ref("CommaSegment"),
+            OneOf(
+                # state
+                Ref("NumericLiteralSegment"),
+                Ref("ParameterNameSegment"),
             ),
+            optional=True,
         ),
-        "KEEPIDENTITY",
-        "KEEPDEFAULTS",
-        Sequence(
-            "FORCESEEK",
-            Bracketed(
-                Ref("IndexReferenceSegment"),
-                Bracketed(
-                    Ref("SingleIdentifierGrammar"),
-                    AnyNumberOf(Ref("CommaSegment"), Ref("SingleIdentifierGrammar")),
+    )
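+    # e.g. (illustrative): THROW 50000, 'Something went wrong.', 1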
+
+
+class RaiserrorStatementSegment(BaseSegment):
+    """RAISERROR statement.
+
+    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/raiserror-transact-sql?view=sql-server-ver15
+    """
+
+    type = "raiserror_statement"
+    match_grammar = Sequence(
+        "RAISERROR",
+        Bracketed(
+            Delimited(
+                OneOf(
+                    Ref("NumericLiteralSegment"),
+                    Ref("QuotedLiteralSegment"),
+                    Ref("QuotedLiteralSegmentWithN"),
+                    Ref("ParameterNameSegment"),
+                ),
+                OneOf(
+                    Ref("NumericLiteralSegment"),
+                    Ref("QualifiedNumericLiteralSegment"),
+                    Ref("ParameterNameSegment"),
+                ),
+                OneOf(
+                    Ref("NumericLiteralSegment"),
+                    Ref("QualifiedNumericLiteralSegment"),
+                    Ref("ParameterNameSegment"),
+                ),
+                AnyNumberOf(
+                    Ref("LiteralGrammar"),
+                    Ref("ParameterNameSegment"),
+                    min_times=0,
+                    max_times=20,
                 ),
-                optional=True,
             ),
         ),
-        "FORCESCAN",
-        "HOLDLOCK",
-        "IGNORE_CONSTRAINTS",
-        "IGNORE_TRIGGERS",
-        "NOLOCK",
-        "NOWAIT",
-        "PAGLOCK",
-        "READCOMMITTED",
-        "READCOMMITTEDLOCK",
-        "READPAST",
-        "READUNCOMMITTED",
-        "REPEATABLEREAD",
-        "ROWLOCK",
-        "SERIALIZABLE",
-        "SNAPSHOT",
         Sequence(
-            "SPATIAL_WINDOW_MAX_CELLS",
-            Ref("EqualsSegment"),
-            Ref("NumericLiteralSegment"),
+            "WITH",
+            Delimited(
+                "LOG",
+                "NOWAIT",
+                "SETERROR",
+            ),
+            optional=True,
         ),
-        "TABLOCK",
-        "TABLOCKX",
-        "UPDLOCK",
-        "XLOCK",
     )
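+    # e.g. (illustrative): RAISERROR ('Error: %d', 16, 1, 42) WITH NOWAIT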
 
 
-class SetOperatorSegment(BaseSegment):
-    """A set operator such as Union, Except or Intersect.
+class WindowSpecificationSegment(BaseSegment):
+    """Window specification within OVER(...).
 
-    Override ANSI to remove TSQL non-keyword MINUS.
+    Overriding ANSI to remove the window name option, which TSQL does not support.
     """
 
-    type = "set_operator"
-    match_grammar = OneOf(
-        Sequence("UNION", OneOf("DISTINCT", "ALL", optional=True)),
-        "INTERSECT",
-        "EXCEPT",
+    type = "window_specification"
+    match_grammar = Sequence(
+        Ref("PartitionClauseSegment", optional=True),
+        Ref("OrderByClauseSegment", optional=True),
+        Ref("FrameClauseSegment", optional=True),
+        optional=True,
+        ephemeral_name="OverClauseContent",
     )
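+    # e.g. (illustrative): the PARTITION BY dept ORDER BY salary DESC
+    # inside OVER (...).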
 
 
-class SetExpressionSegment(BaseSegment):
-    """A set expression with either Union, Minus, Except or Intersect.
+class GotoStatement(BaseSegment):
+    """GOTO statement.
 
-    Overriding ANSI to include OPTION clause.
+    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/goto-transact-sql?view=sql-server-ver15
     """
 
-    type = "set_expression"
-    # match grammar
-    match_grammar = Sequence(
-        Ref("NonSetSelectableGrammar"),
-        AnyNumberOf(
-            Sequence(
-                Ref("SetOperatorSegment"),
-                Ref("NonSetSelectableGrammar"),
-            ),
-            min_times=1,
-        ),
-        Ref("OrderByClauseSegment", optional=True),
-        Ref("OptionClauseSegment", optional=True),
-        Ref("DelimiterGrammar", optional=True),
-    )
+    type = "goto_statement"
+    match_grammar = Sequence("GOTO", Ref("SingleIdentifierGrammar"))
 
 
-class ExecuteScriptSegment(BaseSegment):
-    """`EXECUTE` statement.
+class CreateTriggerStatementSegment(BaseSegment):
+    """Create Trigger Statement.
 
-    Matching segment name and type from exasol.
-    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/execute-transact-sql?view=sql-server-ver15
+    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-trigger-transact-sql?view=sql-server-ver15
     """
 
-    type = "execute_script_statement"
-    match_grammar = Sequence(
-        OneOf("EXEC", "EXECUTE"),
-        Sequence(Ref("ParameterNameSegment"), Ref("EqualsSegment"), optional=True),
-        OptionallyBracketed(Ref("ObjectReferenceSegment")),
-        Indent,
+    type = "create_trigger"
+
+    match_grammar: Matchable = Sequence(
+        "CREATE",
+        "TRIGGER",
+        Ref("TriggerReferenceSegment"),
+        "ON",
+        OneOf(
+            Ref("TableReferenceSegment"),
+            Sequence("ALL", "SERVER"),
+            "DATABASE",
+        ),
         Sequence(
-            Sequence(Ref("ParameterNameSegment"), Ref("EqualsSegment"), optional=True),
+            "WITH",
             OneOf(
-                "DEFAULT",
-                Ref("LiteralGrammar"),
-                Ref("ParameterNameSegment"),
-                Ref("SingleIdentifierGrammar"),
-            ),
-            Sequence("OUTPUT", optional=True),
-            AnyNumberOf(
-                Ref("CommaSegment"),
                 Sequence(
-                    Ref("ParameterNameSegment"), Ref("EqualsSegment"), optional=True
+                    Ref.keyword("ENCRYPTION", optional=True),
+                    Sequence(
+                        "EXECUTE",
+                        "AS",
+                        Ref("SingleQuotedIdentifierSegment"),
+                        optional=True,
+                    ),
                 ),
-                OneOf(
-                    "DEFAULT",
-                    Ref("LiteralGrammar"),
-                    Ref("ParameterNameSegment"),
-                    Ref("SingleIdentifierGrammar"),
+                Sequence(
+                    Ref.keyword("NATIVE_COMPILATION", optional=True),
+                    Ref.keyword("SCHEMABINDING", optional=True),
+                    Sequence(
+                        "EXECUTE",
+                        "AS",
+                        Ref("SingleQuotedIdentifierSegment"),
+                        optional=True,
+                    ),
+                ),
+                Sequence(
+                    Ref.keyword("ENCRYPTION", optional=True),
+                    Sequence(
+                        "EXECUTE",
+                        "AS",
+                        Ref("SingleQuotedIdentifierSegment"),
+                        optional=True,
+                    ),
                 ),
-                Sequence("OUTPUT", optional=True),
             ),
             optional=True,
         ),
-        Dedent,
-        Ref("DelimiterGrammar", optional=True),
+        OneOf(
+            Sequence("FOR", Delimited(Ref("SingleIdentifierGrammar"), optional=True)),
+            "AFTER",
+            Sequence("INSTEAD", "OF"),
+            optional=True,
+        ),
+        Delimited(
+            "INSERT",
+            "UPDATE",
+            "DELETE",
+            optional=True,
+        ),
+        Sequence("WITH", "APPEND", optional=True),
+        Sequence("NOT", "FOR", "REPLICATION", optional=True),
+        "AS",
+        Ref("OneOrMoreStatementsGrammar"),
+        # TODO: EXTERNAL NAME
     )
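+    # e.g. (illustrative sketch; names invented, body shortened):
+    #   CREATE TRIGGER dbo.tr_example ON dbo.some_table
+    #   AFTER INSERT, UPDATE
+    #   AS SELECT 1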
 
 
-class CreateSchemaStatementSegment(BaseSegment):
-    """A `CREATE SCHEMA` statement.
+class DropTriggerStatementSegment(BaseSegment):
+    """Drop Trigger Statement.
+
+    https://docs.microsoft.com/en-us/sql/t-sql/statements/drop-trigger-transact-sql?view=sql-server-ver15
+    """
+
+    type = "drop_trigger"
+
+    match_grammar: Matchable = Sequence(
+        "DROP",
+        "TRIGGER",
+        Ref("IfExistsGrammar", optional=True),
+        Delimited(Ref("TriggerReferenceSegment")),
+        Sequence("ON", OneOf("DATABASE", Sequence("ALL", "SERVER")), optional=True),
+    )
+
 
-    Overriding ANSI to allow for AUTHORIZATION clause
-    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-schema-transact-sql?view=sql-server-ver15
+class DisableTriggerStatementSegment(BaseSegment):
+    """Disable Trigger Statement.
 
-    Not yet implemented: proper schema_element parsing.
-    Once we have an AccessStatementSegment that works for TSQL, this definition should
-    be tweaked to include schema elements.
+    https://docs.microsoft.com/en-us/sql/t-sql/statements/disable-trigger-transact-sql?view=sql-server-ver15
     """
 
-    type = "create_schema_statement"
-    match_grammar = Sequence(
-        "CREATE",
-        "SCHEMA",
-        Ref("SchemaReferenceSegment"),
-        Sequence(
-            "AUTHORIZATION",
-            Ref("SingleIdentifierGrammar"),
-            optional=True,
+    type = "disable_trigger"
+
+    match_grammar: Matchable = Sequence(
+        "DISABLE",
+        "TRIGGER",
+        OneOf(
+            Delimited(Ref("TriggerReferenceSegment")),
+            "ALL",
         ),
-        Ref(
-            "DelimiterGrammar",
+        Sequence(
+            "ON",
+            OneOf(Ref("ObjectReferenceSegment"), "DATABASE", Sequence("ALL", "SERVER")),
             optional=True,
         ),
     )
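+    # e.g. (illustrative sketch; names invented):
+    #   DISABLE TRIGGER dbo.tr_example ON dbo.some_table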
 
 
-class MergeMatchSegment(BaseSegment):
-    """Contains dialect specific merge operations."""
+class LabelStatementSegment(BaseSegment):
+    """Label Statement, for a GOTO statement.
 
-    type = "merge_match"
-    match_grammar = Sequence(
-        AnyNumberOf(
-            Ref("MergeMatchedClauseSegment"),
-            Ref("MergeNotMatchedClauseSegment"),
-            min_times=1,
-        ),
-        Ref("OutputClauseSegment", optional=True),
-        Ref("OptionClauseSegment", optional=True),
+    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/goto-transact-sql?view=sql-server-ver15
+    """
+
+    type = "label_segment"
+
+    match_grammar: Matchable = Sequence(
+        Ref("NakedIdentifierSegment"), Ref("ColonSegment"), allow_gaps=False
     )
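+    # e.g. (illustrative): the label cleanup: that a GOTO cleanup jumps to.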
 
 
-class MergeMatchedClauseSegment(BaseSegment):
-    """The `WHEN MATCHED` clause within a `MERGE` statement."""
+class AccessStatementSegment(BaseSegment):
+    """A `GRANT` or `REVOKE` statement.
 
-    type = "merge_when_matched_clause"
+    https://docs.microsoft.com/en-us/sql/t-sql/statements/grant-transact-sql?view=sql-server-ver15
+    https://docs.microsoft.com/en-us/sql/t-sql/statements/deny-transact-sql?view=sql-server-ver15
+    https://docs.microsoft.com/en-us/sql/t-sql/statements/revoke-transact-sql?view=sql-server-ver15
+    """
 
-    match_grammar = Sequence(
-        "WHEN",
-        "MATCHED",
+    type = "access_statement"
+
+    # Privileges that can be set on the account (adapted from the Snowflake dialect)
+    _global_permissions = OneOf(
         Sequence(
-            "AND",
-            Ref("ExpressionSegment"),
-            optional=True,
-        ),
-        Indent,
-        "THEN",
-        OneOf(
-            Ref("MergeUpdateClauseSegment"),
-            Ref("MergeDeleteClauseSegment"),
+            "CREATE",
+            OneOf(
+                "ROLE",
+                "USER",
+                "WAREHOUSE",
+                "DATABASE",
+                "INTEGRATION",
+            ),
         ),
-        Dedent,
+        Sequence("APPLY", "MASKING", "POLICY"),
+        "EXECUTE",
     )
 
+    _schema_object_names = [
+        "TABLE",
+        "VIEW",
+        "FUNCTION",
+        "PROCEDURE",
+        "SEQUENCE",
+    ]
 
-class MergeNotMatchedClauseSegment(BaseSegment):
-    """The `WHEN NOT MATCHED` clause within a `MERGE` statement."""
+    _schema_object_types = OneOf(
+        *_schema_object_names,
+        Sequence("EXTERNAL", "TABLE"),
+        Sequence("FILE", "FORMAT"),
+    )
 
-    type = "merge_when_not_matched_clause"
+    # We reuse the object names above and simply append an `S` to the end of them to get
+    # plurals
+    _schema_object_types_plural = OneOf(
+        *[f"{object_name}S" for object_name in _schema_object_names]
+    )
 
-    match_grammar = OneOf(
+    _permissions = Sequence(
+        OneOf(
+            "ALTER",
+            "CONTROL",
+            "DELETE",
+            "EXECUTE",
+            "INSERT",
+            "RECEIVE",
+            "REFERENCES",
+            "SELECT",
+            Sequence("TAKE", "OWNERSHIP"),
+            "UPDATE",
+            Sequence("VIEW", "CHANGE", "TRACKING"),
+            Sequence("VIEW", "DEFINITION"),
+        ),
+        Ref("BracketedColumnReferenceListGrammar", optional=True),
+    )
+
+    # All of the object types that we can grant permissions on.
+    # This list will contain ANSI SQL objects as well as dialect-specific ones.
+    _objects = Sequence(
+        OneOf(
+            "DATABASE",
+            "LANGUAGE",
+            "SCHEMA",
+            "ROLE",
+            "TYPE",
+            Sequence(
+                "FOREIGN",
+                OneOf("SERVER", Sequence("DATA", "WRAPPER")),
+            ),
+            Sequence("ALL", "SCHEMAS", "IN", "DATABASE"),
+            _schema_object_types,
+            Sequence("ALL", _schema_object_types_plural, "IN", "SCHEMA"),
+            optional=True,
+        ),
+        Delimited(Ref("ObjectReferenceSegment"), terminator=OneOf("TO", "FROM")),
+        Ref("FunctionParameterListGrammar", optional=True),
+    )
+
+    match_grammar: Matchable = OneOf(
+        # Based on https://www.postgresql.org/docs/13/sql-grant.html
+        # and https://docs.snowflake.com/en/sql-reference/sql/grant-privilege.html
         Sequence(
-            "WHEN",
-            "NOT",
-            "MATCHED",
-            Sequence("BY", "TARGET", optional=True),
-            Sequence("AND", Ref("ExpressionSegment"), optional=True),
-            Indent,
-            "THEN",
-            Ref("MergeInsertClauseSegment"),
-            Dedent,
+            "GRANT",
+            OneOf(
+                Sequence(
+                    Delimited(
+                        OneOf(_global_permissions, _permissions),
+                        terminator="ON",
+                    ),
+                ),
+                Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)),
+            ),
+            "ON",
+            Sequence(
+                OneOf("LOGIN", "DATABASE", "OBJECT", "ROLE", "SCHEMA", "USER"),
+                Ref("CastOperatorSegment"),
+                optional=True,
+            ),
+            _objects,
+            "TO",
+            Delimited(
+                OneOf(Ref("RoleReferenceSegment"), Ref("FunctionSegment")),
+            ),
+            OneOf(
+                Sequence("WITH", "GRANT", "OPTION"),
+                optional=True,
+            ),
+            Sequence(
+                "AS",
+                Ref("ObjectReferenceSegment"),
+                optional=True,
+            ),
         ),
         Sequence(
-            "WHEN",
-            "NOT",
-            "MATCHED",
-            "BY",
-            "SOURCE",
-            Sequence("AND", Ref("ExpressionSegment"), optional=True),
-            Indent,
-            "THEN",
+            "DENY",
             OneOf(
-                Ref("MergeUpdateClauseSegment"),
-                Ref("MergeDeleteClauseSegment"),
+                Delimited(
+                    OneOf(_global_permissions, _permissions),
+                    terminator="ON",
+                ),
+                Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)),
+            ),
+            "ON",
+            Sequence(
+                OneOf("LOGIN", "DATABASE", "OBJECT", "ROLE", "SCHEMA", "USER"),
+                Ref("CastOperatorSegment"),
+                optional=True,
+            ),
+            _objects,
+            OneOf("TO"),
+            Delimited(
+                Ref("RoleReferenceSegment"),
+            ),
+            Sequence(
+                Ref.keyword("CASCADE", optional=True),
+                Ref("ObjectReferenceSegment", optional=True),
+                optional=True,
             ),
-            Dedent,
         ),
-    )
-
-
-class MergeInsertClauseSegment(BaseSegment):
-    """`INSERT` clause within the `MERGE` statement."""
-
-    type = "merge_insert_clause"
-    match_grammar = Sequence(
-        "INSERT",
-        Indent,
-        Ref("BracketedColumnReferenceListGrammar", optional=True),
-        Dedent,
-        "VALUES",
-        Indent,
-        OneOf(
-            Bracketed(
+        Sequence(
+            "REVOKE",
+            Sequence("GRANT", "OPTION", "FOR", optional=True),
+            OneOf(
                 Delimited(
-                    AnyNumberOf(
-                        Ref("ExpressionSegment"),
-                    ),
+                    OneOf(_global_permissions, _permissions),
+                    terminator="ON",
                 ),
+                Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)),
+            ),
+            "ON",
+            Sequence(
+                OneOf("LOGIN", "DATABASE", "OBJECT", "ROLE", "SCHEMA", "USER"),
+                Ref("CastOperatorSegment"),
+                optional=True,
+            ),
+            _objects,
+            OneOf("TO", "FROM"),
+            Delimited(
+                Ref("RoleReferenceSegment"),
             ),
             Sequence(
-                "DEFAULT",
-                "VALUES",
+                Ref.keyword("CASCADE", optional=True),
+                Ref("ObjectReferenceSegment", optional=True),
+                optional=True,
             ),
         ),
-        Dedent,
     )
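
As an illustrative check (not part of the diff), the three statement forms this
grammar covers can be exercised through sqlfluff's Python API; the object and
role names below are hypothetical:

    import sqlfluff

    sql = """
    GRANT SELECT, INSERT ON dbo.users TO reporting_role WITH GRANT OPTION;
    DENY DELETE ON dbo.users TO intern_role;
    REVOKE SELECT ON dbo.users FROM reporting_role;
    """
    print(sqlfluff.lint(sql, dialect="tsql"))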
 
 
-class OutputClauseSegment(BaseSegment):
-    """OUTPUT Clause used within DELETE, INSERT, UPDATE, MERGE.
+class CreateTypeStatementSegment(BaseSegment):
+    """A `CREATE TYPE` statement.
 
-    https://docs.microsoft.com/en-us/sql/t-sql/queries/output-clause-transact-sql?view=sql-server-ver15
+    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-type-transact-sql?view=sql-server-ver15
     """
 
-    type = "output_clause"
-    match_grammar = AnyNumberOf(
-        Sequence(
-            "OUTPUT",
-            Indent,
-            Delimited(
-                AnyNumberOf(
-                    Ref("WildcardExpressionSegment"),
-                    Sequence(
-                        Ref("BaseExpressionElementGrammar"),
-                        Ref("AliasExpressionSegment", optional=True),
-                    ),
-                    Ref("SingleIdentifierGrammar"),
-                ),
-            ),
-            Dedent,
+    type = "create_type_statement"
+    match_grammar: Matchable = Sequence(
+        "CREATE",
+        "TYPE",
+        Ref("ObjectReferenceSegment"),
+        OneOf(
+            Sequence("FROM", Ref("ObjectReferenceSegment")),
             Sequence(
-                "INTO",
-                Indent,
-                Ref("TableReferenceSegment"),
-                Bracketed(
-                    Delimited(
-                        Ref("ColumnReferenceSegment"),
+                "AS",
+                "TABLE",
+                Sequence(
+                    Bracketed(
+                        Delimited(
+                            OneOf(
+                                Ref("TableConstraintSegment"),
+                                Ref("ColumnDefinitionSegment"),
+                                Ref("TableIndexSegment"),
+                            ),
+                            allow_trailing=True,
+                        )
                     ),
-                    optional=True,
                 ),
-                Dedent,
-                optional=True,
             ),
         ),
     )
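
For orientation, a hypothetical table type that the `AS TABLE` branch of this
grammar is intended to match:

    import sqlfluff

    sql = "CREATE TYPE dbo.point_table AS TABLE (x INT NOT NULL, y INT NOT NULL);"
    print(sqlfluff.lint(sql, dialect="tsql"))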
 
 
-class ThrowStatementSegment(BaseSegment):
-    """A THROW statement.
+class OpenCursorStatementSegment(BaseSegment):
+    """An `OPEN` cursor statement.
 
-    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/throw-transact-sql?view=sql-server-ver15
+    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/open-transact-sql?view=sql-server-ver15
     """
 
-    type = "throw_statement"
-    match_grammar = Sequence(
-        "THROW",
-        Sequence(
-            OneOf(
-                # error_number
-                Ref("NumericLiteralSegment"),
-                Ref("ParameterNameSegment"),
-            ),
-            Ref("CommaSegment"),
-            OneOf(
-                # message
-                Ref("QuotedLiteralSegment"),
-                Ref("QuotedLiteralSegmentWithN"),
-                Ref("ParameterNameSegment"),
-            ),
-            Ref("CommaSegment"),
-            OneOf(
-                # state
-                Ref("NumericLiteralSegment"),
-                Ref("ParameterNameSegment"),
-            ),
-            optional=True,
-        ),
+    type = "open_cursor_statement"
+    match_grammar: Matchable = Sequence(
+        "OPEN",
+        Ref("CursorNameGrammar"),
     )
 
 
-class RaiserrorStatementSegment(BaseSegment):
-    """RAISERROR statement.
+class CloseCursorStatementSegment(BaseSegment):
+    """A `CLOSE` cursor statement.
 
-    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/raiserror-transact-sql?view=sql-server-ver15
+    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/close-transact-sql?view=sql-server-ver15
     """
 
-    type = "raiserror_statement"
-    match_grammar = Sequence(
-        "RAISERROR",
-        Bracketed(
-            Delimited(
-                OneOf(
-                    Ref("NumericLiteralSegment"),
-                    Ref("QuotedLiteralSegment"),
-                    Ref("QuotedLiteralSegmentWithN"),
-                    Ref("ParameterNameSegment"),
-                ),
-                OneOf(
-                    Ref("NumericLiteralSegment"),
-                    Ref("QualifiedNumericLiteralSegment"),
-                    Ref("ParameterNameSegment"),
-                ),
-                OneOf(
-                    Ref("NumericLiteralSegment"),
-                    Ref("QualifiedNumericLiteralSegment"),
-                    Ref("ParameterNameSegment"),
-                ),
-                AnyNumberOf(
-                    Ref("LiteralGrammar"),
-                    Ref("ParameterNameSegment"),
-                    min_times=0,
-                    max_times=20,
-                ),
-            ),
-        ),
-        Sequence(
-            "WITH",
-            Delimited(
-                "LOG",
-                "NOWAIT",
-                "SETERROR",
-            ),
-            optional=True,
-        ),
+    type = "close_cursor_statement"
+    match_grammar: Matchable = Sequence(
+        "CLOSE",
+        Ref("CursorNameGrammar"),
     )
 
 
-class WindowSpecificationSegment(BaseSegment):
-    """Window specification within OVER(...).
+class DeallocateCursorStatementSegment(BaseSegment):
+    """A `DEALLOCATE` cursor statement.
 
-    Overriding ANSI to remove window name option not supported by TSQL
+    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/deallocate-transact-sql?view=sql-server-ver15
     """
 
-    type = "window_specification"
-    match_grammar = Sequence(
-        Ref("PartitionClauseSegment", optional=True),
-        Ref("OrderByClauseSegment", optional=True),
-        Ref("FrameClauseSegment", optional=True),
-        optional=True,
-        ephemeral_name="OverClauseContent",
+    type = "deallocate_cursor_statement"
+    match_grammar: Matchable = Sequence(
+        "DEALLOCATE",
+        Ref("CursorNameGrammar"),
     )
 
 
-class GotoStatement(BaseSegment):
-    """GOTO statement.
+class FetchCursorStatementSegment(BaseSegment):
+    """A `FETCH` cursor statement.
 
-    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/goto-transact-sql?view=sql-server-ver15
+    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/fetch-transact-sql?view=sql-server-ver15
     """
 
-    type = "goto_statement"
-    match_grammar = Sequence("GOTO", Ref("SingleIdentifierGrammar"))
+    type = "fetch_cursor_statement"
+    match_grammar: Matchable = Sequence(
+        "FETCH",
+        OneOf("NEXT", "PRIOR", "FIRST", "LAST", optional=True),
+        "FROM",
+        Ref("CursorNameGrammar"),
+        Sequence("INTO", Delimited(Ref("ParameterNameSegment")), optional=True),
+    )
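
Taken together, the four cursor segments above cover the usual lifecycle. A
sketch with a hypothetical cursor and variables (assumes the surrounding batch
declares them):

    import sqlfluff

    sql = """
    OPEN my_cursor;
    FETCH NEXT FROM my_cursor INTO @id, @name;
    CLOSE my_cursor;
    DEALLOCATE my_cursor;
    """
    print(sqlfluff.lint(sql, dialect="tsql"))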
 
 
-class CreateTriggerStatementSegment(BaseSegment):
-    """Create Trigger Statement.
+class ConcatSegment(ansi.CompositeBinaryOperatorSegment):
+    """Concat operator."""
 
-    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-trigger-transact-sql?view=sql-server-ver15
-    """
+    match_grammar: Matchable = Ref("PlusSegment")
 
-    type = "create_trigger"
 
+class CreateSynonymStatementSegment(BaseSegment):
+    """A `CREATE SYNONYM` statement."""
+
+    type = "create_synonym_statement"
+    # https://learn.microsoft.com/en-us/sql/t-sql/statements/create-synonym-transact-sql
     match_grammar: Matchable = Sequence(
         "CREATE",
-        "TRIGGER",
-        Ref("TriggerReferenceSegment"),
-        "ON",
-        OneOf(
-            Ref("TableReferenceSegment"),
-            Sequence("ALL", "SERVER"),
-            "DATABASE",
-        ),
-        Sequence(
-            "WITH",
-            OneOf(
-                Sequence(
-                    Ref.keyword("ENCRYPTION", optional=True),
-                    Sequence(
-                        "EXECUTE",
-                        "AS",
-                        Ref("SingleQuotedIdentifierSegment"),
-                        optional=True,
-                    ),
-                ),
-                Sequence(
-                    Ref.keyword("NATIVE_COMPILATION", optional=True),
-                    Ref.keyword("SCHEMABINDING", optional=True),
-                    Sequence(
-                        "EXECUTE",
-                        "AS",
-                        Ref("SingleQuotedIdentifierSegment"),
-                        optional=True,
-                    ),
-                ),
-                Sequence(
-                    Ref.keyword("ENCRYPTION", optional=True),
-                    Sequence(
-                        "EXECUTE",
-                        "AS",
-                        Ref("SingleQuotedIdentifierSegment"),
-                        optional=True,
-                    ),
-                ),
-            ),
-            optional=True,
-        ),
-        OneOf(
-            Sequence("FOR", Delimited(Ref("SingleIdentifierGrammar"), optional=True)),
-            "AFTER",
-            Sequence("INSTEAD", "OF"),
-            optional=True,
-        ),
-        Delimited(
-            "INSERT",
-            "UPDATE",
-            "DELETE",
-            optional=True,
-        ),
-        Sequence("WITH", "APPEND", optional=True),
-        Sequence("NOT", "FOR", "REPLICATION", optional=True),
-        "AS",
-        Ref("OneOrMoreStatementsGrammar"),
-        # TODO: EXTERNAL NAME
+        "SYNONYM",
+        Ref("SynonymReferenceSegment"),
+        "FOR",
+        Ref("ObjectReferenceSegment"),
     )
 
 
-class DropTriggerStatementSegment(BaseSegment):
-    """Drop Trigger Statement.
-
-    https://docs.microsoft.com/en-us/sql/t-sql/statements/drop-trigger-transact-sql?view=sql-server-ver15
-    """
-
-    type = "drop_trigger"
+class DropSynonymStatementSegment(BaseSegment):
+    """A `DROP SYNONYM` statement."""
 
+    type = "drop_synonym_statement"
+    # https://learn.microsoft.com/en-us/sql/t-sql/statements/drop-synonym-transact-sql
     match_grammar: Matchable = Sequence(
         "DROP",
-        "TRIGGER",
+        "SYNONYM",
         Ref("IfExistsGrammar", optional=True),
-        Delimited(Ref("TriggerReferenceSegment")),
-        Sequence("ON", OneOf("DATABASE", Sequence("ALL", "SERVER")), optional=True),
+        Ref("SynonymReferenceSegment"),
     )
 
 
-class DisableTriggerStatementSegment(BaseSegment):
-    """Disable Trigger Statement.
+class SynonymReferenceSegment(ansi.ObjectReferenceSegment):
+    """A reference to a synonym.
 
-    https://docs.microsoft.com/en-us/sql/t-sql/statements/disable-trigger-transact-sql?view=sql-server-ver15
+    A synonym may optionally specify a schema, but may not specify a server or
+    database name.
     """
 
-    type = "disable_trigger"
+    type = "synonym_reference"
+    # Match grammar (whitespace is allowed between the parts).
+    match_grammar: Matchable = Sequence(
+        Ref("SingleIdentifierGrammar"),
+        AnyNumberOf(
+            Sequence(
+                Ref("DotSegment"),
+                Ref("SingleIdentifierGrammar", optional=True),
+            ),
+            min_times=0,
+            max_times=1,
+        ),
+    )
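
A hypothetical example of the synonym statements above, where the synonym name
is at most schema-qualified but the target may be a four-part name:

    import sqlfluff

    sql = """
    CREATE SYNONYM dbo.orders FOR other_server.sales_db.dbo.orders;
    DROP SYNONYM IF EXISTS dbo.orders;
    """
    print(sqlfluff.lint(sql, dialect="tsql"))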
+
 
+class SamplingExpressionSegment(ansi.SamplingExpressionSegment):
+    """Override ANSI to use TSQL TABLESAMPLE expression."""
+
+    type = "sample_expression"
     match_grammar: Matchable = Sequence(
-        "DISABLE",
-        "TRIGGER",
-        OneOf(
-            Delimited(Ref("TriggerReferenceSegment")),
-            "ALL",
+        "TABLESAMPLE",
+        Sequence("SYSTEM", optional=True),
+        Bracketed(
+            Sequence(
+                Ref("NumericLiteralSegment"), OneOf("PERCENT", "ROWS", optional=True)
+            )
         ),
         Sequence(
-            "ON",
-            OneOf(Ref("ObjectReferenceSegment"), "DATABASE", Sequence("ALL", "SERVER")),
+            OneOf("REPEATABLE"),
+            Bracketed(Ref("NumericLiteralSegment")),
             optional=True,
         ),
     )
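
An illustrative TABLESAMPLE query this grammar should accept (table name
hypothetical):

    import sqlfluff

    sql = "SELECT id FROM dbo.big_table TABLESAMPLE SYSTEM (10 PERCENT) REPEATABLE (42);"
    print(sqlfluff.lint(sql, dialect="tsql"))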
 
 
-class LabelStatementSegment(BaseSegment):
-    """Label Statement, for a GOTO statement.
+class TemporalQuerySegment(BaseSegment):
+    """A segment that allows Temporal Queries to be run.
 
-    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/goto-transact-sql?view=sql-server-ver15
+    https://learn.microsoft.com/en-us/sql/relational-databases/tables/temporal-tables?view=sql-server-ver16
     """
 
-    type = "label_segment"
+    type = "temporal_query"
 
     match_grammar: Matchable = Sequence(
-        Ref("NakedIdentifierSegment"), Ref("ColonSegment"), allow_gaps=False
+        "FOR",
+        "SYSTEM_TIME",
+        OneOf(
+            "ALL",
+            Sequence(
+                "AS",
+                "OF",
+                Ref("QuotedLiteralSegment"),
+            ),
+            Sequence(
+                "FROM",
+                Ref("QuotedLiteralSegment"),
+                "TO",
+                Ref("QuotedLiteralSegment"),
+            ),
+            Sequence(
+                "BETWEEN",
+                Ref("QuotedLiteralSegment"),
+                "AND",
+                Ref("QuotedLiteralSegment"),
+            ),
+            Sequence(
+                "CONTAINED",
+                "IN",
+                Bracketed(
+                    Delimited(
+                        Ref("QuotedLiteralSegment"),
+                    )
+                ),
+            ),
+        ),
     )
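
A sketch of two of the `FOR SYSTEM_TIME` forms above, against a hypothetical
temporal table:

    import sqlfluff

    sql = """
    SELECT id FROM dbo.employees FOR SYSTEM_TIME AS OF '2021-01-01';
    SELECT id FROM dbo.employees FOR SYSTEM_TIME BETWEEN '2021-01-01' AND '2022-01-01';
    """
    print(sqlfluff.lint(sql, dialect="tsql"))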
 
 
-class AccessStatementSegment(BaseSegment):
-    """A `GRANT` or `REVOKE` statement.
+class CreateDatabaseScopedCredentialStatementSegment(BaseSegment):
+    """A statement to create a database scoped credential.
 
-    https://docs.microsoft.com/en-us/sql/t-sql/statements/grant-transact-sql?view=sql-server-ver15
-    https://docs.microsoft.com/en-us/sql/t-sql/statements/deny-transact-sql?view=sql-server-ver15
-    https://docs.microsoft.com/en-us/sql/t-sql/statements/revoke-transact-sql?view=sql-server-ver15
+    https://learn.microsoft.com/en-us/sql/t-sql/statements/create-database-scoped-credential-transact-sql?view=sql-server-ver16
     """
 
-    type = "access_statement"
+    type = "create_database_scoped_credential_statement"
 
-    # Privileges that can be set on the account (specific to snowflake)
-    _global_permissions = OneOf(
+    match_grammar: Matchable = Sequence(
+        "CREATE",
+        "DATABASE",
+        "SCOPED",
+        "CREDENTIAL",
+        Ref("ObjectReferenceSegment"),
+        "WITH",
+        "IDENTITY",
+        Ref("EqualsSegment"),
+        Ref("QuotedLiteralSegment"),
         Sequence(
-            "CREATE",
-            OneOf(
-                "ROLE",
-                "USER",
-                "WAREHOUSE",
-                "DATABASE",
-                "INTEGRATION",
-            ),
+            Ref("CommaSegment"),
+            "SECRET",
+            Ref("EqualsSegment"),
+            Ref("QuotedLiteralSegment"),
+            optional=True,
         ),
-        Sequence("APPLY", "MASKING", "POLICY"),
-        "EXECUTE",
     )
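
An illustrative statement matching the grammar above (credential name and
secret are placeholders):

    import sqlfluff

    sql = """
    CREATE DATABASE SCOPED CREDENTIAL blob_cred
    WITH IDENTITY = 'SHARED ACCESS SIGNATURE',
    SECRET = 'sas-token-value';
    """
    print(sqlfluff.lint(sql, dialect="tsql"))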
 
-    _schema_object_names = [
-        "TABLE",
-        "VIEW",
-        "FUNCTION",
-        "PROCEDURE",
-        "SEQUENCE",
-    ]
-
-    _schema_object_types = OneOf(
-        *_schema_object_names,
-        Sequence("EXTERNAL", "TABLE"),
-        Sequence("FILE", "FORMAT"),
-    )
 
-    # We reuse the object names above and simply append an `S` to the end of them to get
-    # plurals
-    _schema_object_types_plural = OneOf(
-        *[f"{object_name}S" for object_name in _schema_object_names]
-    )
+class CreateExternalDataSourceStatementSegment(BaseSegment):
+    """A statement to create an external data source.
 
-    _permissions = Sequence(
-        OneOf(
-            "ALTER",
-            "CONTROL",
-            "DELETE",
-            "EXECUTE",
-            "INSERT",
-            "RECEIVE",
-            "REFERENCES",
-            "SELECT",
-            Sequence("TAKE", "OWNERSHIP"),
-            "UPDATE",
-            Sequence("VIEW", "CHANGE", "TRACKING"),
-            Sequence("VIEW", "DEFINITION"),
-        ),
-        Ref("BracketedColumnReferenceListGrammar", optional=True),
-    )
+    https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-data-source-transact-sql?view=sql-server-ver16&tabs=dedicated#syntax
+    """
 
-    # All of the object types that we can grant permissions on.
-    # This list will contain ansi sql objects as well as dialect specific ones.
-    _objects = Sequence(
-        OneOf(
-            "DATABASE",
-            "LANGUAGE",
-            "SCHEMA",
-            "ROLE",
-            "TYPE",
-            Sequence(
-                "FOREIGN",
-                OneOf("SERVER", Sequence("DATA", "WRAPPER")),
-            ),
-            Sequence("ALL", "SCHEMAS", "IN", "DATABASE"),
-            _schema_object_types,
-            Sequence("ALL", _schema_object_types_plural, "IN", "SCHEMA"),
-            optional=True,
-        ),
-        Delimited(Ref("ObjectReferenceSegment"), terminator=OneOf("TO", "FROM")),
-        Ref("FunctionParameterListGrammar", optional=True),
-    )
+    type = "create_external_data_source_statement"
 
-    match_grammar: Matchable = OneOf(
-        # Based on https://www.postgresql.org/docs/13/sql-grant.html
-        # and https://docs.snowflake.com/en/sql-reference/sql/grant-privilege.html
-        Sequence(
-            "GRANT",
-            OneOf(
+    match_grammar: Matchable = Sequence(
+        "CREATE",
+        "EXTERNAL",
+        "DATA",
+        "SOURCE",
+        Ref("ObjectReferenceSegment"),
+        "WITH",
+        Bracketed(
+            Delimited(
+                Ref("TableLocationClause"),
                 Sequence(
-                    Delimited(
-                        OneOf(_global_permissions, _permissions),
-                        terminator="ON",
-                    ),
+                    "CONNECTION_OPTIONS",
+                    Ref("EqualsSegment"),
+                    AnyNumberOf(Ref("QuotedLiteralSegmentOptWithN")),
                 ),
-                Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)),
-            ),
-            "ON",
-            Sequence(
-                OneOf("LOGIN", "DATABASE", "OBJECT", "ROLE", "SCHEMA", "USER"),
-                Ref("CastOperatorSegment"),
-                optional=True,
-            ),
-            _objects,
-            "TO",
-            Delimited(
-                OneOf(Ref("RoleReferenceSegment"), Ref("FunctionSegment")),
-            ),
-            OneOf(
-                Sequence("WITH", "GRANT", "OPTION"),
-                optional=True,
-            ),
-            Sequence(
-                "AS",
-                Ref("ObjectReferenceSegment"),
-                optional=True,
-            ),
-        ),
-        Sequence(
-            "DENY",
-            OneOf(
-                Delimited(
-                    OneOf(_global_permissions, _permissions),
-                    terminator="ON",
+                Sequence(
+                    "CREDENTIAL",
+                    Ref("EqualsSegment"),
+                    Ref("ObjectReferenceSegment"),
+                ),
+                Sequence(
+                    "PUSHDOWN",
+                    Ref("EqualsSegment"),
+                    OneOf("ON", "OFF"),
                 ),
-                Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)),
-            ),
-            "ON",
-            Sequence(
-                OneOf("LOGIN", "DATABASE", "OBJECT", "ROLE", "SCHEMA", "USER"),
-                Ref("CastOperatorSegment"),
-                optional=True,
             ),
-            _objects,
-            OneOf("TO"),
+        ),
+    )
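
A sketch of a statement this grammar targets, assuming `TableLocationClause`
(defined elsewhere in the dialect) matches a `LOCATION = <literal>` pair; all
names are hypothetical:

    import sqlfluff

    sql = """
    CREATE EXTERNAL DATA SOURCE my_source
    WITH (
        LOCATION = 'hdfs://namenode:8020',
        CREDENTIAL = blob_cred,
        PUSHDOWN = ON
    );
    """
    print(sqlfluff.lint(sql, dialect="tsql"))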
+
+
+class PeriodSegment(BaseSegment):
+    """A `PERIOD FOR SYSTEM_TIME` for `CREATE TABLE` of temporal tables.
+
+    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15
+    https://learn.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver16#generated-always-as--row--transaction_id--sequence_number----start--end---hidden---not-null-
+    """
+
+    type = "period_segment"
+    match_grammar = Sequence(
+        "PERIOD",
+        "FOR",
+        "SYSTEM_TIME",
+        Bracketed(
             Delimited(
-                Ref("RoleReferenceSegment"),
-            ),
-            Sequence(
-                Ref.keyword("CASCADE", optional=True),
-                Ref("ObjectReferenceSegment", optional=True),
-                optional=True,
+                Ref("ColumnReferenceSegment"),
+                Ref("ColumnReferenceSegment"),
             ),
         ),
-        Sequence(
-            "REVOKE",
-            Sequence("GRANT", "OPTION", "FOR", optional=True),
-            OneOf(
-                Delimited(
-                    OneOf(_global_permissions, _permissions),
-                    terminator="ON",
-                ),
-                Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)),
-            ),
-            "ON",
+    )
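
For context, the clause appears inside a system-versioned table definition; a
hedged sketch, assuming the dialect's `CREATE TABLE` grammar wires in
`PeriodSegment` and the `GENERATED ALWAYS` column options:

    import sqlfluff

    sql = """
    CREATE TABLE dbo.employee (
        id INT NOT NULL PRIMARY KEY,
        valid_from DATETIME2 GENERATED ALWAYS AS ROW START,
        valid_to DATETIME2 GENERATED ALWAYS AS ROW END,
        PERIOD FOR SYSTEM_TIME (valid_from, valid_to)
    );
    """
    print(sqlfluff.lint(sql, dialect="tsql"))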
+
+
+class SqlcmdCommandSegment(BaseSegment):
+    """A `sqlcmd` command.
+
+    Microsoft supports CI/CD deployment through so-called 'SQL Database
+    Projects'. These can contain proprietary `sqlcmd` commands as part of an
+    SQL file.
+    https://learn.microsoft.com/en-us/sql/tools/sqlcmd/sqlcmd-utility?view=sql-server-ver16#sqlcmd-commands
+    """
+
+    type = "sqlcmd_command_segment"
+
+    match_grammar: Matchable = OneOf(
+        Sequence(
             Sequence(
-                OneOf("LOGIN", "DATABASE", "OBJECT", "ROLE", "SCHEMA", "USER"),
-                Ref("CastOperatorSegment"),
-                optional=True,
-            ),
-            _objects,
-            OneOf("TO", "FROM"),
-            Delimited(
-                Ref("RoleReferenceSegment"),
+                Ref("ColonSegment"),
+                Ref("SqlcmdOperatorSegment"),  # `:r`
+                allow_gaps=False,
             ),
+            Ref("SqlcmdFilePathSegment"),
+        ),
+        Sequence(
             Sequence(
-                Ref.keyword("CASCADE", optional=True),
-                Ref("ObjectReferenceSegment", optional=True),
-                optional=True,
+                Ref("ColonSegment"),
+                Ref("SqlcmdOperatorSegment"),  # `:setvar`
+                allow_gaps=False,
             ),
+            Ref("ObjectReferenceSegment"),
+            Ref("CodeSegment"),
         ),
     )
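
An illustrative file fragment with both command forms, assuming
`SqlcmdOperatorSegment` and `SqlcmdFilePathSegment` (defined elsewhere in the
dialect) match the `r`/`setvar` operators and the path; the path and variable
are hypothetical:

    import sqlfluff

    sql = r"""
    :r .\schema\tables.sql
    :setvar DatabaseName AdventureWorks
    """
    print(sqlfluff.lint(sql, dialect="tsql"))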
 
 
-class CreateTypeStatementSegment(BaseSegment):
-    """A `CREATE TYPE` statement.
+class ExternalFileFormatDelimitedTextFormatOptionClause(BaseSegment):
+    """`CREATE EXTERNAL FILE FORMAT` Delimited text `FORMAT_OPTIONS` clause."""
 
-    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-type-transact-sql?view=sql-server-ver15
-    """
+    type = "external_file_delimited_text_format_options_clause"
 
-    type = "create_type_statement"
-    match_grammar: Matchable = Sequence(
-        "CREATE",
-        "TYPE",
-        Ref("ObjectReferenceSegment"),
-        OneOf(
-            Sequence("FROM", Ref("ObjectReferenceSegment")),
-            Sequence(
-                "AS",
-                "TABLE",
-                Sequence(
-                    Bracketed(
-                        Delimited(
-                            OneOf(
-                                Ref("TableConstraintSegment"),
-                                Ref("ColumnDefinitionSegment"),
-                                Ref("TableIndexSegment"),
-                            ),
-                            allow_trailing=True,
-                        )
-                    ),
-                ),
+    match_grammar = OneOf(
+        Sequence(
+            OneOf(
+                "FIELD_TERMINATOR", "STRING_DELIMITER", "DATE_FORMAT", "PARSER_VERSION"
             ),
+            Ref("EqualsSegment"),
+            Ref("QuotedLiteralSegment"),
+        ),
+        Sequence(
+            "FIRST_ROW",
+            Ref("EqualsSegment"),
+            Ref("NumericLiteralSegment"),
+        ),
+        Sequence(
+            "USE_TYPE_DEFAULT",
+            Ref("EqualsSegment"),
+            Ref("BooleanLiteralGrammar"),
+        ),
+        Sequence(
+            "ENCODING",
+            Ref("EqualsSegment"),
+            Ref("FileEncodingSegment"),
         ),
     )
 
 
-class OpenCursorStatementSegment(BaseSegment):
-    """An `OPEN` cursor statement.
+class ExternalFileFormatDelimitedTextClause(BaseSegment):
+    """`CREATE EXTERNAL FILE FORMAT` *Delimited text* clause.
 
-    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/open-transact-sql?view=sql-server-ver15
+    https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql?view=sql-server-ver16&tabs=delimited#syntax
     """
 
-    type = "open_cursor_statement"
-    match_grammar: Matchable = Sequence(
-        "OPEN",
-        Ref("CursorNameGrammar"),
+    type = "external_file_delimited_text_clause"
+
+    match_grammar = Delimited(
+        Sequence(
+            "FORMAT_TYPE",
+            Ref("EqualsSegment"),
+            "DELIMITEDTEXT",
+        ),
+        Sequence(
+            "FORMAT_OPTIONS",
+            Bracketed(
+                Delimited(
+                    Ref("ExternalFileFormatDelimitedTextFormatOptionClause"),
+                ),
+            ),
+            optional=True,
+        ),
+        Sequence(
+            "DATA_COMPRESSION",
+            Ref("EqualsSegment"),
+            Ref("FileCompressionSegment"),
+            optional=True,
+        ),
     )
 
 
-class CloseCursorStatementSegment(BaseSegment):
-    """A `CLOSE` cursor statement.
+class ExternalFileFormatRcClause(BaseSegment):
+    """`CREATE EXTERNAL FILE FORMAT` *Record Columnar file format (RcFile)* clause.
 
-    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/close-transact-sql?view=sql-server-ver15
+    https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql?view=sql-server-ver16&tabs=rc#syntax
     """
 
-    type = "close_cursor_statement"
-    match_grammar: Matchable = Sequence(
-        "CLOSE",
-        Ref("CursorNameGrammar"),
+    type = "external_file_rc_clause"
+
+    match_grammar = Delimited(
+        Sequence(
+            "FORMAT_TYPE",
+            Ref("EqualsSegment"),
+            "RCFILE",
+        ),
+        Sequence(
+            "SERDE_METHOD",
+            Ref("EqualsSegment"),
+            Ref("SerdeMethodSegment"),
+        ),
+        Sequence(
+            "DATA_COMPRESSION",
+            Ref("EqualsSegment"),
+            Ref("FileCompressionSegment"),
+            optional=True,
+        ),
     )
 
 
-class DeallocateCursorStatementSegment(BaseSegment):
-    """A `DEALLOCATE` cursor statement.
+class ExternalFileFormatOrcClause(BaseSegment):
+    """`CREATE EXTERNAL FILE FORMAT` *Optimized Row Columnar (ORC)* format clause.
 
-    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/deallocate-transact-sql?view=sql-server-ver15
+    https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql?view=sql-server-ver16&tabs=orc#syntax
     """
 
-    type = "deallocate_cursor_statement"
-    match_grammar: Matchable = Sequence(
-        "DEALLOCATE",
-        Ref("CursorNameGrammar"),
+    type = "external_file_orc_clause"
+
+    match_grammar = Delimited(
+        Sequence(
+            "FORMAT_TYPE",
+            Ref("EqualsSegment"),
+            "ORC",
+        ),
+        Sequence(
+            "DATA_COMPRESSION",
+            Ref("EqualsSegment"),
+            Ref("FileCompressionSegment"),
+            optional=True,
+        ),
     )
 
 
-class FetchCursorStatementSegment(BaseSegment):
-    """A `FETCH` cursor statement.
+class ExternalFileFormatParquetClause(BaseSegment):
+    """`CREATE EXTERNAL FILE FORMAT` *PARQUET* format clause.
 
-    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/fetch-transact-sql?view=sql-server-ver15
+    https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql?view=sql-server-ver16&tabs=parquet#syntax
     """
 
-    type = "fetch_cursor_statement"
-    match_grammar: Matchable = Sequence(
-        "FETCH",
-        OneOf("NEXT", "PRIOR", "FIRST", "LAST", optional=True),
-        "FROM",
-        Ref("CursorNameGrammar"),
-        Sequence("INTO", Delimited(Ref("ParameterNameSegment")), optional=True),
+    type = "external_file_parquet_clause"
+
+    match_grammar = Delimited(
+        Sequence(
+            "FORMAT_TYPE",
+            Ref("EqualsSegment"),
+            "PARQUET",
+        ),
+        Sequence(
+            "DATA_COMPRESSION",
+            Ref("EqualsSegment"),
+            Ref("FileCompressionSegment"),
+            optional=True,
+        ),
     )
 
 
-class ForXmlSegment(BaseSegment):
-    """A segment for `FOR XML` in `SELECT` statements.
+class ExternalFileFormatJsonClause(BaseSegment):
+    """`CREATE EXTERNAL FILE FORMAT` *JSON* format clause.
 
-    https://docs.microsoft.com/en-us/sql/relational-databases/xml/for-xml-sql-server?view=sql-server-2017
+    https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql?view=sql-server-ver16&tabs=json#syntax
     """
 
-    type = "for_xml_segment"
-    match_grammar: Matchable = Sequence(
-        "FOR",
-        "XML",
-        OneOf(
-            Sequence("RAW", Bracketed(Ref("QuotedLiteralSegment"), optional=True)),
-            "AUTO",
-            "EXPLICIT",
-            Sequence("PATH", Bracketed(Ref("QuotedLiteralSegment"), optional=True)),
+    type = "external_file_json_clause"
+
+    match_grammar = Delimited(
+        Sequence(
+            "FORMAT_TYPE",
+            Ref("EqualsSegment"),
+            "JSON",
+        ),
+        Sequence(
+            "DATA_COMPRESSION",
+            Ref("EqualsSegment"),
+            Ref("FileCompressionSegment"),
+            optional=True,
         ),
     )
 
 
-class ConcatSegment(ansi.CompositeBinaryOperatorSegment):
-    """Concat operator."""
+class ExternalFileFormatDeltaClause(BaseSegment):
+    """`CREATE EXTERNAL FILE FORMAT` *Delta Lake* format clause.
 
-    match_grammar: Matchable = Ref("PlusSegment")
+    https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql?view=sql-server-ver16&tabs=delta#syntax
+    """
 
+    type = "external_file_delta_clause"
 
-class CreateSynonymStatementSegment(BaseSegment):
-    """A `CREATE SYNONYM` statement."""
+    match_grammar = Sequence(
+        "FORMAT_TYPE",
+        Ref("EqualsSegment"),
+        "DELTA",
+    )
+
+
+class CreateExternalFileFormat(BaseSegment):
+    """A statement to create an `EXTERNAL FILE FORMAT` object.
+
+    https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql?view=sql-server-ver16&tabs=delta#syntax
+    """
+
+    type = "create_external_file_format"
 
-    type = "create_synonym_statement"
-    # https://learn.microsoft.com/en-us/sql/t-sql/statements/create-synonym-transact-sql
     match_grammar: Matchable = Sequence(
         "CREATE",
-        "SYNONYM",
-        Ref("SynonymReferenceSegment"),
-        "FOR",
+        "EXTERNAL",
+        "FILE",
+        "FORMAT",
         Ref("ObjectReferenceSegment"),
+        "WITH",
+        Bracketed(
+            OneOf(
+                Ref("ExternalFileFormatDelimitedTextClause"),
+                Ref("ExternalFileFormatRcClause"),
+                Ref("ExternalFileFormatOrcClause"),
+                Ref("ExternalFileFormatParquetClause"),
+                Ref("ExternalFileFormatJsonClause"),
+                Ref("ExternalFileFormatDeltaClause"),
+            ),
+        ),
     )
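
Pulling the clauses together, a hypothetical delimited-text format definition
the combined grammar should accept:

    import sqlfluff

    sql = """
    CREATE EXTERNAL FILE FORMAT my_csv
    WITH (
        FORMAT_TYPE = DELIMITEDTEXT,
        FORMAT_OPTIONS (
            FIELD_TERMINATOR = ',',
            FIRST_ROW = 2,
            USE_TYPE_DEFAULT = TRUE
        )
    );
    """
    print(sqlfluff.lint(sql, dialect="tsql"))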
 
 
-class DropSynonymStatementSegment(BaseSegment):
-    """A `DROP SYNONYM` statement."""
+class OpenJsonWithClauseSegment(BaseSegment):
+    """A `WITH` clause of an `OPENJSON()` table-valued function.
 
-    type = "drop_synonym_statement"
-    # https://learn.microsoft.com/en-us/sql/t-sql/statements/drop-synonym-transact-sql
-    match_grammar: Matchable = Sequence(
-        "DROP",
-        "SYNONYM",
-        Ref("IfExistsGrammar", optional=True),
-        Ref("SynonymReferenceSegment"),
+    https://learn.microsoft.com/en-us/sql/t-sql/functions/openjson-transact-sql?view=sql-server-ver16#with_clause
+    """
+
+    type = "openjson_with_clause"
+
+    match_grammar = Sequence(
+        "WITH",
+        Bracketed(
+            Delimited(
+                Sequence(
+                    Ref("ColumnReferenceSegment"),
+                    Ref("DatatypeSegment"),
+                    Ref("QuotedLiteralSegment", optional=True),  # column_path
+                    Sequence(
+                        "AS",
+                        "JSON",
+                        optional=True,
+                    ),
+                ),
+            ),
+        ),
     )
 
 
-class SynonymReferenceSegment(ansi.ObjectReferenceSegment):
-    """A reference to a synonym.
+class OpenJsonSegment(BaseSegment):
+    """An `OPENJSON()` table-valued function.
 
-    A synonym may only (optionally) specify a schema. It may not specify a server
-    or database name.
+    https://learn.microsoft.com/en-us/sql/t-sql/functions/openjson-transact-sql?view=sql-server-ver16#syntax
     """
 
-    type = "synonym_reference"
-    # match grammar (allow whitespace)
-    match_grammar: Matchable = Sequence(
-        Ref("SingleIdentifierGrammar"),
-        AnyNumberOf(
-            Sequence(
-                Ref("DotSegment"),
-                Ref("SingleIdentifierGrammar", optional=True),
+    type = "openjson_segment"
+
+    match_grammar = Sequence(
+        "OPENJSON",
+        Bracketed(
+            Delimited(
+                Ref("QuotedLiteralSegmentOptWithN"),  # jsonExpression
+                Ref("ColumnReferenceSegment"),
+                Ref("ParameterNameSegment"),
+                Ref("QuotedLiteralSegment"),  # path
+            ),
+        ),
+        Ref("OpenJsonWithClauseSegment", optional=True),
+    )
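
A sketch of an `OPENJSON()` call with a `WITH` clause, including an `AS JSON`
column; the JSON paths and variable are hypothetical:

    import sqlfluff

    sql = """
    SELECT order_id, customer
    FROM OPENJSON(@json, '$.orders')
    WITH (
        order_id INT '$.id',
        customer NVARCHAR(100) '$.customer',
        items NVARCHAR(MAX) '$.items' AS JSON
    );
    """
    print(sqlfluff.lint(sql, dialect="tsql"))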
+
+
+class CreateExternalTableStatementSegment(BaseSegment):
+    """A `CREATE EXTERNAL TABLE` statement.
+
+    https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-table-transact-sql?view=sql-server-ver16&tabs=dedicated
+    """
+
+    type = "create_external_table_statement"
+
+    match_grammar = Sequence(
+        "CREATE",
+        "EXTERNAL",
+        "TABLE",
+        Ref("ObjectReferenceSegment"),
+        Bracketed(
+            Delimited(
+                Ref("ColumnDefinitionSegment"),
+            ),
+        ),
+        "WITH",
+        Bracketed(
+            Delimited(
+                Ref("TableLocationClause"),
+                Sequence(
+                    "DATA_SOURCE",
+                    Ref("EqualsSegment"),
+                    Ref("ObjectReferenceSegment"),
+                ),
+                Sequence(
+                    "FILE_FORMAT",
+                    Ref("EqualsSegment"),
+                    Ref("ObjectReferenceSegment"),
+                ),
+                Sequence(
+                    "REJECT_TYPE",
+                    Ref("EqualsSegment"),
+                    OneOf("value", "percentage"),
+                ),
+                Sequence(
+                    "REJECT_VALUE",
+                    Ref("EqualsSegment"),
+                    Ref("NumericLiteralSegment"),
+                ),
+                Sequence(
+                    "REJECT_SAMPLE_VALUE",
+                    Ref("EqualsSegment"),
+                    Ref("NumericLiteralSegment"),
+                ),
+                Sequence(
+                    "REJECTED_ROW_LOCATION",
+                    Ref("EqualsSegment"),
+                    Ref("QuotedLiteralSegment"),
+                ),
             ),
-            min_times=0,
-            max_times=1,
         ),
     )
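
Finally, a hypothetical external table tying the pieces together (the data
source and file format names refer to the sketches above):

    import sqlfluff

    sql = """
    CREATE EXTERNAL TABLE dbo.ext_sales (
        sale_id INT,
        amount DECIMAL(10, 2)
    )
    WITH (
        LOCATION = '/sales/2023/',
        DATA_SOURCE = my_source,
        FILE_FORMAT = my_csv,
        REJECT_TYPE = VALUE,
        REJECT_VALUE = 10
    );
    """
    print(sqlfluff.lint(sql, dialect="tsql"))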
diff --git a/src/sqlfluff/dialects/dialect_tsql_keywords.py b/src/sqlfluff/dialects/dialect_tsql_keywords.py
index 559afca..f520239 100644
--- a/src/sqlfluff/dialects/dialect_tsql_keywords.py
+++ b/src/sqlfluff/dialects/dialect_tsql_keywords.py
@@ -14,6 +14,7 @@ RESERVED_KEYWORDS = [
     "ASC",
     "AUTHORIZATION",
     "BACKUP",
+    "BATCHSIZE",
     "BEGIN",
     "BETWEEN",
     "BREAK",
@@ -24,6 +25,7 @@ RESERVED_KEYWORDS = [
     "CASE",
     "CHECK",
     "CHECKPOINT",
+    "CHECK_CONSTRAINTS",
     "CLOSE",
     "CLUSTERED",
     "COALESCE",
@@ -38,13 +40,14 @@ RESERVED_KEYWORDS = [
     "CONVERT",
     "CREATE",
     "CROSS",
+    "CURRENT",
     "CURRENT_DATE",
     "CURRENT_TIME",
     "CURRENT_TIMESTAMP",
     "CURRENT_USER",
-    "CURRENT",
     "CURSOR",
     "DATABASE",
+    "DAY",
     "DBCC",
     "DEALLOCATE",
     "DECLARE",
@@ -206,6 +209,8 @@ RESERVED_KEYWORDS = [
 
 UNRESERVED_KEYWORDS = [
     "ABORT_AFTER_WAIT",
+    "ABORT",
+    "ABSENT",
     "AFTER",
     "ALGORITHM",
     "ALLOW_PAGE_LOCKS",
@@ -217,12 +222,14 @@ UNRESERVED_KEYWORDS = [
     "ANSI_NULLS",
     "ANSI_PADDING",
     "ANSI_WARNINGS",
+    "APPEND_ONLY",
     "APPLY",
     "ARITHABORT",
     "ARITHIGNORE",
     "AT",
     "AUTO",
     "BERNOULLI",
+    "BINARY",
     "BLOCKERS",
     "BREAK",
     "CACHE",
@@ -232,23 +239,35 @@ UNRESERVED_KEYWORDS = [
     "CATCH",
     "CODEPAGE",
     "COLUMN_ENCRYPTION_KEY",
-    "COLUMNSTORE",
     "COLUMNSTORE_ARCHIVE",
+    "COLUMNSTORE",
     "COMMITTED",
-    "CONCAT",
+    "COMPRESS_ALL_ROW_GROUPS",
+    "COMPRESSION_DELAY",
     "CONCAT_NULL_YIELDS_NULL",
+    "CONCAT",
+    "CONNECTION_OPTIONS",
+    "CONTAINED",
     "CONTINUE",
     "CONTROL",
-    "COMPRESSION_DELAY",
+    "CREDENTIAL",
     "CURSOR_CLOSE_ON_COMMIT",
     "CYCLE",
     "DATA_COMPRESSION",
+    "DATA_CONSISTENCY_CHECK",
+    "DATA_DELETION",
+    "DATA_SOURCE",
+    "DATA",
+    "DATAFILETYPE",
     "DATASOURCE",
+    "DATE_FORMAT",
     "DATE",
     "DATEFIRST",
     "DATEFORMAT",
     "DEADLOCK_PRIORITY",
     "DELAY",
+    "DELIMITEDTEXT",
+    "DELTA",
     "DENSE_RANK",
     "DETERMINISTIC",
     "DISABLE",
@@ -256,21 +275,37 @@ UNRESERVED_KEYWORDS = [
     "DISTRIBUTION",  # Azure Synapse Analytics specific
     "DROP_EXISTING",
     "DUMP",  # listed as reserved but functionally unreserved
+    "DURABILITY",
+    "ELEMENTS",
+    "ENCODING",
     "ENCRYPTED",
-    "ENCRYPTION",
     "ENCRYPTION_TYPE",
-    "ERRORFILE",
+    "ENCRYPTION",
     "ERRORFILE_DATA_SOURCE",
+    "ERRORFILE",
     "EXPAND",
     "EXPLAIN",  # Azure Synapse Analytics specific
     "EXPLICIT",
     "EXTERNALPUSHDOWN",
     "FAST",
+    "FIELD_TERMINATOR",
     "FIELDQUOTE",
+    "FIELDTERMINATOR",
+    "FILE_FORMAT",
     "FILESTREAM",
     "FILESTREAM_ON",
+    "FILESTREAM",
+    "FILETABLE_COLLATE_FILENAME",
+    "FILETABLE_DIRECTORY",
+    "FILETABLE_FULLPATH_UNIQUE_CONSTRAINT_NAME",
+    "FILETABLE_PRIMARY_KEY_CONSTRAINT_NAME",
+    "FILETABLE_STREAMID_UNIQUE_CONSTRAINT_NAME",
+    "FILTER_COLUMN",
+    "FILTER_PREDICATE",
     "FILTER",
     "FIPS_FLAGGER",
+    "FIRE_TRIGGERS",
+    "FIRST_ROW",
     "FIRST",
     "FIRSTROW",
     "FMTONLY",
@@ -280,19 +315,26 @@ UNRESERVED_KEYWORDS = [
     "FORCEPLAN",
     "FORCESCAN",
     "FORCESEEK",
+    "FORMAT_OPTIONS",
+    "FORMAT_TYPE",
     "FORMAT",
-    "FORMATFILE",
     "FORMATFILE_DATA_SOURCE",
+    "FORMATFILE",
+    "GENERATED",
     "HASH",
     "HEAP",  # Azure Synapse Analytics specific
     "HIDDEN",
     "HINT",
-    "IGNORE",
+    "HISTORY_RETENTION_PERIOD",
+    "HISTORY_TABLE",
     "IGNORE_CONSTRAINTS",
     "IGNORE_DUP_KEY",
     "IGNORE_NONCLUSTERED_COLUMNSTORE_INDEX",
     "IGNORE_TRIGGERS",
+    "IGNORE",
     "IMPLICIT_TRANSACTIONS",
+    "INBOUND",
+    "INCLUDE_NULL_VALUES",
     "INCLUDE",
     "INCREMENT",
     "INLINE",
@@ -300,39 +342,44 @@ UNRESERVED_KEYWORDS = [
     "INTERVAL",
     "IO",
     "ISOLATION",
-    "GENERATED",
+    "JSON",
     "KEEP",
     "KEEPDEFAULTS",
     "KEEPFIXED",
     "KEEPIDENTITY",
-    # LABEL is an Azure Synapse Analytics specific reserved keyword
-    # but could break TSQL parsing to add there
-    "LABEL",
+    "KEEPNULLS",
+    "KILOBYTES_PER_BATCH",
+    "LABEL",  # *reserved* keyword in Azure Synapse; but would break TSQL parsing
     "LANGUAGE",
     "LAST",
     "LASTROW",
+    "LEDGER_VIEW",
+    "LEDGER",
     "LEVEL",
     "LOAD",  # listed as reserved but functionally unreserved
+    "LOB_COMPACTION",
     "LOCATION",
     "LOCK_TIMEOUT",
     "LOG",
     "LOGIN",
     "LOOP",
-    "MAX_DURATION",
-    "MAX_GRANT_PERCENT",
     "MASKED",
     "MATCHED",
+    "MAX_DURATION",
+    "MAX_GRANT_PERCENT",
     "MAX",
     "MAXDOP",
     "MAXERRORS",
     "MAXRECURSION",
     "MAXVALUE",
+    "MEMORY_OPTIMIZED",
+    "MIGRATION_STATE",
     "MIN_GRANT_PERCENT",
     "MINUTES",
     "MINVALUE",
     "NEXT",
-    "NO",
     "NO_PERFORMANCE_SPOOL",
+    "NO",
     "NOCOUNT",
     "NOEXEC",
     "NOEXPAND",
@@ -344,9 +391,14 @@ UNRESERVED_KEYWORDS = [
     "OBJECT",
     "OFFSET",
     "ONLINE",
-    "OPTIMIZE",
+    "OPENJSON",
+    "OPERATION_TYPE_COLUMN_NAME",
+    "OPERATION_TYPE_DESC_COLUMN_NAME",
     "OPTIMIZE_FOR_SEQUENTIAL_KEY",
+    "OPTIMIZE",
+    "ORC",
     "OUT",
+    "OUTBOUND",
     "OUTPUT",
     "OWNER",
     "PAD_INDEX",
@@ -354,34 +406,50 @@ UNRESERVED_KEYWORDS = [
     "PAGLOCK",
     "PARAMETER",
     "PARAMETERIZATION",
+    "PARQUET",
     "PARSEONLY",
+    "PARSER_VERSION",
     "PARTITION",
     "PARTITIONS",
     "PATH",
+    "PAUSE",
+    "PAUSED",
+    "PERCENTAGE",
     "PERCENTILE_CONT",
     "PERCENTILE_DISC",
+    "PERIOD",
     "PERSISTED",
     "PRECEDING",
     "PRECISION",  # listed as reserved but functionally unreserved
     "PRIOR",
     "PROFILE",
+    "PUSHDOWN",
     "QUERY_GOVERNOR_COST_LIMIT",
     "QUERYTRACEON",
     "QUOTED_IDENTIFIER",
+    "R",  # sqlcmd command
     "RANDOMIZED",
     "RANGE",
     "RANK",
     "RAW",
+    "RCFILE",
     "READCOMMITTED",
     "READCOMMITTEDLOCK",
     "READONLY",
     "READPAST",
     "READUNCOMMITTED",
+    "REBUILD",
     "RECEIVE",
     "RECOMPILE",
     "RECURSIVE",
+    "REJECT_SAMPLE_VALUE",
+    "REJECT_TYPE",
+    "REJECT_VALUE",
+    "REJECTED_ROW_LOCATION",
+    "REMOTE_DATA_ARCHIVE",
     "REMOTE_PROC_TRANSACTIONS",
     "RENAME",  # Azure Synapse Analytics specific
+    "REORGANIZE",
     "REPEATABLE",
     "REPEATABLEREAD",
     "REPLACE",
@@ -389,25 +457,37 @@ UNRESERVED_KEYWORDS = [
     "RESPECT",
     "RESULT_SET_CACHING",  # Azure Synapse Analytics specific
     "RESUMABLE",
+    "RESUME",
+    "RETENTION_PERIOD",
     "RETURNS",
     "ROBUST",
     "ROLE",
+    "ROOT",
     "ROUND_ROBIN",  # Azure Synapse Analytics specific
-    "ROW",
     "ROW_NUMBER",
+    "ROW",
     "ROWGUIDCOL",
     "ROWLOCK",
+    "ROWS_PER_BATCH",
     "ROWS",
+    "ROWTERMINATOR",
     "S",
     "SCALEOUTEXECUTION",
+    "SCHEMA_AND_DATA",
+    "SCHEMA_ONLY",
     "SCHEMABINDING",
+    "SCOPED",
+    "SECRET",
     "SECURITYAUDIT",  # listed as reserved but functionally unreserved
     "SELF",
-    "SETERROR",
-    "SEQUENCE",
+    "SEQUENCE_NUMBER_COLUMN_NAME",
     "SEQUENCE_NUMBER",
+    "SEQUENCE",
+    "SERDE_METHOD",
     "SERIALIZABLE",
     "SERVER",
+    "SETERROR",
+    "SETVAR",  # sqlcmd command
     "SHOWPLAN_ALL",
     "SHOWPLAN_TEXT",
     "SHOWPLAN_XML",
@@ -423,8 +503,11 @@ UNRESERVED_KEYWORDS = [
     "STATISTICS_INCREMENTAL",
     "STATISTICS_NORECOMPUTE",
     "STRING_AGG",
-    "SYNONYM",
+    "STRING_DELIMITER",
     "SWITCH",
+    "SYNONYM",
+    "SYSTEM_TIME",
+    "SYSTEM_VERSIONING",
     "SYSTEM",
     "TABLOCK",
     "TABLOCKX",
@@ -436,25 +519,32 @@ UNRESERVED_KEYWORDS = [
     "TIME",
     "TIMEOUT",
     "TIMESTAMP",
+    "TRANSACTION_ID_COLUMN_NAME",
     "TRANSACTION_ID",
     "TRUNCATE_TARGET",  # Azure Synapse Analytics specific
     "TRY",
     "TYPE",
-    "UPDLOCK",
     "UNBOUNDED",
     "UNCOMMITTED",
     "UNKNOWN",
+    "UPDLOCK",
+    "USE_TYPE_DEFAULT",
     "USER_DB",  # Azure Synapse Analytics specific, deprecated
     "USING",
     "VALUE",
     "VIEW_METADATA",
-    "WAITFOR",
     "WAIT_AT_LOW_PRIORITY",
-    "WITHIN",
+    "WAITFOR",
     "WHILE",
+    "WITHIN",
+    "WITHOUT_ARRAY_WRAPPER",
     "WORK",
     "XACT_ABORT",
     "XLOCK",
+    "XML_COMPRESSION",
     "XML",
+    "XMLDATA",
+    "XMLSCHEMA",
+    "XSINIL",
     "ZONE",
 ]
diff --git a/src/sqlfluff/diff_quality_plugin.py b/src/sqlfluff/diff_quality_plugin.py
index 26ef79b..393a083 100644
--- a/src/sqlfluff/diff_quality_plugin.py
+++ b/src/sqlfluff/diff_quality_plugin.py
@@ -3,7 +3,9 @@ import copy
 import json
 import logging
 import os
+import pathlib
 import sys
+import tempfile
 
 from diff_cover.command_runner import execute, run_command_for_code
 from diff_cover.hook import hookimpl as diff_cover_hookimpl
@@ -57,20 +59,23 @@ class SQLFluffViolationReporter(QualityReporter):
         if not self.driver_tool_installed:  # pragma: no cover
             raise OSError(f"{self.driver.name} is not installed")
 
-        output = self.reports if self.reports else self._run_sqlfluff(src_paths)
-        for o in output:
-            # Load and parse SQLFluff JSON output.
-            try:
-                report = json.loads(o)
-            except json.JSONDecodeError as e:  # pragma: no cover
-                print(f"Error parsing JSON output ({e}): {repr(o)}")
-                raise
-            else:
-                for file in report:
-                    self.violations_dict[file["filepath"]] = [
-                        Violation(v["line_no"], v["description"])
-                        for v in file["violations"]
-                    ]
+        if src_paths:
+            output = self.reports if self.reports else self._run_sqlfluff(src_paths)
+            for o in output:
+                # Load and parse SQLFluff JSON output.
+                try:
+                    report = json.loads(o)
+                except json.JSONDecodeError as e:  # pragma: no cover
+                    print(f"Error parsing JSON output ({e}): {repr(o)}")
+                    raise
+                else:
+                    for file in report:
+                        self.violations_dict[file["filepath"]] = [
+                            Violation(v["line_no"], v["description"])
+                            for v in file["violations"]
+                        ]
+        else:
+            logger.warning("Not running SQLFluff: No files to check")
         return self.violations_dict
 
     def _run_sqlfluff(self, src_paths):
@@ -83,16 +88,30 @@ class SQLFluffViolationReporter(QualityReporter):
             if src_path.endswith(".sql") and os.path.exists(src_path):
                 command.append(src_path.encode(sys.getfilesystemencoding()))
 
-        # Run SQLFluff.
-        printable_command = " ".join(
-            [
-                c.decode(sys.getfilesystemencoding()) if isinstance(c, bytes) else c
-                for c in command
-            ]
-        )
-        logger.warning(f"{printable_command}")
-        output = execute(command, self.driver.exit_codes)
-        return [output[1]] if self.driver.output_stderr else [output[0]]
+        with tempfile.NamedTemporaryFile(
+            prefix="sqlfluff-", suffix=".json", delete=False
+        ) as f:
+            f.close()
+            try:
+                # Write output to a temporary file. This avoids issues where
+                # extraneous SQLFluff or dbt output results in the JSON output
+                # being invalid.
+                command += ["--write-output", f.name]
+
+                # Run SQLFluff.
+                printable_command = " ".join(
+                    [
+                        c.decode(sys.getfilesystemencoding())
+                        if isinstance(c, bytes)
+                        else c
+                        for c in command
+                    ]
+                )
+                logger.warning(f"{printable_command}")
+                execute(command, self.driver.exit_codes)
+                return [pathlib.Path(f.name).read_text()]
+            finally:
+                os.remove(f.name)
 
     def measured_lines(self, src_path: str) -> None:  # pragma: no cover
         """Return list of the lines in src_path that were measured."""
diff --git a/src/sqlfluff/rules/L001.py b/src/sqlfluff/rules/L001.py
deleted file mode 100644
index 0f2e239..0000000
--- a/src/sqlfluff/rules/L001.py
+++ /dev/null
@@ -1,49 +0,0 @@
-"""Implementation of Rule L001."""
-from typing import List
-from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
-from sqlfluff.core.rules.crawlers import RootOnlyCrawler
-from sqlfluff.core.rules.doc_decorators import (
-    document_fix_compatible,
-    document_groups,
-)
-from sqlfluff.utils.reflow import ReflowSequence
-
-
-@document_groups
-@document_fix_compatible
-class Rule_L001(BaseRule):
-    """Unnecessary trailing whitespace.
-
-    **Anti-pattern**
-
-    The ``•`` character represents a space.
-
-    .. code-block:: sql
-       :force:
-
-        SELECT
-            a
-        FROM foo••
-
-    **Best practice**
-
-    Remove trailing spaces.
-
-    .. code-block:: sql
-
-        SELECT
-            a
-        FROM foo
-    """
-
-    groups = ("all", "core")
-    crawl_behaviour = RootOnlyCrawler()
-
-    def _eval(self, context: RuleContext) -> List[LintResult]:
-        """Unnecessary trailing whitespace.
-
-        Look for newline segments, and then evaluate what
-        it was preceded by.
-        """
-        sequence = ReflowSequence.from_root(context.segment, config=context.config)
-        return sequence.respace(filter="newline").get_results()
diff --git a/src/sqlfluff/rules/L002.py b/src/sqlfluff/rules/L002.py
deleted file mode 100644
index ba85bf6..0000000
--- a/src/sqlfluff/rules/L002.py
+++ /dev/null
@@ -1,81 +0,0 @@
-"""Implementation of Rule L002."""
-from typing import Optional
-
-from sqlfluff.core.rules import BaseRule, LintResult, LintFix, RuleContext
-from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
-
-
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L002(BaseRule):
-    """Mixed Tabs and Spaces in single whitespace.
-
-    This rule will fail if a single section of whitespace
-    contains both tabs and spaces.
-
-    **Anti-pattern**
-
-    The ``•`` character represents a space and the ``→`` character represents a tab.
-    In this example, the second line contains two spaces and one tab.
-
-    .. code-block:: sql
-       :force:
-
-        SELECT
-        ••→a
-        FROM foo
-
-    **Best practice**
-
-    Change the line to use spaces only.
-
-    .. code-block:: sql
-       :force:
-
-        SELECT
-        ••••a
-        FROM foo
-
-    """
-
-    groups = ("all", "core")
-    config_keywords = ["tab_space_size"]
-    crawl_behaviour = SegmentSeekerCrawler({"whitespace"}, provide_raw_stack=True)
-
-    def _eval(self, context: RuleContext) -> Optional[LintResult]:
-        """Mixed Tabs and Spaces in single whitespace.
-
-        Only trigger from whitespace segments if they contain
-        multiple kinds of whitespace.
-        """
-        # Config type hints
-        self.tab_space_size: int
-
-        if context.segment.is_type("whitespace"):
-            if " " in context.segment.raw and "\t" in context.segment.raw:
-                if not context.raw_stack or context.raw_stack[-1].is_type("newline"):
-                    # We've got a single whitespace at the beginning of a line.
-                    # It's got a mix of spaces and tabs. Replace each tab with
-                    # a multiple of spaces
-                    return LintResult(
-                        anchor=context.segment,
-                        fixes=[
-                            LintFix.replace(
-                                context.segment,
-                                [
-                                    context.segment.edit(
-                                        context.segment.raw.replace(
-                                            "\t", " " * self.tab_space_size
-                                        )
-                                    ),
-                                ],
-                            ),
-                        ],
-                    )
-        return None
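Outside of sqlfluff, the substitution this rule applies is easy to sketch; assuming the default tab_space_size of 4 and an illustrative helper name:

    def fix_mixed_indent(ws: str, tab_space_size: int = 4) -> str:
        # Only a whitespace run that mixes both characters is touched;
        # each tab becomes tab_space_size spaces.
        if " " in ws and "\t" in ws:
            return ws.replace("\t", " " * tab_space_size)
        return ws

    assert fix_mixed_indent("  \t") == " " * 6  # two spaces + one tab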
diff --git a/src/sqlfluff/rules/L003.py b/src/sqlfluff/rules/L003.py
deleted file mode 100644
index f09ba99..0000000
--- a/src/sqlfluff/rules/L003.py
+++ /dev/null
@@ -1,1074 +0,0 @@
-"""Implementation of Rule L003."""
-import dataclasses
-import itertools
-from typing import Dict, Iterable, List, Optional, Sequence, Set, Tuple
-
-from sqlfluff.core.parser import WhitespaceSegment
-from sqlfluff.core.parser.segments import BaseSegment
-from sqlfluff.core.rules import BaseRule, LintResult, LintFix, RuleContext
-from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
-from sqlfluff.utils.functional import Segments, rsp, sp
-from sqlfluff.core.templaters import TemplatedFile
-from sqlfluff.core.templaters.base import RawFileSlice
-
-
-@dataclasses.dataclass
-class _LineSummary:
-    """A dataobject to represent a line.
-
-    A _LineSummary is created and then filled with elements,
-    before calling self.finalise to generate a final
-    representation.
-    """
-
-    line_no: int = 0
-    line_buffer: List[BaseSegment] = dataclasses.field(default_factory=list)
-    indent_buffer: List[BaseSegment] = dataclasses.field(default_factory=list)
-    # As of end of line
-    indent_balance: int = 0
-    # As it was as of the "Anchor" / first code elem
-    anchor_indent_balance: int = 0
-    line_anchor: Optional[BaseSegment] = None
-
-    # Fixed calculated values
-    templated_line: Optional[int] = None
-    hanging_indent: Optional[int] = None
-    indent_size: int = 1
-    clean_indent: bool = True
-    templated_line_type: Optional[str] = None
-    is_comment_line: bool = False
-    is_empty_line: bool = False
-    has_code_segment: bool = False
-
-    line_indent_stack: List[int] = dataclasses.field(default_factory=list)
-    hanger_pos: Optional[int] = None
-
-    def __repr__(self) -> str:
-        """Printable Summary without Segments."""
-        keys_to_strip = (
-            "line_buffer",
-            "indent_buffer",
-            "as_of_anchor",
-        )
-        print_dict: Dict = {
-            key: value
-            for key, value in self.__dict__.copy().items()
-            if key not in keys_to_strip
-        }
-        print_dict["raw_line"] = self.template_content
-        return print_dict.__repr__()
-
-    @property
-    def template_content(self):  # pragma: no cover
-        return "".join(
-            seg.raw or getattr(seg, "source_str", "") for seg in self.line_buffer
-        )
-
-    def finalise(self, line_no: int, templated_file: Optional[TemplatedFile]):
-        """Create a final summary from a memo/marker line."""
-        copied_line_buffer = self.line_buffer[:]
-        # Generate our final line summary based on the current state
-        is_comment_line = all(
-            seg.is_type(
-                "whitespace",
-                "comment",
-                "indent",  # dedent is a subtype of indent
-                "end_of_file",
-            )
-            for seg in copied_line_buffer
-        )
-        has_code_segment = any(elem.is_code for elem in copied_line_buffer)
-        has_placeholder = any(
-            elem.is_type("placeholder") for elem in copied_line_buffer
-        )
-        is_empty_line = not has_code_segment and not has_placeholder
-
-        line_summary = self.__class__(
-            line_no=line_no,
-            templated_line=self.templated_line,
-            line_buffer=copied_line_buffer,
-            indent_buffer=self.indent_buffer,
-            indent_size=self.indent_size,
-            indent_balance=self.indent_balance,
-            anchor_indent_balance=self.anchor_indent_balance,
-            hanging_indent=self.hanger_pos if self.line_indent_stack else None,
-            # Clean indent is true if the line *ends* with an indent
-            # or has an indent in the initial whitespace.
-            clean_indent=self.clean_indent,
-            # Solidify expensive immutable characteristics
-            templated_line_type=_get_template_block_type(
-                copied_line_buffer, templated_file
-            ),
-            is_comment_line=is_comment_line,
-            is_empty_line=is_empty_line,
-            has_code_segment=has_code_segment,
-        )
-        return line_summary
-
-
-def _set_line_anchor(
-    line: _LineSummary,
-    anchor: Optional[BaseSegment],
-    tab_space_size: int,
-):
-    """Create a Line state of this line upon reaching the anchor."""
-    line.anchor_indent_balance = line.indent_balance
-    line.indent_size = _indent_size(
-        line.indent_buffer,
-        tab_space_size=tab_space_size,
-    )
-    line.line_anchor = anchor
-
-    return line
-
-
-def _is_clean_indent(prev_line_buffer: List[BaseSegment]):
-    """Check the previous line to see if the current state is a clean indent."""
-    # Assume an unclean indent, but if the last line
-    # ended with an indent then we might be ok.
-    # Was there an indent after the last code element of the previous line?
-    for search_elem in reversed(prev_line_buffer):
-        is_meta = search_elem.is_meta
-        if not search_elem.is_code and not is_meta:
-            continue
-        if is_meta and search_elem.indent_val > 0:  # type: ignore
-            return True
-        break
-
-    return False
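To make the backwards scan above concrete, here is a toy rendering that stands in (is_code, is_meta, indent_val) tuples for real segments; the names are illustrative only:

    def is_clean(prev_line):
        # Walk the previous line backwards: skip non-code elements, and
        # report a clean indent only if the last code-adjacent element
        # is an indent meta with a positive indent value.
        for is_code, is_meta, indent_val in reversed(prev_line):
            if not is_code and not is_meta:
                continue
            if is_meta and indent_val > 0:
                return True
            break
        return False

    assert is_clean([(True, False, 0), (False, True, 1)])      # ends with indent
    assert not is_clean([(False, True, 1), (True, False, 0)])  # code came after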
-
-
-@dataclasses.dataclass
-class _Memory:
-    problem_lines: Set[int] = dataclasses.field(default_factory=set)
-    # hanging_lines keeps track of hanging lines so that we don't
-    # compare to them when assessing indent.
-    hanging_lines: Set[int] = dataclasses.field(default_factory=set)
-    comment_lines: Set[int] = dataclasses.field(default_factory=set)
-    line_summaries: Dict[int, _LineSummary] = dataclasses.field(default_factory=dict)
-
-    in_indent: bool = True
-    trigger: Optional[BaseSegment] = None
-
-    line_no: int = dataclasses.field(default=1)
-    start_process_raw_idx: int = dataclasses.field(default=0)
-
-    @property
-    def noncomparable_lines(self):
-        return self.hanging_lines.union(self.problem_lines)
-
-
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L003(BaseRule):
-    """Indentation not consistent with previous lines.
-
-    **Anti-pattern**
-
-    The ``•`` character represents a space.
-    In this example, the third line contains five spaces instead of four.
-
-    .. code-block:: sql
-       :force:
-
-        SELECT
-        ••••a,
-        •••••b
-        FROM foo
-
-
-    **Best practice**
-
-    Change the indentation to use a multiple of four spaces.
-
-    .. code-block:: sql
-       :force:
-
-        SELECT
-        ••••a,
-        ••••b
-        FROM foo
-
-    """
-
-    groups = ("all", "core")
-    # This rule is mostly a raw crawler, so not much performance gain to be
-    # had from being more specific.
-    crawl_behaviour = SegmentSeekerCrawler({"raw"}, provide_raw_stack=True)
-    targets_templated = True
-    _adjust_anchors = True
-    _ignore_types: List[str] = ["script_content"]
-    config_keywords = ["tab_space_size", "indent_unit", "hanging_indents"]
-
-    @staticmethod
-    def _make_indent(
-        num: int = 1, tab_space_size: int = 4, indent_unit: str = "space"
-    ) -> str:
-        if indent_unit == "tab":
-            return "\t" * num
-        if indent_unit == "space":
-            return " " * tab_space_size * num
-
-        raise ValueError(
-            f"Parameter indent_unit has unexpected value: `{indent_unit}`. Expected"
-            " `tab` or `space`."
-        )
-
-    @staticmethod
-    def _indent_size(segments: Sequence[BaseSegment], tab_space_size: int = 4) -> int:
-        return _indent_size(segments, tab_space_size)
-
-    @classmethod
-    def _process_raw_stack(
-        cls,
-        raw_stack: Tuple[BaseSegment, ...],
-        memory: _Memory,
-        tab_space_size: int = 4,
-        templated_file: Optional[TemplatedFile] = None,
-    ) -> Dict[int, _LineSummary]:
-        """Take the raw stack, split into lines and evaluate some stats."""
-        result_buffer: Dict[int, _LineSummary] = memory.line_summaries
-        cached_line_count = len(result_buffer)
-        starting_indent_balance = 0
-        if cached_line_count:
-            starting_indent_balance = result_buffer[cached_line_count].indent_balance
-
-        working_state = _LineSummary(indent_balance=starting_indent_balance)
-
-        line_no = memory.line_no
-        target_line_no = cached_line_count + 1
-        for idx, elem in enumerate(raw_stack[memory.start_process_raw_idx :]):
-            is_newline = elem.is_type("newline")
-            if line_no < target_line_no:
-                if is_newline:
-                    line_no += 1
-                    if line_no == target_line_no:
-                        memory.start_process_raw_idx += idx + 1
-                        memory.line_no = line_no
-                        working_state.templated_line = elem.is_templated
-                continue
-
-            working_state.line_buffer.append(elem)
-            # Pin indent_balance to above zero
-            if working_state.indent_balance < 0:
-                working_state.indent_balance = 0
-
-            if is_newline:
-                result_buffer[line_no] = working_state.finalise(line_no, templated_file)
-                # Set the "templated_line" if the newline that ended the *current* line
-                # was in templated space. Reason: We want to ignore indentation of lines
-                # not present in the raw (pre-templated) code.
-                working_state = _LineSummary(
-                    indent_balance=working_state.indent_balance,
-                    clean_indent=_is_clean_indent(working_state.line_buffer),
-                    templated_line=elem.is_templated,
-                )
-                line_no += 1
-                continue
-
-            if working_state.line_anchor is None:
-                working_state = cls._process_pre_anchor(
-                    elem, working_state, tab_space_size
-                )
-                # If we hit the trigger element, stop processing.
-                if elem is memory.trigger:
-                    break
-                continue
-
-            if elem.is_meta and elem.indent_val != 0:  # type: ignore
-                working_state = cls._process_line_indents(
-                    elem, working_state, tab_space_size
-                )
-                continue
-
-            elif elem.is_code and working_state.hanger_pos is None:
-                working_state.hanger_pos = cls._indent_size(
-                    working_state.line_buffer[:-1], tab_space_size=tab_space_size
-                )
-
-        # If we get to the end, and still have a buffer, add it on
-        if working_state.line_buffer:
-            result_buffer[line_no] = working_state.finalise(
-                line_no,
-                templated_file,
-            )
-        return result_buffer
-
-    @classmethod
-    def _process_line_indents(
-        cls,
-        elem: BaseSegment,
-        working_state: _LineSummary,
-        tab_space_size: int,
-    ) -> _LineSummary:
-        working_state.indent_balance += elem.indent_val  # type: ignore
-        if elem.indent_val > 0:  # type: ignore
-            # Record the indent size as of this (most recent) indent token.
-            working_state.line_indent_stack.append(
-                cls._indent_size(
-                    working_state.line_buffer, tab_space_size=tab_space_size
-                )
-            )
-            working_state.hanger_pos = None
-            return working_state
-        # this is a dedent, we could still have a hanging indent,
-        # but only if there's enough on the stack
-        if working_state.line_indent_stack:
-            working_state.line_indent_stack.pop()
-        return working_state
-
-    @classmethod
-    def _process_pre_anchor(
-        cls,
-        elem: BaseSegment,
-        working_state: _LineSummary,
-        tab_space_size: int,
-    ) -> _LineSummary:
-        if elem.is_whitespace:
-            working_state.indent_buffer.append(elem)
-            return working_state
-        if elem.is_meta and elem.indent_val != 0:  # type: ignore
-            working_state.indent_balance += elem.indent_val  # type: ignore
-            if elem.indent_val > 0:  # type: ignore
-                # a "clean" indent is one where it contains
-                # an increase in indentation? Can't quite
-                # remember the logic here. Let's go with that.
-                working_state.clean_indent = True
-            return working_state
-
-        return _set_line_anchor(working_state, elem, tab_space_size)
-
-    def _coerce_indent_to(
-        self,
-        desired_indent: str,
-        current_indent_buffer: List[BaseSegment],
-        current_anchor: BaseSegment,
-    ) -> List[LintFix]:
-        """Generate fixes to make an indent a certain size.
-
-        Rather than blindly creating indent, we should _edit_
-        if at all possible, this stops other rules trying to
-        remove floating double indents.
-        """
-        existing_whitespace = [
-            seg for seg in current_indent_buffer if seg.is_type("whitespace")
-        ]
-        # Should we have an indent?
-        if len(desired_indent) == 0:
-            # No? Just delete everything
-            return [LintFix.delete(seg) for seg in existing_whitespace]
-        else:
-            # Is there already an indent?
-            if existing_whitespace:
-                # Edit the first, delete the rest.
-                edit_fix = LintFix.replace(
-                    existing_whitespace[0],
-                    [existing_whitespace[0].edit(desired_indent)],
-                )
-                delete_fixes = [LintFix.delete(seg) for seg in existing_whitespace[1:]]
-                return [edit_fix] + delete_fixes
-            else:
-                # Just create an indent.
-                return [
-                    LintFix.create_before(
-                        current_anchor,
-                        [
-                            WhitespaceSegment(
-                                raw=desired_indent,
-                            ),
-                        ],
-                    )
-                ]
-
-    def _eval(self, context: RuleContext) -> Optional[LintResult]:
-        """Indentation not consistent with previous lines.
-
-        To set the default tab size, set the `tab_space_size` value
-        in the appropriate configuration.
-
-        We compare each line (first non-whitespace element of the
-        line), with the indentation of previous lines. The presence
-        (or lack) of indent or dedent meta-characters indicate whether
-        the indent is appropriate.
-
-        - Any line is assessed by the indent level at the first non
-          whitespace element.
-        - Any increase in indentation may be _up to_ the number of
-          indent characters.
-        - Any line must be in line with the previous line which had
-          the same indent balance at its start.
-        - Apart from "whole" indents, a "hanging" indent is possible
-          if the line starts in line with either the indent of the
-          previous line or if it starts at the same indent as the *last*
-          indent meta segment in the previous line.
-
-        """
-        # Config type hints
-        self.tab_space_size: int
-        self.indent_unit: str
-        self.hanging_indents: bool
-        segment = context.segment
-        memory: _Memory = context.memory or _Memory()
-        raw_stack: Tuple[BaseSegment, ...] = context.raw_stack
-        if raw_stack and raw_stack[-1] is not context.segment:
-            raw_stack = raw_stack + (segment,)
-
-        is_ignorable = any(
-            el.is_type(*self._ignore_types) for el in context.parent_stack + (segment,)
-        )
-        if is_ignorable:
-            return LintResult(memory=memory)
-
-        if segment.is_type("newline"):
-            memory.in_indent = True
-        elif memory.in_indent:
-            has_children = bool(segment.segments)
-            if not (segment.is_whitespace or has_children or segment.is_type("indent")):
-                memory.in_indent = False
-                # First non-whitespace element is our trigger
-                memory.trigger = segment
-
-        if not segment.is_type("newline", "end_of_file"):
-            # Process on line ends or file end
-            return LintResult(memory=memory)
-
-        line_summaries = self._process_raw_stack(
-            raw_stack=raw_stack,
-            memory=memory,
-            tab_space_size=self.tab_space_size,
-            templated_file=context.templated_file,
-        )
-        memory.line_summaries = line_summaries
-        trigger_segment = memory.trigger
-        memory.trigger = None
-        if line_summaries and trigger_segment:
-            last_line_no = max(line_summaries.keys())
-            this_line = line_summaries[last_line_no]
-            result = self._process_working_state(memory, trigger_segment)
-            # Template lines don't need fixes
-            # However we do need the mutations from the processing.
-            if not this_line.templated_line:
-                return result
-
-        return LintResult(memory=memory)
-
-    def _process_working_state(
-        self,
-        memory: _Memory,
-        trigger_segment: BaseSegment,
-    ) -> LintResult:
-        """Checks indentation of one line of code, returning a LintResult.
-
-        The _eval() function calls it for the current line of code:
-        - When passed a newline segment (thus ending a line)
-        - When passed the *final* segment in the entire parse tree (which may
-          not be a newline)
-        """
-        line_summaries = memory.line_summaries
-        this_line_no = max(line_summaries.keys())
-        this_line: _LineSummary = line_summaries.pop(this_line_no)
-        self.logger.debug(
-            "Evaluating line #%s. %s",
-            this_line_no,
-            this_line,
-        )
-
-        if this_line.is_comment_line:
-            # Comment line, deal with it later.
-            memory.comment_lines.add(this_line_no)
-            self.logger.debug("    Comment Line. #%s", this_line_no)
-            return LintResult(memory=memory)
-
-        if this_line.line_buffer and this_line.line_buffer[0].is_type(
-            "end_of_file"
-        ):  # pragma: no cover
-            # This is just the end of the file.
-            self.logger.debug("    Just end of file. #%s", this_line_no)
-            return LintResult(memory=memory)
-
-        previous_line_numbers = sorted(line_summaries.keys(), reverse=True)
-        # we will iterate this more than once
-        previous_lines = [line_summaries[k] for k in previous_line_numbers]
-
-        # handle hanging indents if allowed
-        hanger_res = self.hanging_indents and self._handle_hanging_indents(
-            this_line, previous_lines, memory
-        )
-        if hanger_res:
-            return hanger_res
-
-        # Is this an indented first line?
-        if this_line.line_no == 1 and this_line.indent_size > 0:
-            self.logger.debug("    Indented First Line. #%s", this_line_no)
-            return LintResult(
-                anchor=trigger_segment,
-                memory=memory,
-                description="First line has unexpected indent",
-                fixes=[LintFix.delete(elem) for elem in this_line.indent_buffer],
-            )
-
-        # Special handling for template end/mid blocks on a line by themselves.
-        # NOTE: Mid blocks (i.e. TemplateLoop segments) behave like ends here, but
-        # don't otherwise have the same indent balance implications.
-        if this_line.templated_line_type in ("end", "mid"):
-            return self._handle_template_blocks(
-                this_line=this_line,
-                trigger_segment=trigger_segment,
-                previous_lines=previous_lines,
-                memory=memory,
-            )
-        # Assuming it's not a hanger, let's compare it to the other previous
-        # lines. We do it in reverse so that closer lines are more relevant.
-
-        prev_line = _find_previous_line(
-            this_line,
-            previous_lines,
-            memory.noncomparable_lines,
-        )
-
-        if not prev_line:
-            return LintResult(memory=memory)
-        prev_line_no = prev_line.line_no
-        indent_diff = this_line.anchor_indent_balance - prev_line.anchor_indent_balance
-        this_indent_num, this_indent_rem = divmod(
-            this_line.indent_size, self.tab_space_size
-        )
-        has_partial_indent = bool(this_indent_rem)
-        comp_indent_num = prev_line.indent_size // self.tab_space_size
-        # Is the indent balance the same?
-        if indent_diff == 0:
-            self.logger.debug(
-                "    [same indent balance] Comparing to #%s",
-                prev_line_no,
-            )
-            if this_line.indent_size != prev_line.indent_size:
-                # Indents don't match even though balance is the same...
-                memory.problem_lines.add(this_line_no)
-
-                # Work out desired indent
-                desired_indent = self._make_indent(
-                    indent_unit=self.indent_unit,
-                    tab_space_size=self.tab_space_size,
-                    num=comp_indent_num,
-                )
-
-                fixes = self._coerce_indent_to(
-                    desired_indent=desired_indent,
-                    current_indent_buffer=this_line.indent_buffer,
-                    current_anchor=trigger_segment,
-                )
-                self.logger.debug(
-                    "    !! Indentation does not match #%s. Fixes: %s",
-                    prev_line_no,
-                    fixes,
-                )
-                return LintResult(
-                    anchor=trigger_segment,
-                    memory=memory,
-                    description=_Desc(
-                        expected=comp_indent_num,
-                        found=this_indent_num,
-                        has_partial_indent=has_partial_indent,
-                        compared_to=prev_line.line_no,
-                    ),
-                    fixes=fixes,
-                )
-        # Are we at a deeper indent?
-        elif indent_diff > 0:
-            self.logger.debug(
-                "    [deeper indent balance] Comparing to #%s",
-                prev_line_no,
-            )
-            # NB: We shouldn't need to deal with correct hanging indents
-            # here, they should already have been dealt with before. We
-            # may still need to deal with *creating* hanging indents if
-            # appropriate.
-            self.logger.debug("    Comparison Line: %s", prev_line)
-
-            # Check to see if we've got a whole number of multiples. If
-            # we do then record the number for later, otherwise raise
-            # an error. We do the comparison here so we have a reference
-            # point to do the repairs. We need a sensible previous line
-            # to base the repairs off. If there's no indent at all, then
-            # we should also take this route because there SHOULD be one.
-            if this_line.indent_size % self.tab_space_size != 0:
-                memory.problem_lines.add(this_line_no)
-
-                # The default indent simply reconstructs the indent from
-                # the indent size.
-                desired_indent = self._make_indent(
-                    indent_unit=self.indent_unit,
-                    tab_space_size=self.tab_space_size,
-                    num=indent_diff + this_indent_num,
-                )
-                # If we have the option of a hanging indent then use it.
-                if self.hanging_indents and prev_line.hanging_indent:
-                    self.logger.debug("        Use hanging indent.")
-                    desired_indent = " " * prev_line.hanging_indent
-
-                fixes = self._coerce_indent_to(
-                    desired_indent=desired_indent,
-                    current_indent_buffer=this_line.indent_buffer,
-                    current_anchor=trigger_segment,
-                )
-                return LintResult(
-                    anchor=trigger_segment,
-                    memory=memory,
-                    description=_Desc(
-                        expected=len(desired_indent) // self.tab_space_size,
-                        found=this_indent_num,
-                        has_partial_indent=has_partial_indent,
-                        compared_to=prev_line.line_no,
-                    ),
-                    fixes=fixes,
-                )
-
-            # The indent number should be at least 1, and can be UP TO
-            # and including the difference in the indent balance.
-            if comp_indent_num == this_indent_num:
-                # We have two lines indented the same, but with a different starting
-                # indent balance. This is either a problem OR a sign that one of the
-                # opening indents wasn't used. We account for the latter and then
-                # have a violation if that wasn't the case.
-
-                # Does the comparison line have enough unused indent to get us back
-                # to where we need to be? NB: This should only be applied if this is
-                # a CLOSING bracket.
-
-                # First work out if we have some closing brackets, and if so, how
-                # many.
-                b_num = 0
-                for elem in this_line.line_buffer:
-                    if not elem.is_code:
-                        continue
-                    if elem.is_type("end_bracket", "end_square_bracket"):
-                        b_num += 1
-                        continue
-                    break  # pragma: no cover
-
-                if b_num < indent_diff:
-                    # It doesn't. That means we *should* have an indent when
-                    # compared to this line and we DON'T.
-                    memory.problem_lines.add(this_line_no)
-                    return LintResult(
-                        anchor=trigger_segment,
-                        memory=memory,
-                        description=_Desc(
-                            expected=this_indent_num + 1,
-                            found=this_indent_num,
-                            has_partial_indent=has_partial_indent,
-                            compared_to=prev_line.line_no,
-                        ),
-                        # Coerce the indent to what we think it should be.
-                        fixes=self._coerce_indent_to(
-                            desired_indent=self._make_indent(
-                                num=this_indent_num + 1,
-                                tab_space_size=self.tab_space_size,
-                                indent_unit=self.indent_unit,
-                            ),
-                            current_indent_buffer=this_line.indent_buffer,
-                            current_anchor=trigger_segment,
-                        ),
-                    )
-            elif (
-                this_indent_num < comp_indent_num
-                or this_indent_num > comp_indent_num + indent_diff
-            ):
-                memory.problem_lines.add(this_line_no)
-                desired_indent = self._make_indent(
-                    num=comp_indent_num,
-                    indent_unit=self.indent_unit,
-                    tab_space_size=self.tab_space_size,
-                )
-                fixes = self._coerce_indent_to(
-                    desired_indent=desired_indent,
-                    current_indent_buffer=this_line.indent_buffer,
-                    current_anchor=trigger_segment,
-                )
-                return LintResult(
-                    anchor=trigger_segment,
-                    memory=memory,
-                    description=_Desc(
-                        expected=comp_indent_num,
-                        found=this_indent_num,
-                        has_partial_indent=has_partial_indent,
-                        compared_to=prev_line.line_no,
-                    ),
-                    fixes=fixes,
-                )
-
-        # This was a valid comparison, so if it doesn't flag then
-        # we can assume that we're ok.
-        self.logger.debug("    Indent deemed ok comparing to #%s", prev_line_no)
-        comment_fix = self._calculate_comment_fixes(
-            memory, previous_line_numbers, this_line
-        )
-        return comment_fix or LintResult(memory=memory)
-
-    def _calculate_comment_fixes(
-        self, memory: _Memory, previous_line_numbers: List[int], this_line: _LineSummary
-    ) -> Optional[LintResult]:
-        # Given that this line is ok, consider if the preceding lines are
-        # comments. If they are, lint the indentation of the comment(s).
-        fixes: List[LintFix] = []
-        anchor: Optional[BaseSegment] = None
-        for n in previous_line_numbers:
-            if n not in memory.comment_lines:
-                break
-            # The previous line WAS a comment.
-            prev_line = memory.line_summaries[n]
-            if this_line.indent_size != prev_line.indent_size:
-                # It's not aligned.
-                # Find the anchor first.
-                for seg in prev_line.line_buffer:
-                    if seg.is_type("comment"):
-                        anchor = seg
-                        break
-
-                if not anchor:  # pragma: no cover
-                    continue
-
-                fixes += self._coerce_indent_to(
-                    desired_indent="".join(
-                        elem.raw for elem in this_line.indent_buffer
-                    ),
-                    current_indent_buffer=prev_line.indent_buffer,
-                    current_anchor=anchor,
-                )
-
-                memory.problem_lines.add(n)
-
-        if not fixes:
-            return None
-
-        return LintResult(
-            anchor=anchor,
-            memory=memory,
-            description="Comment not aligned with following line.",
-            fixes=fixes,
-        )
-
-    def _handle_hanging_indents(
-        self,
-        this_line: _LineSummary,
-        previous_lines: List[_LineSummary],
-        memory: _Memory,
-    ) -> Optional[LintResult]:
-        if len(previous_lines) == 0:
-            return None
-
-        last_line = _find_last_meaningful_line(previous_lines)
-        if not last_line:
-            return None
-        # Handle Hanging Indents
-        is_anchor_indent_match = (
-            this_line.anchor_indent_balance == last_line.anchor_indent_balance
-        )
-        is_end_indent_match = this_line.indent_size == last_line.indent_size
-        is_known_hanging_line = last_line.line_no in memory.hanging_lines
-        # There MUST also be a non-zero indent. Otherwise we're just on the
-        # baseline.
-        if this_line.indent_size <= 0:
-            return None
-
-        # NB: Hangers are only allowed if there was content after the last
-        # indent on the previous line. Otherwise it's just an indent.
-        is_hanging_match = this_line.indent_size == last_line.hanging_indent
-        # They're also allowed if the indent balance and the indent size
-        # match those of the previous line AND that line was a hanger.
-        is_matching_previous = (
-            is_anchor_indent_match and is_end_indent_match and is_known_hanging_line
-        )
-        if not is_matching_previous and not is_hanging_match:
-            return None
-        memory.hanging_lines.add(this_line.line_no)
-        self.logger.debug("    Hanger Line. #%s", this_line.line_no)
-        self.logger.debug("    Last Line: %s", last_line)
-        return LintResult(memory=memory)
-
-    def _handle_template_blocks(
-        self,
-        this_line: _LineSummary,
-        trigger_segment: BaseSegment,
-        previous_lines: List[_LineSummary],
-        memory: _Memory,
-    ):
-        # For a template block end on a line by itself, search for a
-        # matching block start on a line by itself. If there is one, match
-        # its indentation. Question: Could we avoid treating this as a
-        # special case? It has some similarities to the non-templated test
-        # case test/fixtures/linter/indentation_error_contained.sql, in that
-        # both have lines where anchor_indent_balance drops 2 levels from one line
-        # to the next, making it a bit unclear how to indent that line.
-        template_line = _find_matching_start_line(previous_lines)
-        # In rare circumstances there may be unbalanced pairs
-        if not template_line:
-            return LintResult(memory=memory)  # pragma: no cover
-
-        if template_line.line_no in memory.noncomparable_lines:
-            return LintResult(memory=memory)
-
-        self.logger.debug(
-            "    [template block end] Comparing to #%s", template_line.line_no
-        )
-        if this_line.indent_size == template_line.indent_size:
-            return LintResult(memory=memory)
-
-        memory.problem_lines.add(this_line.line_no)
-
-        # The previous indent.
-        desired_indent = "".join(elem.raw for elem in template_line.indent_buffer)
-        first_non_indent_i = len(this_line.indent_buffer)
-        current_anchor = this_line.line_buffer[first_non_indent_i]
-        fixes = self._coerce_indent_to(
-            desired_indent=desired_indent,
-            current_indent_buffer=this_line.indent_buffer,
-            current_anchor=current_anchor,
-        )
-        self.logger.debug(
-            "    !! Indentation does not match #%s. Fixes: %s",
-            template_line.line_no,
-            fixes,
-        )
-        return LintResult(
-            anchor=trigger_segment,
-            memory=memory,
-            description=_Desc(
-                len(desired_indent) // self.tab_space_size,
-                this_line.indent_size,
-                template_line.line_no,
-            ),
-            fixes=fixes,
-        )
-
-
-class _TemplateLineInterpreter:
-    start_blocks = (
-        ("placeholder", "block_start"),
-        ("placeholder", "compound"),
-        ("placeholder", "literal"),
-        ("placeholder", "block_mid"),
-    )
-    indent_types = (
-        ("indent", None),
-        ("newline", None),
-    )
-    valid_start_combos = list(
-        itertools.product(
-            start_blocks,
-            indent_types,
-        )
-    )
-    dedent_types = (("dedent", None),)
-    end_block = (
-        ("placeholder", "block_end"),
-        ("placeholder", "compound"),
-        ("placeholder", "block_mid"),
-    )
-    valid_end_combos = list(
-        itertools.product(
-            dedent_types,
-            end_block,
-        )
-    )
-
-    def __init__(
-        self,
-        working_state: List[BaseSegment],
-        templated_file: Optional[TemplatedFile],
-    ) -> None:
-        self.working_state = [el for el in working_state if not el.is_whitespace]
-        self.templated_file = templated_file
-        self._adjacent_pairs: Optional[
-            List[Tuple[Tuple[str, Optional[str]], Tuple[str, Optional[str]]]]
-        ] = None
-
-    def is_single_placeholder_line(self):
-        count_placeholder = 0
-        for seg in self.working_state:
-            if seg.is_code:
-                return False
-            elif seg.is_type("placeholder"):
-                count_placeholder += 1
-
-        return count_placeholder == 1
-
-    def is_template_loop_line(self):
-        for seg in self.working_state:
-            if seg.is_code:
-                return False
-            if seg.is_type("template_loop"):
-                return True
-        return False
-
-    def list_segment_and_raw_segment_types(self) -> Iterable[Tuple[str, Optional[str]]]:
-        """Yields the tuple of seg type and underlying type were applicable."""
-        for seg in self.working_state:
-            raw_seg = self.get_raw_slices(seg)
-            raw_str = raw_seg[0].slice_type if raw_seg else None
-            yield (seg.type, raw_str)
-
-    def get_adjacent_type_pairs(self):
-        """Produce a list of pairs of each sequenctial combo of two."""
-        if self._adjacent_pairs:
-            return self._adjacent_pairs
-        iterable = self.list_segment_and_raw_segment_types()
-        a, b = itertools.tee(iterable)
-        # consume the first item in b
-        next(b, None)
-        self._adjacent_pairs = list(zip(a, b))
-        return self._adjacent_pairs
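The tee/zip idiom used here is worth seeing in isolation (since Python 3.10 the standard library offers it as itertools.pairwise):

    import itertools

    def pairwise(iterable):
        # Pair each element with its successor, as in
        # get_adjacent_type_pairs() above.
        a, b = itertools.tee(iterable)
        next(b, None)  # advance the second copy by one
        return list(zip(a, b))

    assert pairwise("abc") == [("a", "b"), ("b", "c")]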
-
-    def is_block_start(self):
-        return any(
-            pair in self.valid_start_combos for pair in self.get_adjacent_type_pairs()
-        )
-
-    def is_block_end(self):
-        return any(
-            pair in self.valid_end_combos for pair in self.get_adjacent_type_pairs()
-        )
-
-    def block_type(self) -> Optional[str]:
-        """Return a block_type enum."""
-        if not self.templated_file:
-            return None
-
-        if self.is_template_loop_line():
-            return "mid"
-
-        if not self.is_single_placeholder_line():
-            return None
-
-        if self.is_block_end():
-            return "end"
-
-        if self.is_block_start():
-            return "start"
-
-        return None
-
-    def get_raw_slices(self, elem: BaseSegment) -> Optional[List[RawFileSlice]]:
-        if not self.templated_file:  # pragma: no cover
-            return None
-
-        if not elem.is_type("placeholder"):
-            return None
-
-        assert elem.pos_marker, "TypeGuard"
-        slices = self.templated_file.raw_slices_spanning_source_slice(
-            elem.pos_marker.source_slice
-        )
-        return slices or None
-
-
-def _get_template_block_type(
-    line_buffer: List[BaseSegment],
-    templated_file: Optional[TemplatedFile] = None,
-):
-    """Convenience fn for getting 'start', 'end' etc of a placeholder line."""
-    template_info = _TemplateLineInterpreter(line_buffer, templated_file)
-    return template_info.block_type()
-
-
-def _segment_length(elem: BaseSegment, tab_space_size: int):
-    # Start by assuming the typical case, where we need not consider slices
-    # or templating.
-    raw = elem.raw
-
-    # If it's whitespace, it might be a mixture of literal and templated
-    # whitespace. Check for this.
-    if elem.is_type("whitespace") and elem.is_templated:
-        # Templated case: Find the leading *literal* whitespace.
-        assert elem.pos_marker
-        templated_file = elem.pos_marker.templated_file
-        # Extract the leading literal whitespace, slice by slice.
-        raw = ""
-        for raw_slice in Segments(
-            elem, templated_file=templated_file
-        ).raw_slices.select(loop_while=rsp.is_slice_type("literal")):
-            # Compute and append raw_slice's contribution.
-            raw += sp.raw_slice(elem, raw_slice)
-
-    # convert to spaces for convenience (and hanging indents)
-    return raw.replace("\t", " " * tab_space_size)
-
-
-def _indent_size(segments: Sequence[BaseSegment], tab_space_size: int = 4) -> int:
-    indent_size = 0
-    for elem in segments:
-        raw = _segment_length(elem, tab_space_size)
-        indent_size += len(raw)
-    return indent_size
-
-
-def _find_last_meaningful_line(
-    previous_lines: List[_LineSummary],
-) -> Optional[_LineSummary]:
-    # Find last meaningful line indent.
-    for line in previous_lines:
-        if line.has_code_segment:
-            return line
-
-    return None
-
-
-def _find_previous_line(
-    this_line: _LineSummary,
-    previous_lines: List[_LineSummary],
-    ignorable_lines: Set[int],
-) -> Optional[_LineSummary]:
-    for prev_line in previous_lines:
-        should_ignore = prev_line.line_no in ignorable_lines
-        if should_ignore or prev_line.is_empty_line:
-            continue
-
-        # Work out the difference in indent
-        indent_diff = this_line.anchor_indent_balance - prev_line.anchor_indent_balance
-        # If we're comparing to a previous, more deeply indented line,
-        # then skip and keep looking.
-        if indent_diff < 0:
-            continue
-        return prev_line
-    return None
-
-
-def _find_matching_start_line(
-    previous_lines: List[_LineSummary],
-) -> Optional[_LineSummary]:
-    template_block_level = -1
-    for template_line in previous_lines:
-        if not template_line.templated_line_type:
-            continue
-        if template_line.templated_line_type == "end":
-            template_block_level -= 1
-        else:
-            template_block_level += 1
-
-        if template_block_level != 0:
-            continue
-
-        return template_line
-    return None  # pragma: no cover
-
-
-def _Desc(
-    expected: int, found: int, compared_to: int, has_partial_indent: bool = False
-) -> str:
-    indentations = "indentation" if expected == 1 else "indentations"
-    if found >= expected and has_partial_indent:
-        found_explanation = f"more than {found}"
-    elif found < expected and has_partial_indent:
-        found_explanation = f"less than {found + 1}"
-    else:
-        found_explanation = str(found)
-    return (
-        f"Expected {expected} {indentations},"
-        f" found {found_explanation} [compared to line {compared_to:02}]"
-    )
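Two of these helpers are easy to exercise standalone. A minimal sketch (helper name illustrative) of the tab-expanding size measurement, plus the message _Desc would produce:

    def indent_size(ws: str, tab_space_size: int = 4) -> int:
        # Mirror _indent_size: measure after expanding tabs to spaces.
        return len(ws.replace("\t", " " * tab_space_size))

    assert indent_size("\t ") == 5  # one tab (-> 4 spaces) plus one space

    # _Desc(expected=2, found=1, compared_to=3) formats as:
    # "Expected 2 indentations, found 1 [compared to line 03]"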
diff --git a/src/sqlfluff/rules/L004.py b/src/sqlfluff/rules/L004.py
deleted file mode 100644
index 322305b..0000000
--- a/src/sqlfluff/rules/L004.py
+++ /dev/null
@@ -1,109 +0,0 @@
-"""Implementation of Rule L004."""
-from sqlfluff.core.parser import WhitespaceSegment
-from sqlfluff.core.rules import BaseRule, LintResult, LintFix, RuleContext
-from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
-
-
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L004(BaseRule):
-    """Incorrect indentation type.
-
-    .. note::
-       Note 1: spaces are only fixed to tabs if the number of spaces in the
-       indent is an integer multiple of the ``tab_space_size`` config.
-
-       Note 2: fixes are only applied to indents at the start of a line. Indents
-       after other text on the same line are not fixed.
-
-    **Anti-pattern**
-
-    Using tabs instead of spaces when ``indent_unit`` config set to ``space`` (default).
-
-    .. code-block:: sql
-       :force:
-
-        select
-        ••••a,
-        →   b
-        from foo
-
-    **Best practice**
-
-    Change the line to use spaces only.
-
-    .. code-block:: sql
-       :force:
-
-        select
-        ••••a,
-        ••••b
-        from foo
-    """
-
-    groups = ("all", "core")
-    config_keywords = ["indent_unit", "tab_space_size"]
-    crawl_behaviour = SegmentSeekerCrawler({"whitespace"}, provide_raw_stack=True)
-
-    # TODO fix indents after text:
-    # https://github.com/sqlfluff/sqlfluff/pull/590#issuecomment-739484190
-    def _eval(self, context: RuleContext) -> LintResult:
-        """Incorrect indentation found in file."""
-        # Config type hints
-        self.tab_space_size: int
-        self.indent_unit: str
-
-        tab = "\t"
-        space = " "
-        correct_indent = self.indent
-        wrong_indent = (
-            tab if self.indent_unit == "space" else space * self.tab_space_size
-        )
-        if (
-            context.segment.is_type("whitespace")
-            and wrong_indent in context.segment.raw
-        ):
-            fixes = []
-            description = "Incorrect indentation type found in file."
-            edit_indent = context.segment.raw.replace(wrong_indent, correct_indent)
-            pre_seg = context.raw_stack[-1] if context.raw_stack else None
-            # Ensure that the number of space indents is a multiple of tab_space_size
-            # before attempting to convert spaces to tabs to avoid mixed indents
-            # unless we are converting tabs to spaces (indent_unit = space)
-            if (
-                (
-                    self.indent_unit == "space"
-                    or context.segment.raw.count(space) % self.tab_space_size == 0
-                )
-                # Only attempt a fix at the start of a newline for now
-                and (pre_seg is None or pre_seg.is_type("newline"))
-            ):
-                fixes = [
-                    LintFix.replace(
-                        context.segment,
-                        [
-                            WhitespaceSegment(raw=edit_indent),
-                        ],
-                    )
-                ]
-            elif not (pre_seg is None or pre_seg.is_type("newline")):
-                # give a helpful message if the wrong indent has been found and is not
-                # at the start of a newline
-                description += (
-                    " The indent occurs after other text, so a manual fix is needed."
-                )
-            else:
-                # If we get here, the indent_unit is tabs, and the number of spaces is
-                # not a multiple of tab_space_size
-                description += " The number of spaces is not a multiple of "
-                "tab_space_size, so a manual fix is needed."
-            return LintResult(
-                anchor=context.segment, fixes=fixes, description=description
-            )
-        return LintResult()
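Stripped of its guards (multiples of tab_space_size, start-of-line only), the conversion at the heart of this rule is a plain substitution; a hedged sketch with an illustrative name:

    def convert_indent(ws: str, indent_unit: str = "space",
                       tab_space_size: int = 4) -> str:
        # With indent_unit="space" a tab is the wrong indent; with
        # indent_unit="tab" it's a run of tab_space_size spaces.
        if indent_unit == "space":
            return ws.replace("\t", " " * tab_space_size)
        return ws.replace(" " * tab_space_size, "\t")

    assert convert_indent("\t") == "    "
    assert convert_indent("    ", indent_unit="tab") == "\t"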
diff --git a/src/sqlfluff/rules/L005.py b/src/sqlfluff/rules/L005.py
deleted file mode 100644
index 1cb469e..0000000
--- a/src/sqlfluff/rules/L005.py
+++ /dev/null
@@ -1,64 +0,0 @@
-"""Implementation of Rule L005."""
-from typing import List
-
-from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
-from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
-from sqlfluff.utils.reflow.sequence import ReflowSequence
-
-
-@document_groups
-@document_fix_compatible
-class Rule_L005(BaseRule):
-    """Commas should not have whitespace directly before them.
-
-    Unless it's an indent. Trailing/leading commas are dealt with
-    in a different rule.
-
-    **Anti-pattern**
-
-    The ``•`` character represents a space.
-    There is an extra space in line two before the comma.
-
-    .. code-block:: sql
-       :force:
-
-        SELECT
-            a•,
-            b
-        FROM foo
-
-    **Best practice**
-
-    Remove the space before the comma.
-
-    .. code-block:: sql
-
-        SELECT
-            a,
-            b
-        FROM foo
-    """
-
-    groups = ("all", "core")
-    crawl_behaviour = SegmentSeekerCrawler({"comma"})
-
-    def _eval(self, context: RuleContext) -> List[LintResult]:
-        """Commas should not have whitespace directly before them."""
-        results = (
-            ReflowSequence.from_around_target(
-                context.segment,
-                context.parent_stack[0],
-                config=context.config,
-                sides="before",
-            )
-            .respace()
-            .get_results()
-        )
-        # Because whitespace management is currently spread across a couple
-        # of rules, we filter just to results with deletes in them here.
-        return [
-            result
-            for result in results
-            if all(fix.edit_type == "delete" for fix in result.fixes)
-        ]
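For intuition, the delete-only effect this rule keeps can be approximated with a regex (a sketch only: the real rule is reflow-aware and treats leading-comma indents differently):

    import re

    def remove_space_before_comma(sql: str) -> str:
        # Drop horizontal whitespace sitting directly before a comma.
        return re.sub(r"[ \t]+,", ",", sql)

    assert remove_space_before_comma("SELECT\n    a ,\n    b\nFROM foo") == (
        "SELECT\n    a,\n    b\nFROM foo"
    )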
diff --git a/src/sqlfluff/rules/L006.py b/src/sqlfluff/rules/L006.py
deleted file mode 100644
index f0f92d2..0000000
--- a/src/sqlfluff/rules/L006.py
+++ /dev/null
@@ -1,96 +0,0 @@
-"""Implementation of Rule L006."""
-
-
-from typing import List
-
-from sqlfluff.core.rules import (
-    BaseRule,
-    LintResult,
-    RuleContext,
-)
-from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
-from sqlfluff.utils.reflow import ReflowSequence
-
-
-@document_groups
-@document_fix_compatible
-class Rule_L006(BaseRule):
-    """Operators should be surrounded by a single whitespace.
-
-    **Anti-pattern**
-
-    In this example, there is a space missing between the operator and ``b``.
-
-    .. code-block:: sql
-
-        SELECT
-            a +b
-        FROM foo
-
-
-    **Best practice**
-
-    Keep a single space.
-
-    .. code-block:: sql
-
-        SELECT
-            a + b
-        FROM foo
-    """
-
-    groups = ("all", "core")
-    crawl_behaviour = SegmentSeekerCrawler(
-        {"binary_operator", "comparison_operator", "assignment_operator"}
-    )
-
-    def _eval(self, context: RuleContext) -> List[LintResult]:
-        """Operators should be surrounded by a single whitespace.
-
-        Rewritten to assess direct children of a segment to make
-        whitespace insertion more sensible.
-
-        We only need to handle *missing* whitespace because excess
-        whitespace is handled by L039.
-
-        NOTE: We also allow bracket characters either side.
-        """
-        # Iterate through children of this segment looking for any of the
-        # target types. We also check for whether any of the children start
-        # or end with the targets.
-
-        # We ignore any targets which start or finish this segment. They'll
-        # be dealt with by the parent segment. That also means that we need
-        # to have at least three children.
-
-        # Operators can be either a single raw segment or multiple, and
-        # a significant number of them are multiple (thanks TSQL). While
-        # we could provide an alternative route for single raws, this is
-        # implemented to separately look before, and after. In the single
-        # raw case - they'll be targeting the same segment, and potentially
-        # waste some processing overhead, but this makes the code simpler.
-
-        # If this is an operator within an operator, we'll double count
-        # so abort.
-        if context.parent_stack and context.parent_stack[-1].is_type(
-            "assignment_operator"
-        ):
-            return []
-
-        results = (
-            ReflowSequence.from_around_target(
-                context.segment, context.parent_stack[0], config=context.config
-            )
-            .respace()
-            .get_results()
-        )
-
-        # Because *excess whitespace* is handled elsewhere until 2.0.0
-        # we should only return results which *create* whitespace.
-
-        return [
-            result
-            for result in results
-            if all(fix.edit_type.startswith("create") for fix in result.fixes)
-        ]
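A toy version of the create-only case returned above, for a single "+" operator (the real implementation handles multi-segment operators through the reflow sequence):

    import re

    def pad_plus(sql: str) -> str:
        # Normalise spacing around "+" to exactly one space each side,
        # without touching newlines.
        return re.sub(r"[ \t]*\+[ \t]*", " + ", sql)

    assert pad_plus("SELECT\n    a +b\nFROM foo") == "SELECT\n    a + b\nFROM foo"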
diff --git a/src/sqlfluff/rules/L007.py b/src/sqlfluff/rules/L007.py
deleted file mode 100644
index 6e042a4..0000000
--- a/src/sqlfluff/rules/L007.py
+++ /dev/null
@@ -1,78 +0,0 @@
-"""Implementation of Rule L007."""
-
-from typing import List
-
-from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
-from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
-from sqlfluff.utils.reflow import ReflowSequence
-
-
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L007(BaseRule):
-    """Operators should follow a standard for being before/after newlines.
-
-    **Anti-pattern**
-
-    In this example, if ``operator_new_lines = after`` (or unspecified, as is the
-    default), then the operator ``+`` should not be at the end of the second line.
-
-    .. code-block:: sql
-
-        SELECT
-            a +
-            b
-        FROM foo
-
-
-    **Best practice**
-
-    If ``operator_new_lines = after`` (or unspecified, as this is the default),
-    place the operator after the newline.
-
-    .. code-block:: sql
-
-        SELECT
-            a
-            + b
-        FROM foo
-
-    If ``operator_new_lines = before``, place the operator before the newline.
-
-    .. code-block:: sql
-
-        SELECT
-            a +
-            b
-        FROM foo
-    """
-
-    groups = ("all",)
-    crawl_behaviour = SegmentSeekerCrawler({"binary_operator", "comparison_operator"})
-
-    def _eval(self, context: RuleContext) -> List[LintResult]:
-        """Operators should follow a standard for being before/after newlines.
-
-        We use the memory to keep track of whitespace up to now, and
-        whether the last code segment was an operator or not.
-        Anchor is our signal as to whether there's a problem.
-
-        We only trigger if we have an operator FOLLOWED BY a newline
-        before the next meaningful code segment.
-        """
-        return (
-            ReflowSequence.from_around_target(
-                context.segment,
-                root_segment=context.parent_stack[0],
-                config=context.config,
-            )
-            .rebreak()
-            .get_results()
-        )
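As a sketch of the default operator_new_lines = after convention (an illustrative regex, not the rebreak logic):

    import re

    def operator_after_newline(sql: str) -> str:
        # Move a trailing "+" past the line break, keeping the next
        # line's indentation.
        return re.sub(r" \+\n([ \t]*)", r"\n\1+ ", sql)

    assert operator_after_newline("SELECT\n    a +\n    b\nFROM foo") == (
        "SELECT\n    a\n    + b\nFROM foo"
    )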
diff --git a/src/sqlfluff/rules/L008.py b/src/sqlfluff/rules/L008.py
deleted file mode 100644
index 29bb109..0000000
--- a/src/sqlfluff/rules/L008.py
+++ /dev/null
@@ -1,54 +0,0 @@
-"""Implementation of Rule L008."""
-from typing import List
-
-from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
-from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
-
-from sqlfluff.utils.reflow.sequence import ReflowSequence
-
-
-@document_groups
-@document_fix_compatible
-class Rule_L008(BaseRule):
-    """Commas should be followed by a single whitespace unless followed by a comment.
-
-    **Anti-pattern**
-
-    In this example, there is no space between the comma and ``'zoo'``.
-
-    .. code-block:: sql
-
-        SELECT
-            *
-        FROM foo
-        WHERE a IN ('plop','zoo')
-
-    **Best practice**
-
-    Keep a single space after the comma. The ``•`` character represents a space.
-
-    .. code-block:: sql
-       :force:
-
-        SELECT
-            *
-        FROM foo
-        WHERE a IN ('plop',•'zoo')
-    """
-
-    groups = ("all", "core")
-    crawl_behaviour = SegmentSeekerCrawler({"comma"})
-
-    def _eval(self, context: RuleContext) -> List[LintResult]:
-        """Commas should not have whitespace directly before them."""
-        return (
-            ReflowSequence.from_around_target(
-                context.segment,
-                context.parent_stack[0],
-                config=context.config,
-                sides="after",
-            )
-            .respace()
-            .get_results()
-        )
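The spirit of the rule in one regex, hedged: the real respace logic is aware of trailing comments and line breaks, which this sketch ignores:

    import re

    def space_after_comma(sql: str) -> str:
        # Insert a single space after any comma directly followed by
        # non-whitespace.
        return re.sub(r",(?=\S)", ", ", sql)

    assert space_after_comma("WHERE a IN ('plop','zoo')") == (
        "WHERE a IN ('plop', 'zoo')"
    )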
diff --git a/src/sqlfluff/rules/L016.py b/src/sqlfluff/rules/L016.py
deleted file mode 100644
index c31b909..0000000
--- a/src/sqlfluff/rules/L016.py
+++ /dev/null
@@ -1,624 +0,0 @@
-"""Implementation of Rule L016."""
-
-from typing import cast, List, Optional, Sequence, Tuple
-
-from sqlfluff.core.parser import (
-    BaseSegment,
-    NewlineSegment,
-    RawSegment,
-    WhitespaceSegment,
-)
-
-from sqlfluff.core.rules import LintFix, LintResult, RuleContext
-from sqlfluff.utils.functional import sp, Segments
-from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
-from sqlfluff.rules.L003 import Rule_L003
-
-
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L016(Rule_L003):
-    """Line is too long."""
-
-    groups = ("all", "core")
-    crawl_behaviour = SegmentSeekerCrawler({"newline"}, provide_raw_stack=True)
-    _adjust_anchors = True
-    _check_docstring = False
-
-    config_keywords = [
-        "max_line_length",
-        "tab_space_size",
-        "indent_unit",
-        "ignore_comment_lines",
-        "ignore_comment_clauses",
-    ]
-
-    def _eval_line_for_breaks(self, segments: List[RawSegment]) -> List[LintFix]:
-        """Evaluate the line for break points.
-
-        We split the line into a few particular sections:
-        - The indent (all the whitespace up to this point)
-        - Content (which doesn't have whitespace at the start or end)
-        - Breakpoint (which contains Indent/Dedent and potential
-          whitespace). NB: If multiple indent/dedent sections share
-          a breakpoint, then they will occupy the SAME one, so that
-          dealing with whitespace post-split is easier.
-        - Pausepoint (which is a comma, potentially surrounded by
-          whitespace). This is for potential list splitting.
-
-        Once split, we'll use a separate method to work out what
-        combinations make most sense for reflow.
-        """
-        chunk_buff = []
-        indent_section = None
-
-        class Section:
-            def __init__(
-                self,
-                segments: Sequence[RawSegment],
-                role: str,
-                indent_balance: int,
-                indent_impulse: Optional[int] = None,
-            ):
-                self.segments = segments
-                self.role = role
-                self.indent_balance = indent_balance
-                self.indent_impulse: int = indent_impulse or 0
-
-            def __repr__(self):
-                return (
-                    "<Section @ {pos}: {role} [{indent_balance}:{indent_impulse}]. "
-                    "{segments!r}>".format(
-                        role=self.role,
-                        indent_balance=self.indent_balance,
-                        indent_impulse=self.indent_impulse,
-                        segments="".join(elem.raw for elem in self.segments),
-                        pos=self.segments[0].get_start_point_marker()
-                        if self.segments
-                        else "",
-                    )
-                )
-
-            @property
-            def raw(self) -> str:
-                return "".join(seg.raw for seg in self.segments)
-
-            @staticmethod
-            def find_segment_at(segments, loc: Tuple[int, int]) -> RawSegment:
-                for seg in segments:
-                    if not seg.is_meta and seg.pos_marker.working_loc == loc:
-                        return seg
-                raise ValueError("Segment not found")  # pragma: no cover
-
-            def generate_fixes_to_coerce(
-                self,
-                segments: List[RawSegment],
-                indent_section: "Section",
-                crawler: Rule_L016,
-                indent: int,
-            ) -> List[LintFix]:
-                """Generate a list of fixes to create a break at this point.
-
-                The `segments` argument is necessary to extract anchors
-                from the existing segments.
-                """
-                fixes = []
-
-                # Generate some sample indents:
-                unit_indent = crawler._make_indent(
-                    indent_unit=crawler.indent_unit,
-                    tab_space_size=crawler.tab_space_size,
-                )
-                indent_p1 = indent_section.raw + unit_indent
-                if unit_indent in indent_section.raw:
-                    indent_m1 = indent_section.raw.replace(unit_indent, "", 1)
-                else:
-                    indent_m1 = indent_section.raw
-
-                if indent > 0:
-                    new_indent = indent_p1
-                elif indent < 0:
-                    new_indent = indent_m1
-                else:
-                    new_indent = indent_section.raw
-
-                create_anchor = self.find_segment_at(
-                    segments, self.segments[-1].get_end_loc()
-                )
-
-                if self.role == "pausepoint":
-                    # Assume that this means there isn't a breakpoint
-                    # and that we'll break with the same indent as the
-                    # existing line.
-
-                    # NOTE: Deal with commas and binary operators differently here.
-                    # Maybe only deal with commas to start with?
-                    if any(
-                        seg.is_type("binary_operator") for seg in self.segments
-                    ):  # pragma: no cover
-                        raise NotImplementedError(
-                            "Don't know how to deal with binary operators here yet!!"
-                        )
-
-                    # Remove any existing whitespace
-                    for elem in self.segments:
-                        if not elem.is_meta and elem.is_type("whitespace"):
-                            fixes.append(LintFix.delete(elem))
-
-                    # Create a newline and a similar indent
-                    fixes.append(
-                        LintFix.create_before(
-                            create_anchor,
-                            [
-                                NewlineSegment(),
-                                WhitespaceSegment(new_indent),
-                            ],
-                        )
-                    )
-                    return fixes
-
-                if self.role == "breakpoint":
-                    # Can we determine the required indent just from
-                    # the info in this segment only?
-
-                    # Remove anything which is already here
-                    for elem in self.segments:
-                        if not elem.is_meta:
-                            fixes.append(LintFix.delete(elem))
-                    # Create a newline, create an indent of the relevant size
-                    fixes.append(
-                        LintFix.create_before(
-                            create_anchor,
-                            [
-                                NewlineSegment(),
-                                WhitespaceSegment(new_indent),
-                            ],
-                        )
-                    )
-                    return fixes
-                raise ValueError(
-                    f"Unexpected break generated at {self}"
-                )  # pragma: no cover
-
-        segment_buff: Tuple[RawSegment, ...] = ()
-        whitespace_buff: Tuple[RawSegment, ...] = ()
-        indent_impulse = 0
-        indent_balance = 0
-        is_pause = False
-
-        seg: RawSegment
-        for seg in segments:
-            if indent_section is None:
-                if seg.is_type("whitespace") or seg.is_meta:
-                    whitespace_buff += (seg,)
-                else:
-                    indent_section = Section(
-                        segments=whitespace_buff,
-                        role="indent",
-                        indent_balance=indent_balance,
-                    )
-                    whitespace_buff = ()
-                    segment_buff = (seg,)
-            else:
-                if seg.is_type("whitespace") or seg.is_meta:
-                    whitespace_buff += (seg,)
-                    if seg.is_meta:
-                        indent_impulse += seg.indent_val
-                else:
-                    # We got something other than whitespace or a meta.
-                    # Have we passed an indent?
-                    if indent_impulse != 0:
-                        # Yes. Bank the section, perhaps also with a content
-                        # section.
-                        if segment_buff:
-                            chunk_buff.append(
-                                Section(
-                                    segments=segment_buff,
-                                    role="content",
-                                    indent_balance=indent_balance,
-                                )
-                            )
-                            segment_buff = ()
-                        # Deal with the whitespace
-                        chunk_buff.append(
-                            Section(
-                                segments=whitespace_buff,
-                                role="breakpoint",
-                                indent_balance=indent_balance,
-                                indent_impulse=indent_impulse,
-                            )
-                        )
-                        whitespace_buff = ()
-                        indent_balance += indent_impulse
-                        indent_impulse = 0
-
-                    # Did we think we were in a pause?
-                    # TODO: Re-enable binary operator breaks at some point in future.
-                    if is_pause:
-                        # We need to end the comma/operator
-                        # (taking any whitespace with it).
-                        chunk_buff.append(
-                            Section(
-                                segments=segment_buff + whitespace_buff,
-                                role="pausepoint",
-                                indent_balance=indent_balance,
-                            )
-                        )
-                        # Start the segment buffer off with this section.
-                        whitespace_buff = ()
-                        segment_buff = (seg,)
-                        is_pause = False
-                    else:
-                        # We're not in a pause (or not in a pause yet)
-                        if seg.is_type("comma"):  # or seg.is_type('binary_operator')
-                            if segment_buff:
-                                # End the previous section, start a comma/operator.
-                                # Any whitespace is added to the segment
-                                # buff to go with the comma.
-                                chunk_buff.append(
-                                    Section(
-                                        segments=segment_buff,
-                                        role="content",
-                                        indent_balance=indent_balance,
-                                    )
-                                )
-                                segment_buff = ()
-
-                            # Having a double comma should be impossible
-                            # but let's deal with that case regardless.
-                            segment_buff += whitespace_buff + (seg,)
-                            whitespace_buff = ()
-                            is_pause = True
-                        else:
-                            # Not in a pause and it's not a comma, so we're
-                            # in some content.
-                            segment_buff += whitespace_buff + (seg,)
-                            whitespace_buff = ()
-
-        # We're at the end, do we have anything left?
-        if is_pause:
-            role = "pausepoint"
-        elif segment_buff:
-            role = "content"
-        elif indent_impulse:  # pragma: no cover
-            role = "breakpoint"
-        else:
-            # This can happen, e.g. with a long template line. Treat it as
-            # unfixable.
-            return []
-        chunk_buff.append(
-            Section(
-                segments=segment_buff + whitespace_buff,
-                role=role,
-                indent_balance=indent_balance,
-            )
-        )
-
-        self.logger.info("Sections:")
-        for idx, sec in enumerate(chunk_buff):
-            self.logger.info(f"    {idx}: {sec!r}")
-
-        # How do we prioritise where to work?
-        # First, do we ever go through a negative breakpoint?
-        lowest_bal = min(sec.indent_balance for sec in chunk_buff)
-        split_at = []  # split_at is probably going to be a list.
-        if lowest_bal < 0:
-            for sec in chunk_buff:
-                if sec.indent_balance == 0 and sec.indent_impulse < 0:
-                    split_at = [(sec, -1)]
-                    break
-        # Assuming we never go negative, we'll either use a pause
-        # point in the base indent balance, or we'll split out
-        # a section or two using the lowest breakpoints.
-        else:
-                # Look for low level pauses. Additionally, ignore
-                # them if they're a comma at the end of the line,
-                # as they're useless for splitting.
-            pauses = [
-                sec
-                for sec in chunk_buff
-                if sec.role == "pausepoint" and sec.indent_balance == 0
-                # Not the last chunk
-                and sec is not chunk_buff[-1]
-            ]
-            if any(pauses):
-                split_at = [(pause, 0) for pause in pauses]
-            else:
-                # No pauses and no negatives. We should extract
-                # a subsection using the breakpoints.
-
-                # We'll definitely have an up. It's possible that the *down*
-                # might not be on this line, so we have to allow for that case.
-                upbreaks = [
-                    sec
-                    for sec in chunk_buff
-                    if sec.role == "breakpoint"
-                    and sec.indent_balance == 0
-                    and sec.indent_impulse > 0
-                ]
-                if not upbreaks:
-                    # No upbreaks at all?
-                    # Then abort.
-                    return []
-                # First up break
-                split_at = [(upbreaks[0], 1)]
-                downbreaks = [
-                    sec
-                    for sec in chunk_buff
-                    if sec.role == "breakpoint"
-                    and sec.indent_balance + sec.indent_impulse == 0
-                    and sec.indent_impulse < 0
-                ]
-                # First down break where we reach the base
-                if downbreaks:
-                    split_at.append((downbreaks[0], 0))
-                # If no downbreaks then the corresponding downbreak isn't on this line.
-
-        self.logger.info("Split at: %s", split_at)
-
-        fixes = []
-        for split, indent in split_at:
-            if split.segments:
-                assert indent_section
-                fixes += split.generate_fixes_to_coerce(
-                    segments, indent_section, self, indent
-                )
-
-        self.logger.info("Fixes: %s", fixes)
-
-        return fixes
-
-    @staticmethod
-    def _gen_line_so_far(raw_stack: Tuple[RawSegment, ...]) -> List[RawSegment]:
-        """Work out from the raw stack what the elements on this line are.
-
-        Returns:
-            :obj:`list` of segments
-
-        """
-        working_buff: List[RawSegment] = []
-        idx = -1
-        while True:
-            if len(raw_stack) >= abs(idx):
-                s = raw_stack[idx]
-                if s.is_type("newline"):
-                    break
-                else:
-                    working_buff.insert(0, s)
-                    idx -= 1
-            else:
-                break  # pragma: no cover
-        return working_buff
-
-    @classmethod
-    def _compute_segment_length(cls, segment: BaseSegment) -> int:
-        if segment.is_type("newline"):
-            # Generally, we won't see newlines, but if we do, simply ignore
-            # them. Rationale: The intent of this rule is to enforce maximum
-            # line length, and newlines don't make lines longer.
-            return 0
-
-        assert segment.pos_marker
-        if "\n" in segment.pos_marker.source_str():
-            # Similarly we shouldn't see newlines in source segments.
-            # However for templated loops it's often not possible to
-            # accurately calculate the segments. These will be caught by
-            # the first iteration of the loop (which is non-templated and
-            # so doesn't suffer from the same bug), so we can ignore these.
-            return 0
-
-        # Compute the length of this segment in SOURCE space (before
-        # template expansion).
-        slice_length = (
-            segment.pos_marker.source_slice.stop - segment.pos_marker.source_slice.start
-        )
-        if slice_length:
-            return slice_length
-        else:
-            # If a segment did not originate from the original source, its
-            # slice length will be zero. This occurs, for example, when
-            # other lint rules add indentation or other whitespace. In that
-            # case, compute the length of its contents.
-            return len(segment.raw)
-
-    def _compute_source_length(
-        self,
-        segments: Sequence[BaseSegment],
-        literals_in_comments: Sequence[BaseSegment],
-    ) -> int:
-        line_len = 0
-        seen_slices = set()
-        for segment in segments:
-            if segment in literals_in_comments:
-                self.logger.debug("Not counting literal in comment: %s", segment)
-                continue
-
-            assert segment.pos_marker
-            slice = (
-                segment.pos_marker.source_slice.start,
-                segment.pos_marker.source_slice.stop,
-            )
-            # Often, a single templated area of a source file will expand to
-            # multiple SQL tokens. Here, we use a set to avoid double counting
-            # the length of that text. For example, in BigQuery, we might
-            # see this source query:
-            #
-            # SELECT user_id
-            # FROM `{{bi_ecommerce_orders}}` {{table_at_job_start}}
-            #
-            # where 'table_at_job_start' is defined as:
-            # "FOR SYSTEM_TIME AS OF CAST('2021-03-02T01:22:59+00:00' AS TIMESTAMP)"
-            #
-            # So this one substitution results in roughly 10 segments (one per
-            # word or bit of punctuation). Each of these would have the same
-            # source slice, and if we didn't correct for this, we'd count the
-            # length of {{bi_ecommerce_orders}} roughly 10 times, resulting in
-            # a vast overcount of the source length.
-            #
-            # :TRICKY: New segments (i.e. those introduced by earlier fixes)
-            # have empty source slices. We definitely want to count the length
-            # of these segments. We can be sure they aren't the tricky templated
-            # segment case described above because new segments are never templated
-            # (because "sqlfluff fix" produced them, not the templater!).
-            if (
-                slice[0] == slice[1] and not segment.is_meta
-            ) or slice not in seen_slices:
-                seen_slices.add(slice)
-                line_len += self._compute_segment_length(segment)
-        return line_len
-
-    def _eval(self, context: RuleContext) -> Optional[LintResult]:
-        """Line is too long.
-
-        This only triggers on newline segments, evaluating the whole line.
-        The detection is simple, the fixing is much trickier.
-
-        """
-        # Config type hints
-        self.max_line_length: int
-        self.ignore_comment_lines: bool
-        self.ignore_comment_clauses: bool
-
-        assert context.segment.is_type("newline")
-
-        # iterate to buffer the whole line up to this point
-        this_line = self._gen_line_so_far(context.raw_stack)
-
-        # Do any literals on this line belong to a comment?
-        literals_in_comments: Sequence[BaseSegment] = []
-        if self.ignore_comment_clauses:
-            quoted_literals = Segments(*this_line).select(
-                select_if=sp.is_type("quoted_literal")
-            )
-            if quoted_literals:
-                self.logger.debug(
-                    "Comment Checking: Found quoted literals: %s", quoted_literals
-                )
-                root = context.parent_stack[0]
-
-                def is_in_comment(seg):
-                    """This evaluates whether the parent segment is a comment.
-
-                    We use path_to to get the stack of segments from the root
-                    to the given segment. The last element of that stack
-                    describes the direct parent of the given segment
-                    (`seg` in this function): i.e. path[-1].
-                    """
-                    path = root.path_to(seg)
-                    # It's unlikely that the path will be empty. That would
-                    # imply that we've been passed the root segment itself -
-                    # but in that case we should conclude it's not in a
-                    # comment.
-                    if len(path) < 1:
-                        return False  # pragma: no cover
-                    parent = path[-1].segment
-                    return parent.is_type("comment_clause", "comment_equals_clause")
-
-                literals_in_comments = quoted_literals.select(select_if=is_in_comment)
-                if literals_in_comments:
-                    self.logger.debug(
-                        "Comment Checking: Literals in comments: %s",
-                        literals_in_comments,
-                    )
-
-        # Now we can work out the line length and deal with the content
-        line_len = self._compute_source_length(this_line, literals_in_comments)
-        if line_len > self.max_line_length:
-            # Problem, we'll be reporting a violation. The
-            # question is, can we fix it?
-
-            # We'll need the indent, so let's get it for fixing.
-            line_indent = []
-            for s in this_line:
-                if s.is_type("whitespace"):
-                    line_indent.append(s)
-                else:
-                    break
-
-            # Don't even attempt to handle template placeholders, as it gets
-            # complicated if logic changes (e.g. moving for loops). Most of
-            # these long lines will likely be single line Jinja comments.
-            # They will remain as unfixable.
-            if this_line[-1].type == "placeholder":
-                if (
-                    this_line[-1].block_type != "comment"  # type: ignore[attr-defined]
-                    or not self.ignore_comment_clauses
-                ):
-                    self.logger.info("Unfixable template segment: %s", this_line[-1])
-                    return LintResult(anchor=context.segment)
-                else:
-                    return LintResult()
-
-            # Does the line end in an inline comment that we can move back?
-            if this_line[-1].is_type("inline_comment"):
-                # Is this line JUST a comment (with optional preceding
-                # whitespace)? If so, the user will have to fix it themselves.
-                if len(this_line) == 1 or all(
-                    elem.is_type("whitespace") or elem.is_meta
-                    for elem in this_line[:-1]
-                ):
-                    self.logger.info(
-                        "Unfixable inline comment, alone on line: %s", this_line[-1]
-                    )
-                    if self.ignore_comment_lines:
-                        return LintResult()
-                    else:
-                        return LintResult(anchor=context.segment)
-
-                self.logger.info(
-                    "Attempting move of inline comment at end of line: %s",
-                    this_line[-1],
-                )
-                # Set up to delete the original comment and the preceding whitespace
-                delete_buffer = [LintFix.delete(this_line[-1])]
-                idx = -2
-                while True:
-                    if len(this_line) >= abs(idx) and this_line[idx].is_type(
-                        "whitespace"
-                    ):
-                        delete_buffer.append(LintFix.delete(this_line[idx]))
-                        idx -= 1
-                    else:
-                        break  # pragma: no cover
-                create_elements = line_indent + [
-                    this_line[-1],
-                    cast(RawSegment, context.segment),
-                ]
-                if (
-                    self._compute_source_length(create_elements, literals_in_comments)
-                    > self.max_line_length
-                ):
-                    # The inline comment is NOT on a line by itself, but even if
-                    # we move it onto a line by itself, it's still too long. In
-                    # this case, the rule should do nothing, otherwise it
-                    # triggers an endless cycle of "fixes" that simply keeps
-                    # adding blank lines.
-                    self.logger.info(
-                        "Unfixable inline comment, too long even on a line by itself: "
-                        "%s",
-                        this_line[-1],
-                    )
-                    if self.ignore_comment_lines:
-                        return LintResult()
-                    else:
-                        return LintResult(anchor=context.segment)
-                # Create a newline before this one with the existing comment, an
-                # identical indent AND a terminating newline, copied from the current
-                # target segment.
-                create_buffer = [LintFix.create_before(this_line[0], create_elements)]
-                return LintResult(
-                    anchor=context.segment,
-                    fixes=delete_buffer + create_buffer,
-                )
-
-            fixes = self._eval_line_for_breaks(this_line)
-            if fixes:
-                return LintResult(anchor=context.segment, fixes=fixes)
-            return LintResult(anchor=context.segment)
-        return LintResult()
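
The long comment in _compute_source_length above carries the key idea of the deleted L016: one templated region of source can expand into many SQL tokens sharing the same source slice, so lengths must be de-duplicated by slice, while zero-length slices (segments created by earlier fixes) are always counted. A minimal standalone sketch of that guard, using (raw, source_slice) pairs in place of real segments:

def source_length(tokens):
    """Sum source lengths, counting each template slice only once.

    `tokens` is a list of (raw, source_slice) pairs. Zero-length slices
    mark segments created by fixes, which are always counted.
    """
    seen = set()
    total = 0
    for raw, src in tokens:
        key = (src.start, src.stop)
        if src.start == src.stop or key not in seen:
            seen.add(key)
            # Fall back to the raw length for fix-created segments.
            total += (src.stop - src.start) or len(raw)
    return total

# Ten tokens expanded from one 24-character template slice count once:
expanded = [("FOR", slice(100, 124))] * 10
assert source_length(expanded) == 24
# A fix-created space (empty source slice) still counts its raw length:
assert source_length(expanded + [(" ", slice(0, 0))]) == 25
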
diff --git a/src/sqlfluff/rules/L023.py b/src/sqlfluff/rules/L023.py
deleted file mode 100644
index 33478f7..0000000
--- a/src/sqlfluff/rules/L023.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""Implementation of Rule L023."""
-
-from typing import List
-
-from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
-from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
-
-from sqlfluff.utils.functional import FunctionalContext, sp
-from sqlfluff.utils.reflow.sequence import ReflowSequence
-
-
-@document_groups
-@document_fix_compatible
-class Rule_L023(BaseRule):
-    """Single whitespace expected after ``AS`` in ``WITH`` clause.
-
-    **Anti-pattern**
-
-    .. code-block:: sql
-
-        WITH plop AS(
-            SELECT * FROM foo
-        )
-
-        SELECT a FROM plop
-
-
-    **Best practice**
-
-    Add a space after ``AS``, to avoid confusing it for a function.
-    The ``•`` character represents a space.
-
-    .. code-block:: sql
-       :force:
-
-        WITH plop AS•(
-            SELECT * FROM foo
-        )
-
-        SELECT a FROM plop
-    """
-
-    groups = ("all", "core")
-    crawl_behaviour = SegmentSeekerCrawler({"common_table_expression"})
-    target_keyword = "AS"
-    strip_newlines = True
-
-    def _eval(self, context: RuleContext) -> List[LintResult]:
-        """Single whitespace expected in mother middle segment."""
-        functional = FunctionalContext(context)
-
-        as_keyword = (
-            functional.segment.children(sp.is_keyword(self.target_keyword))
-            .first()
-            .get()
-        )
-        if not as_keyword:
-            # No target keyword. Abort.
-            return []
-
-        # Respace the section immediately after the keyword. If any fixes
-        # are returned it implies there was an issue.
-        return (
-            ReflowSequence.from_around_target(
-                as_keyword,
-                context.parent_stack[0],
-                config=context.config,
-                sides="after",
-            )
-            .respace(strip_newlines=self.strip_newlines)
-            .get_results()
-        )
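
Beyond the reflow call, the only logic in L023 is locating the target keyword among the CTE's children and aborting if it is missing. A toy version of that lookup, with plain strings standing in for segments:

from typing import Optional, Sequence

def find_keyword(children: Sequence[str], target: str = "AS") -> Optional[str]:
    # Return the first child matching the target keyword, else None.
    for token in children:
        if token.upper() == target:
            return token
    return None

assert find_keyword(["WITH", "plop", "AS", "("]) == "AS"
assert find_keyword(["SELECT", "*"]) is None
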
diff --git a/src/sqlfluff/rules/L024.py b/src/sqlfluff/rules/L024.py
deleted file mode 100644
index a63a7f0..0000000
--- a/src/sqlfluff/rules/L024.py
+++ /dev/null
@@ -1,38 +0,0 @@
-"""Implementation of Rule L024."""
-
-
-from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
-from sqlfluff.rules.L023 import Rule_L023
-
-
-@document_groups
-@document_fix_compatible
-class Rule_L024(Rule_L023):
-    """Single whitespace expected after ``USING`` in ``JOIN`` clause.
-
-    **Anti-pattern**
-
-    .. code-block:: sql
-
-        SELECT b
-        FROM foo
-        LEFT JOIN zoo USING(a)
-
-    **Best practice**
-
-    Add a space after ``USING``, to avoid confusing it
-    for a function.
-
-    .. code-block:: sql
-       :force:
-
-        SELECT b
-        FROM foo
-        LEFT JOIN zoo USING (a)
-    """
-
-    groups = ("all", "core")
-    crawl_behaviour = SegmentSeekerCrawler({"join_clause"})
-    target_keyword = "USING"
-    strip_newlines = False
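
L024 illustrates the inheritance pattern used across these rules: the child class overrides only the class attributes (crawl target, target_keyword, strip_newlines) and inherits _eval from L023 unchanged. The same pattern, sketched outside sqlfluff:

class KeywordSpacingRule:
    """Base rule: enforce a single space after `target_keyword`."""

    target_keyword = "AS"
    strip_newlines = True

    def describe(self) -> str:
        return (
            f"single whitespace expected after {self.target_keyword!r} "
            f"(strip_newlines={self.strip_newlines})"
        )

class UsingSpacingRule(KeywordSpacingRule):
    # Only the parameters change; the evaluation logic is inherited.
    target_keyword = "USING"
    strip_newlines = False

print(UsingSpacingRule().describe())
# single whitespace expected after 'USING' (strip_newlines=False)
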
diff --git a/src/sqlfluff/rules/L036.py b/src/sqlfluff/rules/L036.py
deleted file mode 100644
index 8bb2745..0000000
--- a/src/sqlfluff/rules/L036.py
+++ /dev/null
@@ -1,428 +0,0 @@
-"""Implementation of Rule L036."""
-
-from typing import List, NamedTuple, Optional, Sequence
-
-from sqlfluff.core.parser import WhitespaceSegment
-
-from sqlfluff.core.parser import BaseSegment, NewlineSegment
-from sqlfluff.core.parser.segments.base import IdentitySet
-from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext
-from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
-from sqlfluff.utils.functional import Segments, sp, FunctionalContext
-
-
-class SelectTargetsInfo(NamedTuple):
-    """Info about select targets and nearby whitespace."""
-
-    select_idx: int
-    first_new_line_idx: int
-    first_select_target_idx: int
-    first_whitespace_idx: int
-    comment_after_select_idx: int
-    select_targets: Sequence[BaseSegment]
-    from_segment: Optional[BaseSegment]
-    pre_from_whitespace: List[BaseSegment]
-
-
-@document_groups
-@document_configuration
-@document_fix_compatible
-class Rule_L036(BaseRule):
-    """Select targets should be on a new line unless there is only one select target.
-
-    .. note::
-       By default, a wildcard (e.g. ``SELECT *``) is considered a single select target.
-       If you want it to be treated as multiple select targets, configure
-       ``wildcard_policy = multiple``.
-
-    **Anti-pattern**
-
-    Multiple select targets on the same line.
-
-    .. code-block:: sql
-        :force:
-
-        select a, b
-        from foo
-
-        -- Single select target on its own line.
-
-        SELECT
-            a
-        FROM foo
-
-
-    **Best practice**
-
-    Multiple select targets each on their own line.
-
-    .. code-block:: sql
-        :force:
-
-        select
-            a,
-            b
-        from foo
-
-        -- Single select target on the same line as the ``SELECT``
-        -- keyword.
-
-        SELECT a
-        FROM foo
-
-    """
-
-    groups = ("all",)
-    config_keywords = ["wildcard_policy"]
-    crawl_behaviour = SegmentSeekerCrawler({"select_clause"})
-
-    def _eval(self, context: RuleContext):
-        self.wildcard_policy: str
-        assert context.segment.is_type("select_clause")
-        select_targets_info = self._get_indexes(context)
-        select_clause = FunctionalContext(context).segment
-        wildcards = select_clause.children(
-            sp.is_type("select_clause_element")
-        ).children(sp.is_type("wildcard_expression"))
-        has_wildcard = bool(wildcards)
-        if len(select_targets_info.select_targets) == 1 and (
-            not has_wildcard or self.wildcard_policy == "single"
-        ):
-            return self._eval_single_select_target_element(
-                select_targets_info,
-                context,
-            )
-        elif len(select_targets_info.select_targets):
-            return self._eval_multiple_select_target_elements(
-                select_targets_info, context.segment
-            )
-
-    @staticmethod
-    def _get_indexes(context: RuleContext):
-        children = FunctionalContext(context).segment.children()
-        select_targets = children.select(sp.is_type("select_clause_element"))
-        first_select_target_idx = children.find(select_targets.get())
-        selects = children.select(sp.is_keyword("select"))
-        select_idx = children.find(selects.get()) if selects else -1
-        newlines = children.select(sp.is_type("newline"))
-        first_new_line_idx = children.find(newlines.get()) if newlines else -1
-        comment_after_select_idx = -1
-        if newlines:
-            comment_after_select = children.select(
-                sp.is_type("comment"),
-                start_seg=selects.get(),
-                stop_seg=newlines.get(),
-                loop_while=sp.or_(
-                    sp.is_type("comment"), sp.is_type("whitespace"), sp.is_meta()
-                ),
-            )
-            if comment_after_select:
-                comment_after_select_idx = (
-                    children.find(comment_after_select.get())
-                    if comment_after_select
-                    else -1
-                )
-        first_whitespace_idx = -1
-        if first_new_line_idx != -1:
-            # TRICKY: Ignore whitespace prior to the first newline, e.g. if
-            # the line with "SELECT" (before any select targets) has trailing
-            # whitespace.
-            segments_after_first_line = children.select(
-                sp.is_type("whitespace"), start_seg=children[first_new_line_idx]
-            )
-            first_whitespace_idx = children.find(segments_after_first_line.get())
-
-        siblings_post = FunctionalContext(context).siblings_post
-        from_segment = siblings_post.first(sp.is_type("from_clause")).first().get()
-        pre_from_whitespace = siblings_post.select(
-            sp.is_type("whitespace"), stop_seg=from_segment
-        )
-        return SelectTargetsInfo(
-            select_idx,
-            first_new_line_idx,
-            first_select_target_idx,
-            first_whitespace_idx,
-            comment_after_select_idx,
-            select_targets,
-            from_segment,
-            list(pre_from_whitespace),
-        )
-
-    def _eval_multiple_select_target_elements(self, select_targets_info, segment):
-        """Multiple select targets. Ensure each is on a separate line."""
-        # Insert newline before every select target.
-        fixes = []
-        for i, select_target in enumerate(select_targets_info.select_targets):
-            base_segment = (
-                segment if not i else select_targets_info.select_targets[i - 1]
-            )
-            if (
-                base_segment.pos_marker.working_line_no
-                == select_target.pos_marker.working_line_no
-            ):
-                # Find and delete any whitespace before the select target.
-                start_seg = select_targets_info.select_idx
-                # If any select modifier (e.g. distinct) is present, start
-                # there rather than at the beginning.
-                modifier = segment.get_child("select_clause_modifier")
-                if modifier:
-                    start_seg = segment.segments.index(modifier)
-
-                ws_to_delete = segment.select_children(
-                    start_seg=segment.segments[start_seg]
-                    if not i
-                    else select_targets_info.select_targets[i - 1],
-                    select_if=lambda s: s.is_type("whitespace"),
-                    loop_while=lambda s: s.is_type("whitespace", "comma") or s.is_meta,
-                )
-                fixes += [LintFix.delete(ws) for ws in ws_to_delete]
-                fixes.append(LintFix.create_before(select_target, [NewlineSegment()]))
-
-            # If we are at the last select target, check if the FROM clause
-            # is on the same line, and if so move it to its own line.
-            if select_targets_info.from_segment:
-                if (i + 1 == len(select_targets_info.select_targets)) and (
-                    select_target.pos_marker.working_line_no
-                    == select_targets_info.from_segment.pos_marker.working_line_no
-                ):
-                    fixes.extend(
-                        [
-                            LintFix.delete(ws)
-                            for ws in select_targets_info.pre_from_whitespace
-                        ]
-                    )
-                    fixes.append(
-                        LintFix.create_before(
-                            select_targets_info.from_segment,
-                            [NewlineSegment()],
-                        )
-                    )
-
-        if fixes:
-            return LintResult(anchor=segment, fixes=fixes)
-
-    def _eval_single_select_target_element(
-        self, select_targets_info, context: RuleContext
-    ):
-        select_clause = FunctionalContext(context).segment
-        parent_stack = context.parent_stack
-
-        if (
-            select_targets_info.select_idx
-            < select_targets_info.first_new_line_idx
-            < select_targets_info.first_select_target_idx
-        ):
-            # Do we have a modifier?
-            select_children = select_clause.children()
-            modifier: Optional[Segments]
-            modifier = select_children.first(sp.is_type("select_clause_modifier"))
-
-            # Prepare the select clause which will be inserted
-            insert_buff = [
-                WhitespaceSegment(),
-                select_children[select_targets_info.first_select_target_idx],
-            ]
-
-            # Check if the modifier is one we care about
-            if modifier:
-                # If it's already on the first line, ignore it.
-                if (
-                    select_children.index(modifier.get())
-                    < select_targets_info.first_new_line_idx
-                ):
-                    modifier = None
-            fixes = [
-                # Delete the first select target from its original location.
-                # We'll add it to the right section at the end, once we know
-                # what to add.
-                LintFix.delete(
-                    select_children[select_targets_info.first_select_target_idx],
-                ),
-            ]
-
-            # If we have a modifier to move:
-            if modifier:
-                # Add it to the insert
-                insert_buff = [WhitespaceSegment(), modifier[0]] + insert_buff
-
-                modifier_idx = select_children.index(modifier.get())
-                # Delete the whitespace after it (which is two after, thanks to indent)
-                if (
-                    len(select_children) > modifier_idx + 2
-                    and select_children[modifier_idx + 2].is_whitespace
-                ):
-                    fixes += [
-                        LintFix.delete(
-                            select_children[modifier_idx + 2],
-                        ),
-                    ]
-
-                # Delete the modifier itself
-                fixes += [
-                    LintFix.delete(
-                        modifier[0],
-                    ),
-                ]
-
-                # Set the position marker for removing the preceding
-                # whitespace and newline, which we'll use below.
-                start_idx = modifier_idx
-            else:
-                # Set the position marker for removing the preceding
-                # whitespace and newline, which we'll use below.
-                start_idx = select_targets_info.first_select_target_idx
-
-            if parent_stack and parent_stack[-1].is_type("select_statement"):
-                select_stmt = parent_stack[-1]
-                select_clause_idx = select_stmt.segments.index(select_clause.get())
-                after_select_clause_idx = select_clause_idx + 1
-                if len(select_stmt.segments) > after_select_clause_idx:
-
-                    def _fixes_for_move_after_select_clause(
-                        stop_seg: BaseSegment,
-                        delete_segments: Optional[Segments] = None,
-                        add_newline: bool = True,
-                    ) -> List[LintFix]:
-                        """Cleans up by moving leftover select_clause segments.
-
-                        Context: Some of the other fixes we make in
-                        _eval_single_select_target_element() leave leftover
-                        child segments that need to be moved to become
-                        *siblings* of the select_clause.
-                        """
-                        start_seg = (
-                            modifier[0]
-                            if modifier
-                            else select_children[select_targets_info.first_new_line_idx]
-                        )
-                        move_after_select_clause = select_children.select(
-                            start_seg=start_seg,
-                            stop_seg=stop_seg,
-                        )
-                        # :TRICKY: Below, we have a couple places where we
-                        # filter to guard against deleting the same segment
-                        # multiple times -- this is illegal.
-                        # :TRICKY: Use IdentitySet rather than set() since
-                        # different segments may compare as equal.
-                        all_deletes = IdentitySet(
-                            fix.anchor for fix in fixes if fix.edit_type == "delete"
-                        )
-                        fixes_ = []
-                        for seg in delete_segments or []:
-                            if seg not in all_deletes:
-                                fixes.append(LintFix.delete(seg))
-                                all_deletes.add(seg)
-                        fixes_ += [
-                            LintFix.delete(seg)
-                            for seg in move_after_select_clause
-                            if seg not in all_deletes
-                        ]
-                        fixes_.append(
-                            LintFix.create_after(
-                                select_clause[0],
-                                ([NewlineSegment()] if add_newline else [])
-                                + list(move_after_select_clause),
-                            )
-                        )
-                        return fixes_
-
-                    if select_stmt.segments[after_select_clause_idx].is_type("newline"):
-                        # Since we're deleting the newline, we should also delete all
-                        # whitespace before it or it will add random whitespace to
-                        # following statements. So walk back through the segment,
-                        # deleting whitespace until you reach the previous newline
-                        # or something else.
-                        to_delete = select_children.reversed().select(
-                            loop_while=sp.is_type("whitespace"),
-                            start_seg=select_children[start_idx],
-                        )
-                        if to_delete:
-                            # The select_clause is immediately followed by a
-                            # newline. Delete the newline in order to avoid leaving
-                            # behind an empty line after fix, *unless* we stopped
-                            # due to something other than a newline.
-                            delete_last_newline = select_children[
-                                start_idx - len(to_delete) - 1
-                            ].is_type("newline")
-
-                            # Delete the newline if we decided to.
-                            if delete_last_newline:
-                                fixes.append(
-                                    LintFix.delete(
-                                        select_stmt.segments[after_select_clause_idx],
-                                    )
-                                )
-
-                            fixes += _fixes_for_move_after_select_clause(
-                                to_delete[-1], to_delete
-                            )
-                    elif select_stmt.segments[after_select_clause_idx].is_type(
-                        "whitespace"
-                    ):
-                        # The select_clause has stuff after (most likely a comment)
-                        # Delete the whitespace immediately after the select clause
-                        # so the other stuff aligns nicely based on where the select
-                        # clause started.
-                        fixes += [
-                            LintFix.delete(
-                                select_stmt.segments[after_select_clause_idx],
-                            ),
-                        ]
-                        fixes += _fixes_for_move_after_select_clause(
-                            select_children[
-                                select_targets_info.first_select_target_idx
-                            ],
-                        )
-                    elif select_stmt.segments[after_select_clause_idx].is_type(
-                        "dedent"
-                    ):
-                        # Again let's strip back the whitespace, but it's simpler
-                        # here as we don't need to worry about a newline: just
-                        # stop at the first non-whitespace segment.
-                        to_delete = select_children.reversed().select(
-                            loop_while=sp.is_type("whitespace"),
-                            start_seg=select_children[select_clause_idx - 1],
-                        )
-                        if to_delete:
-                            fixes += _fixes_for_move_after_select_clause(
-                                to_delete[-1],
-                                to_delete,
-                                # If we deleted a newline, create a newline.
-                                any(seg for seg in to_delete if seg.is_type("newline")),
-                            )
-                    else:
-                        fixes += _fixes_for_move_after_select_clause(
-                            select_children[
-                                select_targets_info.first_select_target_idx
-                            ],
-                        )
-
-            if select_targets_info.comment_after_select_idx == -1:
-                fixes += [
-                    # Insert the select_clause in place of the first newline in the
-                    # Select statement
-                    LintFix.replace(
-                        select_children[select_targets_info.first_new_line_idx],
-                        insert_buff,
-                    ),
-                ]
-            else:
-                # The SELECT is followed by a comment on the same line. In order
-                # to autofix this, we'd need to move the select target between
-                # SELECT and the comment and potentially delete the entire line
-                # where the select target was (if it is now empty). This is
-                # *fairly tricky and complex*, in part because the newline on
-                # the select target's line is several levels higher in the
-                # parser tree. Hence, we currently don't autofix this. Could be
-                # autofixed in the future if/when we have the time.
-                fixes = []
-            return LintResult(
-                anchor=select_clause.get(),
-                fixes=fixes,
-            )
-        return None
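
The opening branch of L036's _eval encodes the wildcard_policy note from its docstring: a lone SELECT * counts as a single target unless the policy is 'multiple'. Just that decision, as a standalone sketch:

def treat_as_multiple(n_targets: int, has_wildcard: bool,
                      wildcard_policy: str = "single") -> bool:
    """True when each target should be forced onto its own line."""
    if n_targets == 1 and (not has_wildcard or wildcard_policy == "single"):
        return False
    return True

assert not treat_as_multiple(1, has_wildcard=False)
assert not treat_as_multiple(1, has_wildcard=True)          # default policy
assert treat_as_multiple(1, has_wildcard=True, wildcard_policy="multiple")
assert treat_as_multiple(2, has_wildcard=False)
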
diff --git a/src/sqlfluff/rules/L038.py b/src/sqlfluff/rules/L038.py
deleted file mode 100644
index 608ad93..0000000
--- a/src/sqlfluff/rules/L038.py
+++ /dev/null
@@ -1,79 +0,0 @@
-"""Implementation of Rule L038."""
-from typing import Optional
-
-from sqlfluff.core.parser import BaseSegment, SymbolSegment
-
-from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext
-from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.utils.functional import sp, FunctionalContext
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
-
-
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L038(BaseRule):
-    """Trailing commas within select clause.
-
-    .. note::
-       Some database backends allow this, and some users may wish
-       to enforce it (in line with Python best practice). However,
-       many database backends regard it as a syntax error, and as
-       such the `SQLFluff` default is to forbid trailing commas in
-       the select clause.
-
-    **Anti-pattern**
-
-    .. code-block:: sql
-
-        SELECT
-            a,
-            b,
-        FROM foo
-
-    **Best practice**
-
-    .. code-block:: sql
-
-        SELECT
-            a,
-            b
-        FROM foo
-    """
-
-    groups = ("all", "core")
-    config_keywords = ["select_clause_trailing_comma"]
-    crawl_behaviour = SegmentSeekerCrawler({"select_clause"})
-
-    def _eval(self, context: RuleContext) -> Optional[LintResult]:
-        """Trailing commas within select clause."""
-        # Config type hints
-        self.select_clause_trailing_comma: str
-
-        segment = FunctionalContext(context).segment
-        children = segment.children()
-        # Iterate content to find last element
-        last_content: BaseSegment = children.last(sp.is_code())[0]
-
-        # What mode are we in?
-        if self.select_clause_trailing_comma == "forbid":
-            # Is it a comma?
-            if last_content.is_type("comma"):
-                return LintResult(
-                    anchor=last_content,
-                    fixes=[LintFix.delete(last_content)],
-                    description="Trailing comma in select statement forbidden",
-                )
-        elif self.select_clause_trailing_comma == "require":
-            if not last_content.is_type("comma"):
-                new_comma = SymbolSegment(",", type="comma")
-                return LintResult(
-                    anchor=last_content,
-                    fixes=[LintFix.replace(last_content, [last_content, new_comma])],
-                    description="Trailing comma in select statement required",
-                )
-        return None
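
The forbid/require switch in L038 is self-contained enough to sketch with plain tokens; the real rule inspects the last code segment of the select clause and attaches a delete or replace fix accordingly:

from typing import List, Optional

def check_trailing_comma(tokens: List[str],
                         policy: str = "forbid") -> Optional[str]:
    # `tokens` are the code tokens of a select clause, in order.
    last = tokens[-1]
    if policy == "forbid" and last == ",":
        return "Trailing comma in select statement forbidden"
    if policy == "require" and last != ",":
        return "Trailing comma in select statement required"
    return None

assert check_trailing_comma(["a", ",", "b", ","]) is not None
assert check_trailing_comma(["a", ",", "b"]) is None
assert check_trailing_comma(["a", ",", "b"], policy="require") is not None
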
diff --git a/src/sqlfluff/rules/L039.py b/src/sqlfluff/rules/L039.py
deleted file mode 100644
index 0c0fd06..0000000
--- a/src/sqlfluff/rules/L039.py
+++ /dev/null
@@ -1,55 +0,0 @@
-"""Implementation of Rule L039."""
-from typing import List, Optional
-
-from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
-from sqlfluff.core.rules.crawlers import RootOnlyCrawler
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
-from sqlfluff.utils.reflow.sequence import ReflowSequence
-
-
-@document_groups
-@document_fix_compatible
-class Rule_L039(BaseRule):
-    """Unnecessary whitespace found.
-
-    **Anti-pattern**
-
-    .. code-block:: sql
-
-        SELECT
-            a,        b
-        FROM foo
-
-    **Best practice**
-
-    Unless it forms an indent or precedes a comment, whitespace
-    should be a single space.
-
-    .. code-block:: sql
-
-        SELECT
-            a, b
-        FROM foo
-    """
-
-    groups = ("all", "core")
-    crawl_behaviour = RootOnlyCrawler()
-
-    def _eval(self, context: RuleContext) -> Optional[List[LintResult]]:
-        """Unnecessary whitespace."""
-        sequence = ReflowSequence.from_root(context.segment, config=context.config)
-        results = sequence.respace(filter="inline").get_results()
-
-        # For now, respace rules are separate for creation and reduction.
-        # That shouldn't be true in future.
-
-        # But, until then - "not enough whitespace" is handled in other
-        # rules and this one should just handle "too much" (or "wrong amount").
-
-        # That means we take the returned results, and only keep the ones
-        # that modify or remove whitespace.
-        return [
-            result
-            for result in results
-            if any(fix.edit_type in ("replace", "delete") for fix in result.fixes)
-        ]
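
The comprehension at the end of L039 keeps only results whose fixes modify or remove whitespace, deferring "create" fixes to other rules. The same filter, sketched with stand-in dataclasses rather than sqlfluff's LintResult/LintFix:

from dataclasses import dataclass, field
from typing import List

@dataclass
class Fix:
    edit_type: str  # e.g. "create_before", "replace", "delete"

@dataclass
class Result:
    fixes: List[Fix] = field(default_factory=list)

def only_reductions(results: List[Result]) -> List[Result]:
    # Keep results whose fixes modify or remove whitespace.
    return [
        r for r in results
        if any(f.edit_type in ("replace", "delete") for f in r.fixes)
    ]

results = [Result([Fix("create_before")]), Result([Fix("delete")])]
assert len(only_reductions(results)) == 1
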
diff --git a/src/sqlfluff/rules/L048.py b/src/sqlfluff/rules/L048.py
deleted file mode 100644
index bd4c675..0000000
--- a/src/sqlfluff/rules/L048.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""Implementation of Rule L048."""
-
-from typing import List
-
-from sqlfluff.core.rules.base import BaseRule, LintResult
-from sqlfluff.core.rules.context import RuleContext
-from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
-from sqlfluff.utils.reflow import ReflowSequence
-
-
-@document_groups
-@document_fix_compatible
-class Rule_L048(BaseRule):
-    """Quoted literals should be surrounded by a single whitespace.
-
-    **Anti-pattern**
-
-    In this example, there is a space missing between the string
-    ``'foo'`` and the keyword ``AS``.
-
-    .. code-block:: sql
-
-        SELECT
-            'foo'AS bar
-        FROM foo
-
-
-    **Best practice**
-
-    Keep a single space.
-
-    .. code-block:: sql
-
-        SELECT
-            'foo' AS bar
-        FROM foo
-    """
-
-    groups = ("all", "core")
-    crawl_behaviour = SegmentSeekerCrawler(
-        {"quoted_literal", "date_constructor_literal"}
-    )
-
-    def _eval(self, context: RuleContext) -> List[LintResult]:
-        """Quoted literals should be surrounded by a single whitespace."""
-        return (
-            ReflowSequence.from_around_target(
-                context.segment, context.parent_stack[0], config=context.config
-            )
-            .respace()
-            .get_results()
-        )
diff --git a/src/sqlfluff/rules/L071.py b/src/sqlfluff/rules/L071.py
deleted file mode 100644
index 0a32b58..0000000
--- a/src/sqlfluff/rules/L071.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""Implementation of Rule L071."""
-
-from typing import List, Optional
-
-from sqlfluff.core.rules.base import BaseRule, LintResult
-from sqlfluff.core.rules.context import RuleContext
-from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
-
-from sqlfluff.utils.reflow.sequence import ReflowSequence
-
-
-@document_groups
-@document_fix_compatible
-class Rule_L071(BaseRule):
-    """Parenthesis blocks should be surrounded by whitespaces.
-
-    **Anti-pattern**
-
-    In this example, there is a space missing between the parenthesis
-    block ``( ... )`` and each of the keywords ``FROM`` and ``AS``.
-
-    .. code-block:: sql
-
-        SELECT * FROM(SELECT 1 AS C1)AS T1;
-
-    **Best practice**
-
-    Keep a single space.
-
-    .. code-block:: sql
-
-        SELECT * FROM (SELECT 1 AS C1) AS T1;
-
-    """
-
-    groups = ("all", "core")
-    crawl_behaviour = SegmentSeekerCrawler(
-        {"start_bracket", "end_bracket"}, provide_raw_stack=True
-    )
-
-    def _eval(self, context: RuleContext) -> Optional[List[LintResult]]:
-        """Parenthesis blocks should be surrounded by whitespaces."""
-        if (
-            context.segment.is_type("start_bracket")
-            and len(context.raw_stack) > 2
-            and context.raw_stack[-2].is_type("keyword")
-        ):
-            # and prior section not from or where
-            # reserved_keywords
-            return (
-                ReflowSequence.from_around_target(
-                    context.segment,
-                    context.parent_stack[0],
-                    config=context.config,
-                    sides="before",
-                )
-                .respace()
-                .get_results()
-            )
-        if context.segment.is_type("end_bracket"):
-            return (
-                ReflowSequence.from_around_target(
-                    context.segment,
-                    context.parent_stack[0],
-                    config=context.config,
-                    sides="after",
-                )
-                .respace()
-                .get_results()
-            )
-
-        return None
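
L071 only ever respaces one side of a bracket: before an opening bracket that closely follows a keyword, and after any closing bracket. That branch, sketched independently of the reflow machinery (after_keyword is an illustrative simplification of the raw_stack check):

from typing import Optional

def respace_side(token: str, after_keyword: bool = False) -> Optional[str]:
    # Which side of the bracket needs a space check, if any.
    if token == "(" and after_keyword:
        return "before"   # e.g. FROM(SELECT ...) -> FROM (SELECT ...)
    if token == ")":
        return "after"    # e.g. ...)AS T1 -> ...) AS T1
    return None

assert respace_side("(", after_keyword=True) == "before"
assert respace_side("(") is None
assert respace_side(")") == "after"
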
diff --git a/src/sqlfluff/rules/__init__.py b/src/sqlfluff/rules/__init__.py
index 1aaff5f..b73750c 100644
--- a/src/sqlfluff/rules/__init__.py
+++ b/src/sqlfluff/rules/__init__.py
@@ -1,20 +1 @@
 """Standard Rules packaged with sqlfluff."""
-
-
-from sqlfluff.core.plugin.host import get_plugin_manager
-
-# Sphinx effectively runs an import * from this module in rules.rst, so initialise
-# __all__ with an empty list before we populate it with the rule names.
-__all__ = []
-
-# Iterate through the rules list and register each rule as a global for documentation
-for plugin_rules in get_plugin_manager().hook.get_rules():
-    for rule in plugin_rules:
-        # Add the Rule classes to the module namespace with globals() so that they can
-        # be found by Sphinx automodule documentation in rules.rst
-        # The result is the same as declaring the classes in this file.
-        # Rules coming from the "Example" plugin are excluded from the
-        # documentation.
-        globals()[rule.__name__] = rule
-        # Add the rule class names to __all__ for Sphinx automodule discovery
-        __all__.append(rule.__name__)
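The registration boilerplate removed above is replaced by per-bundle plugin
registration (see the new ``aliasing/__init__.py`` and ``ambiguous/__init__.py``
further down). Rules remain discoverable through the plugin manager; a minimal
sketch, assuming sqlfluff 2.x:

.. code-block:: python

    # Enumerate every registered rule via the plugin manager instead of
    # importing them from sqlfluff.rules directly.
    from sqlfluff.core.plugin.host import get_plugin_manager

    for plugin_rules in get_plugin_manager().hook.get_rules():
        for rule in plugin_rules:
            # e.g. Rule_AL01  aliasing.table  ('L011',)
            print(rule.__name__, rule.name, rule.aliases)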
diff --git a/src/sqlfluff/rules/L011.py b/src/sqlfluff/rules/aliasing/AL01.py
similarity index 55%
rename from src/sqlfluff/rules/L011.py
rename to src/sqlfluff/rules/aliasing/AL01.py
index df1abca..06142f8 100644
--- a/src/sqlfluff/rules/L011.py
+++ b/src/sqlfluff/rules/aliasing/AL01.py
@@ -1,5 +1,5 @@
-"""Implementation of Rule L011."""
-from typing import List, Optional, Tuple
+"""Implementation of Rule AL01."""
+from typing import Optional, Tuple
 
 from sqlfluff.core.parser import (
     KeywordSegment,
@@ -7,18 +7,10 @@ from sqlfluff.core.parser import (
 
 from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
 from sqlfluff.utils.reflow import ReflowSequence
 
 
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L011(BaseRule):
+class Rule_AL01(BaseRule):
     """Implicit/explicit aliasing of table.
 
     Aliasing of table to follow preference
@@ -46,51 +38,66 @@ class Rule_L011(BaseRule):
 
     """
 
-    groups: Tuple[str, ...] = ("all",)
+    name = "aliasing.table"
+    aliases = ("L011",)
+    groups: Tuple[str, ...] = ("all", "aliasing")
     config_keywords = ["aliasing"]
     crawl_behaviour = SegmentSeekerCrawler({"alias_expression"}, provide_raw_stack=True)
+    is_fix_compatible = True
 
-    _target_elems: List[Tuple[str, str]] = [
-        ("type", "from_expression_element"),
-        ("type", "merge_statement"),
-    ]
+    _target_parent_types: Tuple[str, ...] = (
+        "from_expression_element",
+        "merge_statement",
+    )
 
     def _eval(self, context: RuleContext) -> Optional[LintResult]:
         """Implicit aliasing of table/column not allowed. Use explicit `AS` clause.
 
         We look for the alias segment, and then evaluate its parent and whether
-        it contains an AS keyword. This is the _eval function for both L011 and L012.
+        it contains an AS keyword. This is the _eval function for both AL01 and AL02.
         """
         # Config type hints
         self.aliasing: str
 
         assert context.segment.is_type("alias_expression")
-        if self.matches_target_tuples(context.parent_stack[-1], self._target_elems):
-            if any(e.raw_upper == "AS" for e in context.segment.segments):
+        if context.parent_stack[-1].is_type(*self._target_parent_types):
+            # Search for an AS keyword.
+            for as_keyword in context.segment.segments:
+                if as_keyword.raw_upper == "AS":
+                    break
+            else:
+                as_keyword = None
+
+            if as_keyword:
                 if self.aliasing == "implicit":
-                    if context.segment.segments[0].raw_upper == "AS":
-                        self.logger.debug("Removing AS keyword and respacing.")
-                        as_keyword = context.segment.segments[0]
-                        return LintResult(
-                            anchor=as_keyword,
-                            # Generate the fixes to remove and respace accordingly.
-                            fixes=ReflowSequence.from_around_target(
-                                as_keyword,
-                                context.parent_stack[0],
-                                config=context.config,
-                            )
-                            .without(as_keyword)
-                            .respace()
-                            .get_fixes(),
+                    self.logger.debug("Removing AS keyword and respacing.")
+                    return LintResult(
+                        anchor=as_keyword,
+                        # Generate the fixes to remove and respace accordingly.
+                        fixes=ReflowSequence.from_around_target(
+                            as_keyword,
+                            context.parent_stack[0],
+                            config=context.config,
                         )
+                        .without(as_keyword)
+                        .respace()
+                        .get_fixes(),
+                    )
 
             elif self.aliasing != "implicit":
                 self.logger.debug("Inserting AS keyword and respacing.")
+                for identifier in context.segment.raw_segments:
+                    if identifier.is_code:
+                        break
+                else:  # pragma: no cover
+                    raise NotImplementedError(
+                        "Failed to find identifier. Raise this as a bug on GitHub."
+                    )
                 return LintResult(
                     anchor=context.segment,
                     # Work out the insertion and reflow fixes.
                     fixes=ReflowSequence.from_around_target(
-                        context.segment.raw_segments[0],
+                        identifier,
                         context.parent_stack[0],
                         config=context.config,
                         # Only reflow before, otherwise we catch too much.
@@ -98,7 +105,7 @@ class Rule_L011(BaseRule):
                     )
                     .insert(
                         KeywordSegment("AS"),
-                        target=context.segment.raw_segments[0],
+                        target=identifier,
                         pos="before",
                     )
                     .respace()
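The rename pattern above (a dotted ``name``, with the legacy code kept in
``aliases``) means old and new codes select the same rule. A hedged sketch
using the simple API, assuming any of code, name or alias is accepted as a
rule selector:

.. code-block:: python

    # "AL01", its dotted name and its legacy alias "L011" should all select
    # the same rule (default aliasing policy for tables: explicit).
    import sqlfluff

    sql = "SELECT 1 FROM my_table t\n"
    for selector in ("AL01", "aliasing.table", "L011"):
        print(selector, sqlfluff.lint(sql, rules=[selector]))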
diff --git a/src/sqlfluff/rules/L012.py b/src/sqlfluff/rules/aliasing/AL02.py
similarity index 68%
rename from src/sqlfluff/rules/L012.py
rename to src/sqlfluff/rules/aliasing/AL02.py
index b4801ea..b1346d7 100644
--- a/src/sqlfluff/rules/L012.py
+++ b/src/sqlfluff/rules/aliasing/AL02.py
@@ -1,15 +1,12 @@
-"""Implementation of Rule L012."""
+"""Implementation of Rule AL02."""
 from typing import Optional
 
-from sqlfluff.rules.L011 import Rule_L011
-from sqlfluff.core.rules.doc_decorators import document_configuration, document_groups
+from sqlfluff.rules.aliasing.AL01 import Rule_AL01
 from sqlfluff.core.rules import LintResult, RuleContext
 from sqlfluff.utils.functional import FunctionalContext
 
 
-@document_groups
-@document_configuration
-class Rule_L012(Rule_L011):
+class Rule_AL02(Rule_AL01):
     """Implicit/explicit aliasing of columns.
 
     Aliasing of columns to follow preference
@@ -37,16 +34,16 @@ class Rule_L012(Rule_L011):
 
     """
 
-    groups = ("all", "core")
+    name = "aliasing.column"
+    aliases = ("L012",)
+    groups = ("all", "core", "aliasing")
     config_keywords = ["aliasing"]
-    # NB: crawl_behaviour is the same as Rule L011
+    # NB: crawl_behaviour is the same as Rule AL01
 
-    _target_elems = [
-        ("type", "select_clause_element"),
-    ]
+    _target_parent_types = ("select_clause_element",)
 
     def _eval(self, context: RuleContext) -> Optional[LintResult]:
-        # T-SQL supports alternative alias expressions for L012
+        # T-SQL supports alternative alias expressions for AL02
         # select alias = value
         # instead of
         # select value as alias
diff --git a/src/sqlfluff/rules/L013.py b/src/sqlfluff/rules/aliasing/AL03.py
similarity index 95%
rename from src/sqlfluff/rules/L013.py
rename to src/sqlfluff/rules/aliasing/AL03.py
index d2a7eac..c0e9cc4 100644
--- a/src/sqlfluff/rules/L013.py
+++ b/src/sqlfluff/rules/aliasing/AL03.py
@@ -1,15 +1,12 @@
-"""Implementation of Rule L013."""
+"""Implementation of Rule AL03."""
 from typing import Optional
 
 from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_configuration, document_groups
 from sqlfluff.utils.functional import Segments, sp, FunctionalContext
 
 
-@document_groups
-@document_configuration
-class Rule_L013(BaseRule):
+class Rule_AL03(BaseRule):
     """Column expression without alias. Use explicit `AS` clause.
 
     **Anti-pattern**
@@ -36,7 +33,9 @@ class Rule_L013(BaseRule):
 
     """
 
-    groups = ("all", "core")
+    name = "aliasing.expression"
+    aliases = ("L013",)
+    groups = ("all", "core", "aliasing")
     config_keywords = ["allow_scalar"]
     crawl_behaviour = SegmentSeekerCrawler({"select_clause_element"})
 
diff --git a/src/sqlfluff/rules/L020.py b/src/sqlfluff/rules/aliasing/AL04.py
similarity index 95%
rename from src/sqlfluff/rules/L020.py
rename to src/sqlfluff/rules/aliasing/AL04.py
index 54c4d89..fd50784 100644
--- a/src/sqlfluff/rules/L020.py
+++ b/src/sqlfluff/rules/aliasing/AL04.py
@@ -1,4 +1,4 @@
-"""Implementation of Rule L020."""
+"""Implementation of Rule AL04."""
 
 import itertools
 from typing import List, Optional, Tuple
@@ -8,11 +8,9 @@ from sqlfluff.core.parser import BaseSegment
 from sqlfluff.core.rules import BaseRule, LintResult, RuleContext, EvalResultType
 from sqlfluff.utils.analysis.select import get_select_statement_info
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_groups
 
 
-@document_groups
-class Rule_L020(BaseRule):
+class Rule_AL04(BaseRule):
     """Table aliases should be unique within each clause.
 
     Reusing table aliases is very likely a coding error.
@@ -61,7 +59,9 @@ class Rule_L020(BaseRule):
 
     """
 
-    groups: Tuple[str, ...] = ("all", "core")
+    name = "aliasing.unique.table"
+    aliases = ("L020",)
+    groups: Tuple[str, ...] = ("all", "core", "aliasing", "aliasing.unique")
     crawl_behaviour = SegmentSeekerCrawler({"select_statement"})
 
     def _lint_references_and_aliases(
diff --git a/src/sqlfluff/rules/L025.py b/src/sqlfluff/rules/aliasing/AL05.py
similarity index 89%
rename from src/sqlfluff/rules/L025.py
rename to src/sqlfluff/rules/aliasing/AL05.py
index 181a469..d834e2b 100644
--- a/src/sqlfluff/rules/L025.py
+++ b/src/sqlfluff/rules/aliasing/AL05.py
@@ -1,4 +1,4 @@
-"""Implementation of Rule L025."""
+"""Implementation of Rule AL05."""
 
 from dataclasses import dataclass, field
 from typing import cast, List, Set
@@ -18,22 +18,19 @@ from sqlfluff.core.rules import (
     EvalResultType,
 )
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
 from sqlfluff.utils.functional import Segments, sp
 from sqlfluff.core.dialects.common import AliasInfo
 
 
 @dataclass
-class L025Query(SelectCrawlerQuery):
-    """SelectCrawler Query with custom L025 info."""
+class AL05Query(SelectCrawlerQuery):
+    """SelectCrawler Query with custom AL05 info."""
 
     aliases: List[AliasInfo] = field(default_factory=list)
     tbl_refs: Set[str] = field(default_factory=set)
 
 
-@document_groups
-@document_fix_compatible
-class Rule_L025(BaseRule):
+class Rule_AL05(BaseRule):
     """Tables should not be aliased if that alias is not used.
 
     **Anti-pattern**
@@ -63,12 +60,15 @@ class Rule_L025(BaseRule):
 
     """
 
-    groups = ("all", "core")
+    name = "aliasing.unused"
+    aliases = ("L025",)
+    groups = ("all", "core", "aliasing")
     crawl_behaviour = SegmentSeekerCrawler({"select_statement"})
     _dialects_requiring_alias_for_values_clause = [
         "snowflake",
         "tsql",
     ]
+    is_fix_compatible = True
 
     def _eval(self, context: RuleContext) -> EvalResultType:
         violations: List[LintResult] = []
@@ -79,15 +79,15 @@ class Rule_L025(BaseRule):
             return None
 
         # Analyze the SELECT.
-        crawler = SelectCrawler(context.segment, context.dialect, query_class=L025Query)
-        query: L025Query = cast(L025Query, crawler.query_tree)
+        crawler = SelectCrawler(context.segment, context.dialect, query_class=AL05Query)
+        query: AL05Query = cast(AL05Query, crawler.query_tree)
         self._analyze_table_aliases(query, context.dialect)
 
         alias: AliasInfo
         for alias in query.aliases:
             # Skip alias if it's required (some dialects require aliases for
             # VALUES clauses).
-            if alias.from_expression_element and self.is_alias_required(
+            if alias.from_expression_element and self._is_alias_required(
                 alias.from_expression_element, context.dialect.name
             ):
                 continue
@@ -98,7 +98,7 @@ class Rule_L025(BaseRule):
         return violations or None
 
     @classmethod
-    def is_alias_required(
+    def _is_alias_required(
         cls, from_expression_element: BaseSegment, dialect_name: str
     ) -> bool:
         """Given an alias, is it REQUIRED to be present?
@@ -139,7 +139,7 @@ class Rule_L025(BaseRule):
         return False  # pragma: no cover
 
     @classmethod
-    def _analyze_table_aliases(cls, query: L025Query, dialect: Dialect):
+    def _analyze_table_aliases(cls, query: AL05Query, dialect: Dialect):
         # Get table aliases defined in query.
         for selectable in query.selectables:
             select_info = selectable.select_info
@@ -159,17 +159,17 @@ class Rule_L025(BaseRule):
 
         # Visit children.
         for child in query.children:
-            cls._analyze_table_aliases(cast(L025Query, child), dialect)
+            cls._analyze_table_aliases(cast(AL05Query, child), dialect)
 
     @classmethod
-    def _resolve_and_mark_reference(cls, query: L025Query, ref: str):
+    def _resolve_and_mark_reference(cls, query: AL05Query, ref: str):
         # Does this query define the referenced alias?
         if any(ref == a.ref_str for a in query.aliases):
             # Yes. Record the reference.
             query.tbl_refs.add(ref)
         elif query.parent:
             # No. Recursively check the query's parent hierarchy.
-            cls._resolve_and_mark_reference(cast(L025Query, query.parent), ref)
+            cls._resolve_and_mark_reference(cast(AL05Query, query.parent), ref)
 
     @classmethod
     def _report_unused_alias(cls, alias: AliasInfo) -> LintResult:
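Note the ``is_fix_compatible = True`` class attribute replacing the old
``@document_fix_compatible`` decorator, and ``is_alias_required`` becoming
private. A minimal sketch of the rule's fix behaviour, assuming the simple
``sqlfluff.fix`` API:

.. code-block:: python

    # An alias that is never referenced should be removed by the fixer.
    import sqlfluff

    fixed = sqlfluff.fix("SELECT 1 FROM foo AS zoo\n", rules=["AL05"])
    print(fixed)  # expected: "SELECT 1 FROM foo\n"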
diff --git a/src/sqlfluff/rules/L066.py b/src/sqlfluff/rules/aliasing/AL06.py
similarity index 94%
rename from src/sqlfluff/rules/L066.py
rename to src/sqlfluff/rules/aliasing/AL06.py
index 20a09f7..8b340bf 100644
--- a/src/sqlfluff/rules/L066.py
+++ b/src/sqlfluff/rules/aliasing/AL06.py
@@ -1,19 +1,13 @@
-"""Implementation of Rule L066."""
+"""Implementation of Rule AL06."""
 
 from typing import Optional
 
 from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_groups,
-)
 from sqlfluff.utils.functional import FunctionalContext
 
 
-@document_groups
-@document_configuration
-class Rule_L066(BaseRule):
+class Rule_AL06(BaseRule):
     """Enforce table alias lengths in from clauses and join conditions.
 
     **Anti-pattern**
@@ -31,7 +25,7 @@ class Rule_L066(BaseRule):
 
     Avoid aliases. Avoid short aliases when aliases are necessary.
 
-    See also: L031.
+    See also: :class:`Rule_AL07`.
 
     .. code-block:: sql
 
@@ -49,7 +43,9 @@ class Rule_L066(BaseRule):
             ON replacement_orders.id = previous_orders.replacement_id
     """
 
-    groups = ("all",)
+    name = "aliasing.length"
+    aliases = ("L066",)
+    groups = ("all", "core", "aliasing")
     config_keywords = ["min_alias_length", "max_alias_length"]
     crawl_behaviour = SegmentSeekerCrawler({"select_statement"})
 
diff --git a/src/sqlfluff/rules/L031.py b/src/sqlfluff/rules/aliasing/AL07.py
similarity index 91%
rename from src/sqlfluff/rules/L031.py
rename to src/sqlfluff/rules/aliasing/AL07.py
index 9d54a5a..2e55fd9 100644
--- a/src/sqlfluff/rules/L031.py
+++ b/src/sqlfluff/rules/aliasing/AL07.py
@@ -1,4 +1,4 @@
-"""Implementation of Rule L031."""
+"""Implementation of Rule AL07."""
 
 from collections import Counter, defaultdict
 from typing import Generator, NamedTuple, Optional
@@ -6,11 +6,6 @@ from typing import Generator, NamedTuple, Optional
 from sqlfluff.core.parser import BaseSegment
 from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
 
 from sqlfluff.utils.functional import sp, FunctionalContext
 
@@ -24,10 +19,7 @@ class TableAliasInfo(NamedTuple):
     alias_identifier_ref: BaseSegment
 
 
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L031(BaseRule):
+class Rule_AL07(BaseRule):
     """Avoid table aliases in from clauses and join conditions.
 
     .. note::
@@ -39,12 +31,18 @@ class Rule_L031(BaseRule):
         harder to understand what the table called "c" is compared to "customers".
 
        This rule is controversial and for many larger databases avoiding alias is
-       neither realistic nor desirable. In this case this rule should be disabled.
+       neither realistic nor desirable. In particular for BigQuery, given the
+       complexity of backtick requirements and of determining whether a name
+       refers to a project or dataset, automated fixes can potentially break
+       working SQL code. For most users, :class:`Rule_AL06` is likely a more
+       appropriate linting rule for driving sensible behaviour around aliasing.
 
-       This rule is disabled by default for BigQuery due to the complexity of
-       backtick requirements and determining whether a name refers to a project
-       or dataset, and automated fixes can potentially break working SQL code..
-       It can be enabled with the ``force_enable = True`` flag.
+       The stricter treatment of aliases in this rule may be useful for more
+       focused projects, or temporarily as a refactoring tool because the
+       :code:`fix` routine of the rule can remove aliases.
+
+       This rule is disabled by default for all dialects; it can be enabled
+       with the ``force_enable = True`` flag.
 
     **Anti-pattern**
 
@@ -84,10 +82,12 @@ class Rule_L031(BaseRule):
 
     """
 
-    groups = ("all",)
+    name = "aliasing.forbid"
+    aliases = ("L031",)
+    groups = ("all", "aliasing")
     config_keywords = ["force_enable"]
     crawl_behaviour = SegmentSeekerCrawler({"select_statement"})
-    _dialects_disabled_by_default = ["bigquery"]
+    is_fix_compatible = True
 
     def _eval(self, context: RuleContext) -> Optional[LintResult]:
         """Identify aliases in from clause and join conditions.
@@ -107,10 +107,7 @@ class Rule_L031(BaseRule):
         # to BigQuery when it is looking at the query, it would be complex for
         # this rule to do the right thing. For now, the rule simply disables
         # itself.
-        if (
-            context.dialect.name in self._dialects_disabled_by_default
-            and not self.force_enable
-        ):
+        if not self.force_enable:
             return LintResult()
 
         assert context.segment.is_type("select_statement")
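With ``_dialects_disabled_by_default`` removed, the rule now short-circuits
for every dialect unless ``force_enable`` is set. A hypothetical ``.sqlfluff``
snippet to opt back in:

.. code-block:: ini

    [sqlfluff:rules:aliasing.forbid]
    force_enable = True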
diff --git a/src/sqlfluff/rules/aliasing/__init__.py b/src/sqlfluff/rules/aliasing/__init__.py
new file mode 100644
index 0000000..799c54f
--- /dev/null
+++ b/src/sqlfluff/rules/aliasing/__init__.py
@@ -0,0 +1,17 @@
+"""The aliasing plugin bundle."""
+
+from sqlfluff.core.plugin import hookimpl
+
+from sqlfluff.rules.aliasing.AL01 import Rule_AL01
+from sqlfluff.rules.aliasing.AL02 import Rule_AL02
+from sqlfluff.rules.aliasing.AL03 import Rule_AL03
+from sqlfluff.rules.aliasing.AL04 import Rule_AL04
+from sqlfluff.rules.aliasing.AL05 import Rule_AL05
+from sqlfluff.rules.aliasing.AL06 import Rule_AL06
+from sqlfluff.rules.aliasing.AL07 import Rule_AL07
+
+
+@hookimpl
+def get_rules():
+    """Get plugin rules."""
+    return [Rule_AL01, Rule_AL02, Rule_AL03, Rule_AL04, Rule_AL05, Rule_AL06, Rule_AL07]
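The same ``@hookimpl`` / ``get_rules`` pattern serves third-party plugins; a
sketch with hypothetical names (``my_plugin``, ``Rule_Example_L001``):

.. code-block:: python

    from sqlfluff.core.plugin import hookimpl


    @hookimpl
    def get_rules():
        """Register custom rules exactly like the bundled ones above."""
        from my_plugin.rules import Rule_Example_L001

        return [Rule_Example_L001]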
diff --git a/src/sqlfluff/rules/L021.py b/src/sqlfluff/rules/ambiguous/AM01.py
similarity index 89%
rename from src/sqlfluff/rules/L021.py
rename to src/sqlfluff/rules/ambiguous/AM01.py
index ad09efa..dcaaaab 100644
--- a/src/sqlfluff/rules/L021.py
+++ b/src/sqlfluff/rules/ambiguous/AM01.py
@@ -1,14 +1,12 @@
-"""Implementation of Rule L021."""
-from typing import Optional
+"""Implementation of Rule AM01."""
+from typing import Optional, Tuple
 
 from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
 from sqlfluff.utils.functional import sp, FunctionalContext
-from sqlfluff.core.rules.doc_decorators import document_groups
 
 
-@document_groups
-class Rule_L021(BaseRule):
+class Rule_AM01(BaseRule):
     """Ambiguous use of ``DISTINCT`` in a ``SELECT`` statement with ``GROUP BY``.
 
     When using ``GROUP BY`` a ``DISTINCT`` clause should not be necessary as every
@@ -36,7 +34,9 @@ class Rule_L021(BaseRule):
         FROM foo
     """
 
-    groups = ("all", "core")
+    name = "ambiguous.distinct"
+    aliases = ("L021",)
+    groups: Tuple[str, ...] = ("all", "core", "ambiguous")
     crawl_behaviour = SegmentSeekerCrawler({"select_statement"})
 
     def _eval(self, context: RuleContext) -> Optional[LintResult]:
diff --git a/src/sqlfluff/rules/L033.py b/src/sqlfluff/rules/ambiguous/AM02.py
similarity index 92%
rename from src/sqlfluff/rules/L033.py
rename to src/sqlfluff/rules/ambiguous/AM02.py
index 313a365..00faf01 100644
--- a/src/sqlfluff/rules/L033.py
+++ b/src/sqlfluff/rules/ambiguous/AM02.py
@@ -1,4 +1,5 @@
-"""Implementation of Rule L033."""
+"""Implementation of Rule AM02."""
+from typing import Tuple
 from sqlfluff.core.parser import (
     WhitespaceSegment,
     KeywordSegment,
@@ -6,11 +7,9 @@ from sqlfluff.core.parser import (
 
 from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_groups
 
 
-@document_groups
-class Rule_L033(BaseRule):
+class Rule_AM02(BaseRule):
     """``UNION [DISTINCT|ALL]`` is preferred over just ``UNION``.
 
     .. note::
@@ -41,8 +40,11 @@ class Rule_L033(BaseRule):
 
     """
 
-    groups = ("all", "core")
+    name = "ambiguous.union"
+    aliases = ("L033",)
+    groups: Tuple[str, ...] = ("all", "core", "ambiguous")
     crawl_behaviour = SegmentSeekerCrawler({"set_operator"})
+    is_fix_compatible = True
 
     def _eval(self, context: RuleContext) -> LintResult:
         """Look for UNION keyword not immediately followed by DISTINCT or ALL.
diff --git a/src/sqlfluff/rules/L037.py b/src/sqlfluff/rules/ambiguous/AM03.py
similarity index 87%
rename from src/sqlfluff/rules/L037.py
rename to src/sqlfluff/rules/ambiguous/AM03.py
index a7f04a4..8aa48dc 100644
--- a/src/sqlfluff/rules/L037.py
+++ b/src/sqlfluff/rules/ambiguous/AM03.py
@@ -1,25 +1,22 @@
-"""Implementation of Rule L037."""
+"""Implementation of Rule AM03."""
 
-from typing import NamedTuple, Optional, List
+from typing import NamedTuple, Optional, List, Tuple
 
 from sqlfluff.core.parser import WhitespaceSegment, KeywordSegment
 
 from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext
 from sqlfluff.core.parser import BaseSegment
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
 
 
 class OrderByColumnInfo(NamedTuple):
-    """For L037, segment that ends an ORDER BY column and any order provided."""
+    """For AM03, segment that ends an ORDER BY column and any order provided."""
 
     column_reference: BaseSegment
     order: Optional[str]  # One of 'ASC'/'DESC'/None
 
 
-@document_groups
-@document_fix_compatible
-class Rule_L037(BaseRule):
+class Rule_AM03(BaseRule):
     """Ambiguous ordering directions for columns in order by clause.
 
     **Anti-pattern**
@@ -44,8 +41,11 @@ class Rule_L037(BaseRule):
         ORDER BY a ASC, b DESC
     """
 
-    groups = ("all",)
+    name = "ambiguous.order_by"
+    aliases = ("L037",)
+    groups: Tuple[str, ...] = ("all", "ambiguous")
     crawl_behaviour = SegmentSeekerCrawler({"orderby_clause"})
+    is_fix_compatible = True
 
     @staticmethod
     def _get_orderby_info(segment: BaseSegment) -> List[OrderByColumnInfo]:
@@ -62,12 +62,7 @@ class Rule_L037(BaseRule):
                 "DESC",
             ):
                 ordering_reference = child_segment.raw_upper
-            elif column_reference and child_segment.type not in [
-                "keyword",
-                "whitespace",
-                "indent",
-                "dedent",
-            ]:
+            if column_reference and child_segment.raw == ",":
                 result.append(
                     OrderByColumnInfo(
                         column_reference=column_reference, order=ordering_reference
diff --git a/src/sqlfluff/rules/L044.py b/src/sqlfluff/rules/ambiguous/AM04.py
similarity index 96%
rename from src/sqlfluff/rules/L044.py
rename to src/sqlfluff/rules/ambiguous/AM04.py
index 8b92ef9..4c727a0 100644
--- a/src/sqlfluff/rules/L044.py
+++ b/src/sqlfluff/rules/ambiguous/AM04.py
@@ -1,11 +1,10 @@
-"""Implementation of Rule L044."""
-from typing import Optional
+"""Implementation of Rule AM04."""
+from typing import Optional, Tuple
 
 from sqlfluff.utils.analysis.select_crawler import Query, SelectCrawler
 from sqlfluff.core.parser import BaseSegment
 from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_groups
 from sqlfluff.utils.functional import sp, FunctionalContext
 
 
@@ -19,8 +18,7 @@ class RuleFailure(Exception):
         self.anchor: BaseSegment = anchor
 
 
-@document_groups
-class Rule_L044(BaseRule):
+class Rule_AM04(BaseRule):
     """Query produces an unknown number of result columns.
 
     **Anti-pattern**
@@ -68,7 +66,9 @@ class Rule_L044(BaseRule):
 
     """
 
-    groups = ("all",)
+    name = "ambiguous.column_count"
+    aliases = ("L044",)
+    groups: Tuple[str, ...] = ("all", "ambiguous")
     crawl_behaviour = SegmentSeekerCrawler(set(_START_TYPES))
 
     def _handle_alias(self, selectable, alias_info, query):
diff --git a/src/sqlfluff/rules/L051.py b/src/sqlfluff/rules/ambiguous/AM05.py
similarity index 88%
rename from src/sqlfluff/rules/L051.py
rename to src/sqlfluff/rules/ambiguous/AM05.py
index 178a317..4c2e217 100644
--- a/src/sqlfluff/rules/L051.py
+++ b/src/sqlfluff/rules/ambiguous/AM05.py
@@ -1,20 +1,12 @@
-"""Implementation of Rule L051."""
-from typing import Optional
+"""Implementation of Rule AM05."""
+from typing import Optional, Tuple
 from sqlfluff.core.parser.segments.raw import KeywordSegment, WhitespaceSegment
 
 from sqlfluff.core.rules import BaseRule, LintResult, LintFix, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
 
 
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L051(BaseRule):
+class Rule_AM05(BaseRule):
     """Join clauses should be fully qualified.
 
     By default this rule is configured to enforce fully qualified ``INNER JOIN``
@@ -23,7 +15,7 @@ class Rule_L051(BaseRule):
 
     **Anti-pattern**
 
-    A join is specified without expliciting the **kind** of join.
+    A join is used without specifying the **kind** of join.
 
     .. code-block:: sql
        :force:
@@ -46,9 +38,12 @@ class Rule_L051(BaseRule):
         INNER JOIN baz;
     """
 
-    groups = ("all",)
+    name = "ambiguous.join"
+    aliases = ("L051",)
+    groups: Tuple[str, ...] = ("all", "ambiguous")
     config_keywords = ["fully_qualify_join_types"]
     crawl_behaviour = SegmentSeekerCrawler({"join_clause"})
+    is_fix_compatible = True
 
     def _eval(self, context: RuleContext) -> Optional[LintResult]:
         """Fully qualify JOINs."""
diff --git a/src/sqlfluff/rules/L054.py b/src/sqlfluff/rules/ambiguous/AM06.py
similarity index 95%
rename from src/sqlfluff/rules/L054.py
rename to src/sqlfluff/rules/ambiguous/AM06.py
index 2d68ba5..b903d37 100644
--- a/src/sqlfluff/rules/L054.py
+++ b/src/sqlfluff/rules/ambiguous/AM06.py
@@ -1,15 +1,12 @@
-"""Implementation of Rule L054."""
-from typing import Optional, List
+"""Implementation of Rule AM06."""
+from typing import Optional, List, Tuple
 
 from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_configuration, document_groups
 from sqlfluff.utils.functional import sp, FunctionalContext
 
 
-@document_groups
-@document_configuration
-class Rule_L054(BaseRule):
+class Rule_AM06(BaseRule):
     """Inconsistent column references in ``GROUP BY/ORDER BY`` clauses.
 
     .. note::
@@ -83,7 +80,9 @@ class Rule_L054(BaseRule):
             1, 2;
     """
 
-    groups = ("all", "core")
+    name = "ambiguous.column_references"
+    aliases = ("L054",)
+    groups: Tuple[str, ...] = ("all", "core", "ambiguous")
     config_keywords = ["group_by_and_order_by_style"]
     crawl_behaviour = SegmentSeekerCrawler({"groupby_clause", "orderby_clause"})
     _ignore_types: List[str] = ["withingroup_clause", "window_specification"]
diff --git a/src/sqlfluff/rules/L068.py b/src/sqlfluff/rules/ambiguous/AM07.py
similarity index 97%
rename from src/sqlfluff/rules/L068.py
rename to src/sqlfluff/rules/ambiguous/AM07.py
index 9abbfbc..44fe715 100644
--- a/src/sqlfluff/rules/L068.py
+++ b/src/sqlfluff/rules/ambiguous/AM07.py
@@ -1,14 +1,12 @@
-"""Implementation of Rule L068."""
-from typing import Optional
+"""Implementation of Rule AM07."""
+from typing import Optional, Tuple
 
 from sqlfluff.utils.analysis.select_crawler import Query, SelectCrawler, WildcardInfo
 from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_groups
 
 
-@document_groups
-class Rule_L068(BaseRule):
+class Rule_AM07(BaseRule):
     """Queries within set query produce different numbers of columns.
 
     **Anti-pattern**
@@ -52,7 +50,9 @@ class Rule_L068(BaseRule):
         FROM t
     """
 
-    groups = ("all",)
+    name = "ambiguous.set_columns"
+    aliases = ("L068",)
+    groups: Tuple[str, ...] = ("all", "ambiguous")
     crawl_behaviour = SegmentSeekerCrawler({"set_expression"}, provide_raw_stack=True)
 
     def __handle_alias_case(
diff --git a/src/sqlfluff/rules/ambiguous/__init__.py b/src/sqlfluff/rules/ambiguous/__init__.py
new file mode 100644
index 0000000..a9d9ea4
--- /dev/null
+++ b/src/sqlfluff/rules/ambiguous/__init__.py
@@ -0,0 +1,20 @@
+"""The ambiguous plugin bundle.
+
+NOTE: Yes, the title of this bundle is ...ambiguous. 😁
+"""
+
+from sqlfluff.core.plugin import hookimpl
+
+from sqlfluff.rules.ambiguous.AM01 import Rule_AM01
+from sqlfluff.rules.ambiguous.AM02 import Rule_AM02
+from sqlfluff.rules.ambiguous.AM03 import Rule_AM03
+from sqlfluff.rules.ambiguous.AM04 import Rule_AM04
+from sqlfluff.rules.ambiguous.AM05 import Rule_AM05
+from sqlfluff.rules.ambiguous.AM06 import Rule_AM06
+from sqlfluff.rules.ambiguous.AM07 import Rule_AM07
+
+
+@hookimpl
+def get_rules():
+    """Get plugin rules."""
+    return [Rule_AM01, Rule_AM02, Rule_AM03, Rule_AM04, Rule_AM05, Rule_AM06, Rule_AM07]
diff --git a/src/sqlfluff/rules/L010.py b/src/sqlfluff/rules/capitalisation/CP01.py
similarity index 83%
rename from src/sqlfluff/rules/L010.py
rename to src/sqlfluff/rules/capitalisation/CP01.py
index 68e9c91..4de7e79 100644
--- a/src/sqlfluff/rules/L010.py
+++ b/src/sqlfluff/rules/capitalisation/CP01.py
@@ -1,4 +1,4 @@
-"""Implementation of Rule L010."""
+"""Implementation of Rule CP01."""
 
 import regex
 from typing import Tuple, List, Optional
@@ -6,11 +6,6 @@ from sqlfluff.core.parser import BaseSegment
 from sqlfluff.core.rules import BaseRule, LintResult, LintFix, RuleContext
 from sqlfluff.core.rules.config_info import get_config_info
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
 
 
 def is_capitalizable(character: str) -> bool:
@@ -20,10 +15,7 @@ def is_capitalizable(character: str) -> bool:
     return True
 
 
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L010(BaseRule):
+class Rule_CP01(BaseRule):
     """Inconsistent capitalisation of keywords.
 
     **Anti-pattern**
@@ -53,19 +45,21 @@ class Rule_L010(BaseRule):
         from foo
     """
 
-    groups: Tuple[str, ...] = ("all", "core")
+    name = "capitalisation.keywords"
+    aliases = ("L010",)
+    groups: Tuple[str, ...] = ("all", "core", "capitalisation")
+    is_fix_compatible = True
+
     lint_phase = "post"
     # Binary operators behave like keywords too.
     crawl_behaviour = SegmentSeekerCrawler({"keyword", "binary_operator", "date_part"})
-    # Skip boolean and null literals (which are also keywords)
-    # as they have their own rule (L040)
-    _exclude_elements: List[Tuple[str, str]] = [
-        ("type", "null_literal"),
-        ("type", "boolean_literal"),
-        ("parenttype", "data_type"),
-        ("parenttype", "datetime_type_identifier"),
-        ("parenttype", "primitive_type"),
-    ]
+    # Skip literals (which are also keywords) as they have their own rule (CP04)
+    _exclude_types: Tuple[str, ...] = ("literal",)
+    _exclude_parent_types: Tuple[str, ...] = (
+        "data_type",
+        "datetime_type_identifier",
+        "primitive_type",
+    )
     config_keywords = ["capitalisation_policy", "ignore_words", "ignore_words_regex"]
     # Human readable target elem for description
     _description_elem = "Keywords"
@@ -78,27 +72,25 @@ class Rule_L010(BaseRule):
         for what the possible case is.
 
         """
-        # Skip if not an element of the specified type/name
-        parent: Optional[BaseSegment] = (
-            context.parent_stack[-1] if context.parent_stack else None
-        )
-        if self.matches_target_tuples(context.segment, self._exclude_elements, parent):
+        # NOTE: Given the dialect structure we can assume the targets have a parent.
+        parent: BaseSegment = context.parent_stack[-1]
+        if context.segment.is_type(*self._exclude_types) or parent.is_type(
+            *self._exclude_parent_types
+        ):
             return [LintResult(memory=context.memory)]
 
-        # Used by L030 (that inherits from this rule)
+        # Used by CP03 (that inherits from this rule)
         # If it's a qualified function_name (i.e with more than one part to
         # function_name). Then it is likely an existing user defined function (UDF)
         # which are case sensitive so ignore for this.
-        if (
-            context.parent_stack[-1].get_type() == "function_name"
-            and len(context.parent_stack[-1].segments) != 1
-        ):
+        if parent.get_type() == "function_name" and len(parent.segments) != 1:
             return [LintResult(memory=context.memory)]
 
-        return [self._handle_segment(context.segment, context.memory)]
+        return [self._handle_segment(context.segment, context)]
 
-    def _handle_segment(self, segment, memory) -> LintResult:
+    def _handle_segment(self, segment, context: RuleContext) -> LintResult:
         # NOTE: this mutates the memory field.
+        memory = context.memory
         self.logger.info("_handle_segment: %s, %s", segment, segment.get_type())
         # Config type hints
         self.ignore_words_regex: str
@@ -108,6 +100,7 @@ class Rule_L010(BaseRule):
             cap_policy = self.cap_policy
             cap_policy_opts = self.cap_policy_opts
             ignore_words_list = self.ignore_words_list
+            ignore_templated_areas = self.ignore_templated_areas
         except AttributeError:
             # First-time only, read the settings from configuration. This is
             # very slow.
@@ -115,7 +108,8 @@ class Rule_L010(BaseRule):
                 cap_policy,
                 cap_policy_opts,
                 ignore_words_list,
-            ) = self._init_capitalisation_policy()
+                ignore_templated_areas,
+            ) = self._init_capitalisation_policy(context)
 
         # Skip if in ignore list
         if ignore_words_list and segment.raw.lower() in ignore_words_list:
@@ -127,8 +121,10 @@ class Rule_L010(BaseRule):
         ):
             return LintResult(memory=memory)
 
-        # Skip if templated.
-        if segment.is_templated:
+        # Skip if templated. If the user wants to ignore templated areas, we
+        # don't even want to look at them, to avoid flagging non-templated
+        # areas that are merely inconsistent with the templated ones.
+        if segment.is_templated and ignore_templated_areas:
             return LintResult(memory=memory)
 
         # Skip if empty.
@@ -259,7 +255,7 @@ class Rule_L010(BaseRule):
         """
         return LintFix.replace(segment, [segment.edit(fixed_raw)])
 
-    def _init_capitalisation_policy(self):
+    def _init_capitalisation_policy(self, context: RuleContext):
         """Called first time rule is evaluated to fetch & cache the policy."""
         cap_policy_name = next(
             k for k in self.config_keywords if k.endswith("capitalisation_policy")
@@ -270,7 +266,7 @@ class Rule_L010(BaseRule):
             for opt in get_config_info()[cap_policy_name]["validation"]
             if opt != "consistent"
         ]
-        # Use str() as L040 uses bools which might otherwise be read as bool
+        # Use str() as CP04 uses bools which might otherwise be read as bool
         ignore_words_config = str(getattr(self, "ignore_words"))
         if ignore_words_config and ignore_words_config != "None":
             self.ignore_words_list = self.split_comma_separated_string(
@@ -278,6 +274,7 @@ class Rule_L010(BaseRule):
             )
         else:
             self.ignore_words_list = []
+        self.ignore_templated_areas = context.config.get("ignore_templated_areas")
         self.logger.debug(
             f"Selected '{cap_policy_name}': '{self.cap_policy}' from options "
             f"{self.cap_policy_opts}"
@@ -285,4 +282,5 @@ class Rule_L010(BaseRule):
         cap_policy = self.cap_policy
         cap_policy_opts = self.cap_policy_opts
         ignore_words_list = self.ignore_words_list
-        return cap_policy, cap_policy_opts, ignore_words_list
+        ignore_templated_areas = self.ignore_templated_areas
+        return cap_policy, cap_policy_opts, ignore_words_list, ignore_templated_areas
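The new ``ignore_templated_areas`` lookup wires CP01 to the existing core
setting, so templated keywords are skipped by default. A hypothetical
``.sqlfluff`` snippet to make them count towards consistency again:

.. code-block:: ini

    [sqlfluff]
    ignore_templated_areas = False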
diff --git a/src/sqlfluff/rules/L014.py b/src/sqlfluff/rules/capitalisation/CP02.py
similarity index 60%
rename from src/sqlfluff/rules/L014.py
rename to src/sqlfluff/rules/capitalisation/CP02.py
index d67c285..464da39 100644
--- a/src/sqlfluff/rules/L014.py
+++ b/src/sqlfluff/rules/capitalisation/CP02.py
@@ -1,41 +1,14 @@
-"""Implementation of Rule L014."""
+"""Implementation of Rule CP02."""
 
-from typing import Tuple, Optional, List
+from typing import Optional, List
 
-from sqlfluff.core.parser import BaseSegment
 from sqlfluff.core.rules import LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
-from sqlfluff.rules.L010 import Rule_L010
-
-
-def identifiers_policy_applicable(
-    policy: str, parent_stack: Tuple[BaseSegment, ...]
-) -> bool:
-    """Does `(un)quoted_identifiers_policy` apply to this segment?"""
-    if policy == "all":
-        return True
-    if policy == "none":
-        return False
-    is_alias = parent_stack and parent_stack[-1].is_type(
-        "alias_expression", "column_definition", "with_compound_statement"
-    )
-    if policy == "aliases" and is_alias:
-        return True
-    is_inside_from = any(p.is_type("from_clause") for p in parent_stack)
-    if policy == "column_aliases" and is_alias and not is_inside_from:
-        return True
-    return False
-
-
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L014(Rule_L010):
+from sqlfluff.utils.identifers import identifiers_policy_applicable
+from sqlfluff.rules.capitalisation.CP01 import Rule_CP01
+
+
+class Rule_CP02(Rule_CP01):
     """Inconsistent capitalisation of unquoted identifiers.
 
     **Anti-pattern**
@@ -70,8 +43,10 @@ class Rule_L014(Rule_L010):
 
     """
 
-    groups = ("all", "core")
-    lint_phase = "post"
+    name = "capitalisation.identifiers"
+    aliases = ("L014",)
+    is_fix_compatible = True
+
     crawl_behaviour = SegmentSeekerCrawler(
         {"naked_identifier", "properties_naked_identifier"}
     )
@@ -88,7 +63,7 @@ class Rule_L014(Rule_L010):
         # Data Feed
         # https://docs.delta.io/2.0.0/delta-change-data-feed.html#enable-change-data-feed
         if (
-            context.dialect.name in ["sparksql"]
+            context.dialect.name in ["databricks", "sparksql"]
             and context.parent_stack
             and context.parent_stack[-1].type == "property_name_identifier"
             and context.segment.raw == "enableChangeDataFeed"
diff --git a/src/sqlfluff/rules/L030.py b/src/sqlfluff/rules/capitalisation/CP03.py
similarity index 66%
rename from src/sqlfluff/rules/L030.py
rename to src/sqlfluff/rules/capitalisation/CP03.py
index d009fcd..d29fd75 100644
--- a/src/sqlfluff/rules/L030.py
+++ b/src/sqlfluff/rules/capitalisation/CP03.py
@@ -1,20 +1,11 @@
-"""Implementation of Rule L030."""
+"""Implementation of Rule CP03."""
 
-from typing import List, Tuple
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
 
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
-from sqlfluff.rules.L010 import Rule_L010
+from sqlfluff.rules.capitalisation.CP01 import Rule_CP01
 
 
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L030(Rule_L010):
+class Rule_CP03(Rule_CP01):
     """Inconsistent capitalisation of function names.
 
     **Anti-pattern**
@@ -41,12 +32,16 @@ class Rule_L030(Rule_L010):
 
     """
 
-    groups = ("all", "core")
-    lint_phase = "post"
+    name = "capitalisation.functions"
+    aliases = ("L030",)
+    is_fix_compatible = True
+
     crawl_behaviour = SegmentSeekerCrawler(
         {"function_name_identifier", "bare_function"}
     )
-    _exclude_elements: List[Tuple[str, str]] = []
+    _exclude_types = ()
+    _exclude_parent_types = ()
+
     config_keywords = [
         "extended_capitalisation_policy",
         "ignore_words",
diff --git a/src/sqlfluff/rules/L040.py b/src/sqlfluff/rules/capitalisation/CP04.py
similarity index 67%
rename from src/sqlfluff/rules/L040.py
rename to src/sqlfluff/rules/capitalisation/CP04.py
index 0b2b6d2..0ca971b 100644
--- a/src/sqlfluff/rules/L040.py
+++ b/src/sqlfluff/rules/capitalisation/CP04.py
@@ -1,20 +1,11 @@
-"""Implementation of Rule L040."""
+"""Implementation of Rule CP04."""
 
-from typing import Tuple, List
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
 
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
-from sqlfluff.rules.L010 import Rule_L010
+from sqlfluff.rules.capitalisation.CP01 import Rule_CP01
 
 
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L040(Rule_L010):
+class Rule_CP04(Rule_CP01):
     """Inconsistent capitalisation of boolean/null literal.
 
     **Anti-pattern**
@@ -56,8 +47,11 @@ class Rule_L040(Rule_L010):
 
     """
 
-    groups = ("all", "core")
-    lint_phase = "post"
+    name = "capitalisation.literals"
+    aliases = ("L040",)
+    is_fix_compatible = True
+
     crawl_behaviour = SegmentSeekerCrawler({"null_literal", "boolean_literal"})
-    _exclude_elements: List[Tuple[str, str]] = []
+    _exclude_types = ()
+    _exclude_parent_types = ()
     _description_elem = "Boolean/null literals"
diff --git a/src/sqlfluff/rules/L063.py b/src/sqlfluff/rules/capitalisation/CP05.py
similarity index 60%
rename from src/sqlfluff/rules/L063.py
rename to src/sqlfluff/rules/capitalisation/CP05.py
index 958b83c..cf3cd44 100644
--- a/src/sqlfluff/rules/L063.py
+++ b/src/sqlfluff/rules/capitalisation/CP05.py
@@ -1,23 +1,15 @@
-"""Implementation of Rule L063."""
+"""Implementation of Rule CP05."""
 
-from typing import Tuple, List, Optional
+from typing import List
 from sqlfluff.core.parser import BaseSegment
 from sqlfluff.core.rules.base import LintResult
 from sqlfluff.core.rules.context import RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
 
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
-from sqlfluff.rules.L010 import Rule_L010
+from sqlfluff.rules.capitalisation.CP01 import Rule_CP01
 
 
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L063(Rule_L010):
+class Rule_CP05(Rule_CP01):
     """Inconsistent capitalisation of datatypes.
 
     **Anti-pattern**
@@ -45,8 +37,11 @@ class Rule_L063(Rule_L010):
 
     """
 
-    groups = ("all",)
-    lint_phase = "post"
+    name = "capitalisation.types"
+    aliases = ("L063",)
+    groups = ("all", "core", "capitalisation")
+    is_fix_compatible = True
+
     crawl_behaviour = SegmentSeekerCrawler(
         {
             "data_type_identifier",
@@ -55,13 +50,12 @@ class Rule_L063(Rule_L010):
             "data_type",
         }
     )
-    _target_elems: List[Tuple[str, str]] = [
-        ("parenttype", "data_type"),
-        ("parenttype", "datetime_type_identifier"),
-        ("parenttype", "primitive_type"),
-        ("type", "data_type_identifier"),
-    ]
-    _exclude_elements: List[Tuple[str, str]] = []
+    # NOTE: CP05 overrides `_eval` and then only calls
+    # `_handle_segment` from CP01. Setting `_exclude_types`
+    # and `_exclude_parent_types` therefore has no effect.
+    # They are set here to empty tuples to avoid confusion.
+    _exclude_types = ()
+    _exclude_parent_types = ()
     config_keywords = [
         "extended_capitalisation_policy",
         "ignore_words",
@@ -70,20 +64,13 @@ class Rule_L063(Rule_L010):
     _description_elem = "Datatypes"
 
     def _eval(self, context: RuleContext) -> List[LintResult]:
-        """Inconsistent capitalisation of keywords.
+        """Inconsistent capitalisation of datatypes.
 
         We use the `memory` feature here to keep track of cases known to be
-        INconsistent with what we've seen so far as well as the top choice
+        inconsistent with what we've seen so far as well as the top choice
         for what the possible case is.
 
         """
-        # Skip if not an element of the specified type/name
-        parent: Optional[BaseSegment] = (
-            context.parent_stack[-1] if context.parent_stack else None
-        )
-        if self.matches_target_tuples(context.segment, self._exclude_elements, parent):
-            return [LintResult(memory=context.memory)]  # pragma: no cover
-
         results = []
         # For some of these segments we want to run the code on
         if context.segment.is_type(
@@ -96,16 +83,18 @@ class Rule_L063(Rule_L010):
                     "symbol", "identifier", "quoted_literal"
                 ) or not seg.is_type("raw"):
                     continue
-                res = self._handle_segment(seg, context.memory)
+                res = self._handle_segment(seg, context)
                 if res:
                     results.append(res)
 
+        # NOTE: Given the dialect structure we can assume the targets have a parent.
+        parent: BaseSegment = context.parent_stack[-1]
         # Don't process it if it's likely to have been processed by the parent.
-        if context.segment.is_type("data_type_identifier") and not context.parent_stack[
-            -1
-        ].is_type("primitive_type", "datetime_type_identifier", "data_type"):
+        if context.segment.is_type("data_type_identifier") and not parent.is_type(
+            "primitive_type", "datetime_type_identifier", "data_type"
+        ):
             results.append(
-                self._handle_segment(context.segment, context.memory)
+                self._handle_segment(context.segment, context)
             )  # pragma: no cover
 
         return results
diff --git a/src/sqlfluff/rules/capitalisation/__init__.py b/src/sqlfluff/rules/capitalisation/__init__.py
new file mode 100644
index 0000000..3486074
--- /dev/null
+++ b/src/sqlfluff/rules/capitalisation/__init__.py
@@ -0,0 +1,15 @@
+"""The capitalisation plugin bundle."""
+
+from sqlfluff.core.plugin import hookimpl
+
+from sqlfluff.rules.capitalisation.CP01 import Rule_CP01
+from sqlfluff.rules.capitalisation.CP02 import Rule_CP02
+from sqlfluff.rules.capitalisation.CP03 import Rule_CP03
+from sqlfluff.rules.capitalisation.CP04 import Rule_CP04
+from sqlfluff.rules.capitalisation.CP05 import Rule_CP05
+
+
+@hookimpl
+def get_rules():
+    """Get plugin rules."""
+    return [Rule_CP01, Rule_CP02, Rule_CP03, Rule_CP04, Rule_CP05]
diff --git a/src/sqlfluff/rules/L061.py b/src/sqlfluff/rules/convention/CV01.py
similarity index 89%
rename from src/sqlfluff/rules/L061.py
rename to src/sqlfluff/rules/convention/CV01.py
index 7a6c618..6b9c007 100644
--- a/src/sqlfluff/rules/L061.py
+++ b/src/sqlfluff/rules/convention/CV01.py
@@ -1,17 +1,14 @@
-"""Implementation of Rule L061."""
+"""Implementation of Rule CV01."""
 
 from typing import Optional
 
 from sqlfluff.core.parser.segments.raw import SymbolSegment
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
 from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
 from sqlfluff.utils.functional import sp, FunctionalContext
 
 
-@document_groups
-@document_fix_compatible
-class Rule_L061(BaseRule):
+class Rule_CV01(BaseRule):
     """Use ``!=`` instead of ``<>`` for "not equal to" comparisons.
 
     **Anti-pattern**
@@ -33,8 +30,11 @@ class Rule_L061(BaseRule):
 
     """
 
-    groups = ("all",)
+    name = "convention.not_equal"
+    aliases = ("L061",)
+    groups = ("all", "convention")
     crawl_behaviour = SegmentSeekerCrawler({"comparison_operator"})
+    is_fix_compatible = True
 
     def _eval(self, context: RuleContext) -> Optional[LintResult]:
         """Use ``!=`` instead of ``<>`` for "not equal to" comparison."""
diff --git a/src/sqlfluff/rules/L060.py b/src/sqlfluff/rules/convention/CV02.py
similarity index 89%
rename from src/sqlfluff/rules/L060.py
rename to src/sqlfluff/rules/convention/CV02.py
index a2836f0..b6ad257 100644
--- a/src/sqlfluff/rules/L060.py
+++ b/src/sqlfluff/rules/convention/CV02.py
@@ -1,16 +1,13 @@
-"""Implementation of Rule L060."""
+"""Implementation of Rule CV02."""
 
 from typing import Optional
 
 from sqlfluff.core.parser.segments.raw import CodeSegment
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
 from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
 
 
-@document_groups
-@document_fix_compatible
-class Rule_L060(BaseRule):
+class Rule_CV02(BaseRule):
     """Use ``COALESCE`` instead of ``IFNULL`` or ``NVL``.
 
     **Anti-pattern**
@@ -41,8 +38,11 @@ class Rule_L060(BaseRule):
 
     """
 
-    groups = ("all",)
+    name = "convention.coalesce"
+    aliases = ("L060",)
+    groups = ("all", "convention")
     crawl_behaviour = SegmentSeekerCrawler({"function_name_identifier"})
+    is_fix_compatible = True
 
     def _eval(self, context: RuleContext) -> Optional[LintResult]:
         """Use ``COALESCE`` instead of ``IFNULL`` or ``NVL``."""
diff --git a/src/sqlfluff/rules/convention/CV03.py b/src/sqlfluff/rules/convention/CV03.py
new file mode 100644
index 0000000..b066c2e
--- /dev/null
+++ b/src/sqlfluff/rules/convention/CV03.py
@@ -0,0 +1,108 @@
+"""Implementation of Rule CV03."""
+from typing import Optional
+
+from sqlfluff.core.parser import BaseSegment, SymbolSegment
+
+from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext
+from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
+from sqlfluff.utils.functional import sp, FunctionalContext
+
+
+class Rule_CV03(BaseRule):
+    """Trailing commas within select clause.
+
+    .. note::
+       For many database backends trailing commas are allowed, and some
+       users may wish to enforce them (in line with Python best
+       practice). However, many other backends regard a trailing comma
+       as a syntax error, and as such the ``SQLFluff`` default is to
+       forbid trailing commas in the select clause.
+
+    **Anti-pattern**
+
+    .. code-block:: sql
+
+        SELECT
+            a,
+            b,
+        FROM foo
+
+    **Best practice**
+
+    .. code-block:: sql
+
+        SELECT
+            a,
+            b
+        FROM foo
+    """
+
+    name = "convention.select_trailing_comma"
+    aliases = ("L038",)
+    groups = ("all", "core", "convention")
+    config_keywords = ["select_clause_trailing_comma"]
+    crawl_behaviour = SegmentSeekerCrawler({"select_clause"})
+    is_fix_compatible = True
+
+    def _eval(self, context: RuleContext) -> Optional[LintResult]:
+        """Trailing commas within select clause."""
+        # Config type hints
+        self.select_clause_trailing_comma: str
+
+        segment = FunctionalContext(context).segment
+        children = segment.children()
+        # Iterate content to find last element
+        last_content: BaseSegment = children.last(sp.is_code())[0]
+
+        # What mode are we in?
+        if self.select_clause_trailing_comma == "forbid":
+            # Is it a comma?
+            if last_content.is_type("comma"):
+                # The last content is a comma. Before we try and remove it, we
+                # should check that it's safe. One edge case is a trailing
+                # comma inside a templated loop, where removing it would also
+                # break the earlier iterations. We check that this comma doesn't
+                # share a source position with any other commas in the same select.
+
+                # If there isn't a source position, then it's safe to remove:
+                # it must be a recent addition.
+                if not last_content.pos_marker:  # pragma: no cover
+                    fixes = [LintFix.delete(last_content)]
+                else:
+                    comma_pos = last_content.pos_marker.source_position()
+                    for seg in context.segment.segments:
+                        if seg.is_type("comma"):
+                            if not seg.pos_marker:  # pragma: no cover
+                                continue
+                            elif seg.pos_marker.source_position() == comma_pos:
+                                if seg is not last_content:
+                                    # Not safe to fix
+                                    self.logger.info(
+                                        "Preventing deletion of %s, because source "
+                                        "position is the same as %s. Templated "
+                                        "positions are %s and %s.",
+                                        last_content,
+                                        seg,
+                                        last_content.pos_marker.templated_position(),
+                                        seg.pos_marker.templated_position(),
+                                    )
+                                    fixes = []
+                                    break
+                    else:
+                        # No matching commas found. It's safe.
+                        fixes = [LintFix.delete(last_content)]
+
+                return LintResult(
+                    anchor=last_content,
+                    fixes=fixes,
+                    description="Trailing comma in select statement forbidden",
+                )
+        elif self.select_clause_trailing_comma == "require":
+            if not last_content.is_type("comma"):
+                new_comma = SymbolSegment(",", type="comma")
+                return LintResult(
+                    anchor=last_content,
+                    fixes=[LintFix.replace(last_content, [last_content, new_comma])],
+                    description="Trailing comma in select statement required",
+                )
+        return None
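As a rough illustration of the renamed rule in action (not part of the upstream diff; it assumes the sqlfluff 2.x simple Python API, and that the legacy alias "L038" resolves to the same rule as "CV03"):

    import sqlfluff

    sql = "SELECT\n    a,\n    b,\nFROM foo\n"

    # In the default "forbid" mode the trailing comma is flagged...
    print([v["code"] for v in sqlfluff.lint(sql, dialect="ansi", rules=["CV03"])])

    # ...and the auto-fix removes it.
    print(sqlfluff.fix(sql, dialect="ansi", rules=["CV03"]))
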
diff --git a/src/sqlfluff/rules/L047.py b/src/sqlfluff/rules/convention/CV04.py
similarity index 94%
rename from src/sqlfluff/rules/L047.py
rename to src/sqlfluff/rules/convention/CV04.py
index b6dcd1f..12b879c 100644
--- a/src/sqlfluff/rules/L047.py
+++ b/src/sqlfluff/rules/convention/CV04.py
@@ -1,20 +1,12 @@
-"""Implementation of Rule L047."""
+"""Implementation of Rule CV04."""
 from typing import Optional
 
 from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
 from sqlfluff.utils.functional import sp, FunctionalContext
 
 
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L047(BaseRule):
+class Rule_CV04(BaseRule):
     """Use consistent syntax to express "count number of rows".
 
     Note:
@@ -60,9 +52,12 @@ class Rule_L047(BaseRule):
 
     """
 
-    groups = ("all", "core")
+    name = "convention.count_rows"
+    aliases = ("L047",)
+    groups = ("all", "core", "convention")
     config_keywords = ["prefer_count_1", "prefer_count_0"]
     crawl_behaviour = SegmentSeekerCrawler({"function"})
+    is_fix_compatible = True
 
     def _eval(self, context: RuleContext) -> Optional[LintResult]:
         """Find rule violations and provide fixes."""
diff --git a/src/sqlfluff/rules/L049.py b/src/sqlfluff/rules/convention/CV05.py
similarity index 79%
rename from src/sqlfluff/rules/L049.py
rename to src/sqlfluff/rules/convention/CV05.py
index a24aa3e..79a3f38 100644
--- a/src/sqlfluff/rules/L049.py
+++ b/src/sqlfluff/rules/convention/CV05.py
@@ -1,10 +1,9 @@
-"""Implementation of Rule L049."""
+"""Implementation of Rule CV05."""
 from typing import List, Optional, Union
 
 from sqlfluff.core.parser import KeywordSegment, WhitespaceSegment
 from sqlfluff.core.rules import LintResult, RuleContext, BaseRule
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
 from sqlfluff.utils.functional import sp, Segments
 from sqlfluff.utils.reflow import ReflowSequence
 
@@ -12,9 +11,7 @@ from sqlfluff.utils.reflow import ReflowSequence
 CorrectionListType = List[Union[WhitespaceSegment, KeywordSegment]]
 
 
-@document_groups
-@document_fix_compatible
-class Rule_L049(BaseRule):
+class Rule_CV05(BaseRule):
     """Comparisons with NULL should use "IS" or "IS NOT".
 
     **Anti-pattern**
@@ -41,8 +38,11 @@ class Rule_L049(BaseRule):
         WHERE a IS NULL
     """
 
-    groups = ("all", "core")
+    name = "convention.is_null"
+    aliases = ("L049",)
+    groups = ("all", "core", "convention")
     crawl_behaviour = SegmentSeekerCrawler({"comparison_operator"})
+    is_fix_compatible = True
 
     def _eval(self, context: RuleContext) -> Optional[LintResult]:
         """Relational operators should not be used to check for NULL values."""
@@ -52,7 +52,7 @@ class Rule_L049(BaseRule):
 
         # Allow assignments in SET clauses
         if len(context.parent_stack) >= 2 and context.parent_stack[-2].is_type(
-            "set_clause_list", "execute_script_statement"
+            "set_clause_list", "execute_script_statement", "options_segment"
         ):
             return None
 
@@ -62,6 +62,16 @@ class Rule_L049(BaseRule):
         ):
             return None
 
+        # If the operator is in an EXCLUDE constraint (PostgreSQL feature), the SQL
+        # could look like: EXCLUDE (field WITH =).  In that case, we can exit early
+        # to avoid an assertion failure due to no segment following the operator.
+        # Note that if the EXCLUDE is based on an expression, we will still be
+        # checking that expression because it will be under a different child segment.
+        if context.parent_stack and context.parent_stack[-1].is_type(
+            "exclusion_constraint_element"
+        ):
+            return None
+
         # We only care about equality operators.
         if context.segment.raw not in ("=", "!=", "<>"):
             return None
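A minimal sketch (not from the upstream sources) of the auto-fix this rule provides, assuming the sqlfluff 2.x simple API:

    import sqlfluff

    # "= NULL" comparisons should be rewritten to use "IS NULL".
    print(sqlfluff.fix("SELECT a FROM foo WHERE a = NULL\n",
                       dialect="ansi", rules=["CV05"]))
    # Expected output: SELECT a FROM foo WHERE a IS NULL
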
diff --git a/src/sqlfluff/rules/L052.py b/src/sqlfluff/rules/convention/CV06.py
similarity index 98%
rename from src/sqlfluff/rules/L052.py
rename to src/sqlfluff/rules/convention/CV06.py
index fd3c56d..270ccdb 100644
--- a/src/sqlfluff/rules/L052.py
+++ b/src/sqlfluff/rules/convention/CV06.py
@@ -1,4 +1,4 @@
-"""Implementation of Rule L052."""
+"""Implementation of Rule CV06."""
 from typing import List, NamedTuple, Optional, Sequence, cast
 
 from sqlfluff.core.parser import SymbolSegment
@@ -7,11 +7,6 @@ from sqlfluff.core.parser.segments.raw import NewlineSegment, RawSegment
 
 from sqlfluff.core.rules import BaseRule, LintResult, LintFix, RuleContext
 from sqlfluff.core.rules.crawlers import RootOnlyCrawler
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
 from sqlfluff.utils.functional import Segments, sp
 
 
@@ -24,10 +19,7 @@ class SegmentMoveContext(NamedTuple):
     whitespace_deletions: Segments
 
 
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L052(BaseRule):
+class Rule_CV06(BaseRule):
     """Statements must end with a semi-colon.
 
     **Anti-pattern**
@@ -60,9 +52,12 @@ class Rule_L052(BaseRule):
         FROM foo;
     """
 
-    groups = ("all",)
+    name = "convention.terminator"
+    aliases = ("L052",)
+    groups = ("all", "convention")
     config_keywords = ["multiline_newline", "require_final_semicolon"]
     crawl_behaviour = RootOnlyCrawler()
+    is_fix_compatible = True
 
     @staticmethod
     def _handle_preceding_inline_comments(
@@ -327,6 +322,8 @@ class Rule_L052(BaseRule):
             elif not segment.is_meta:
                 before_segment.append(segment)
             trigger_segment = segment
+        else:
+            return None  # File does not contain any statements
         self.logger.debug("Trigger on: %s", trigger_segment)
         self.logger.debug("Anchoring on: %s", anchor_segment)
 
diff --git a/src/sqlfluff/rules/L053.py b/src/sqlfluff/rules/convention/CV07.py
similarity index 94%
rename from src/sqlfluff/rules/L053.py
rename to src/sqlfluff/rules/convention/CV07.py
index b30a50c..9cf8092 100644
--- a/src/sqlfluff/rules/L053.py
+++ b/src/sqlfluff/rules/convention/CV07.py
@@ -1,16 +1,13 @@
-"""Implementation of Rule L053."""
+"""Implementation of Rule CV07."""
 from typing import List
 
 from sqlfluff.core.parser.segments.base import IdentitySet
 from sqlfluff.core.rules import BaseRule, LintResult, LintFix, RuleContext
 from sqlfluff.core.rules.crawlers import RootOnlyCrawler
 from sqlfluff.utils.functional import Segments, sp
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
 
 
-@document_groups
-@document_fix_compatible
-class Rule_L053(BaseRule):
+class Rule_CV07(BaseRule):
     """Top-level statements should not be wrapped in brackets.
 
     **Anti-pattern**
@@ -48,8 +45,11 @@ class Rule_L053(BaseRule):
         FROM (SELECT * FROM bar)
     """
 
-    groups = ("all",)
+    name = "convention.statement_brackets"
+    aliases = ("L053",)
+    groups = ("all", "convention")
     crawl_behaviour = RootOnlyCrawler()
+    is_fix_compatible = True
 
     @staticmethod
     def _iter_statements(file_segment):
diff --git a/src/sqlfluff/rules/L055.py b/src/sqlfluff/rules/convention/CV08.py
similarity index 88%
rename from src/sqlfluff/rules/L055.py
rename to src/sqlfluff/rules/convention/CV08.py
index dc51ef5..7978733 100644
--- a/src/sqlfluff/rules/L055.py
+++ b/src/sqlfluff/rules/convention/CV08.py
@@ -1,13 +1,11 @@
-"""Implementation of Rule L055."""
+"""Implementation of Rule CV08."""
 from typing import Optional
 
 from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_groups
 
 
-@document_groups
-class Rule_L055(BaseRule):
+class Rule_CV08(BaseRule):
     """Use ``LEFT JOIN`` instead of ``RIGHT JOIN``.
 
     **Anti-pattern**
@@ -39,7 +37,9 @@ class Rule_L055(BaseRule):
             ON foo.bar_id = bar.id;
     """
 
-    groups = ("all",)
+    name = "convention.left_join"
+    aliases = ("L055",)
+    groups = ("all", "convention")
     crawl_behaviour = SegmentSeekerCrawler({"join_clause"})
 
     def _eval(self, context: RuleContext) -> Optional[LintResult]:
diff --git a/src/sqlfluff/rules/L062.py b/src/sqlfluff/rules/convention/CV09.py
similarity index 76%
rename from src/sqlfluff/rules/L062.py
rename to src/sqlfluff/rules/convention/CV09.py
index 3ad5180..a0684d2 100644
--- a/src/sqlfluff/rules/L062.py
+++ b/src/sqlfluff/rules/convention/CV09.py
@@ -1,16 +1,13 @@
-"""Implementation of Rule L062."""
+"""Implementation of Rule CV09."""
 
 import regex
 from typing import Optional
 
 from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_configuration, document_groups
 
 
-@document_groups
-@document_configuration
-class Rule_L062(BaseRule):
+class Rule_CV09(BaseRule):
     """Block a list of configurable words from being used.
 
     This generic rule can be useful to prevent certain keywords, functions, or objects
@@ -24,7 +21,7 @@ class Rule_L062(BaseRule):
       this. Until such a rule is written, we can add ``BOOLEAN`` to the deny list
       to cause a linting error to flag this.
     * We have deprecated a schema/table/function and want to prevent it being used
-      in future. We can add that to the denylist and then add a ``-- noqa: L062`` for
+      in future. We can add that to the denylist and then add a ``-- noqa: CV09`` for
       the few exceptions that still need to be in the code base for now.
 
     **Anti-pattern**
@@ -48,18 +45,22 @@ class Rule_L062(BaseRule):
 
     """
 
-    groups = ("all",)
+    name = "convention.blocked_words"
+    aliases = ("L062",)
+    groups = ("all", "convention")
     # It's a broad selector, but only trigger on raw segments.
     crawl_behaviour = SegmentSeekerCrawler({"raw"})
     config_keywords = [
         "blocked_words",
         "blocked_regex",
+        "match_source",
     ]
 
     def _eval(self, context: RuleContext) -> Optional[LintResult]:
         # Config type hints
         self.blocked_words: Optional[str]
         self.blocked_regex: Optional[str]
+        self.match_source: Optional[bool]
 
         # Exit early if no block list set
         if not self.blocked_words and not self.blocked_regex:
@@ -82,11 +83,21 @@ class Rule_L062(BaseRule):
                 description=f"Use of blocked word '{context.segment.raw}'.",
             )
 
-        if self.blocked_regex and regex.search(self.blocked_regex, context.segment.raw):
-            return LintResult(
-                anchor=context.segment,
-                description=f"Use of blocked regex '{context.segment.raw}'.",
-            )
+        if self.blocked_regex:
+            if regex.search(self.blocked_regex, context.segment.raw):
+                return LintResult(
+                    anchor=context.segment,
+                    description=f"Use of blocked regex '{context.segment.raw}'.",
+                )
+
+            if self.match_source:
+                for segment in context.segment.raw_segments:
+                    source_str = segment.pos_marker.source_str()
+                    if regex.search(self.blocked_regex, source_str):
+                        return LintResult(
+                            anchor=context.segment,
+                            description=f"Use of blocked regex '{source_str}'.",
+                        )
 
         return None
 
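Because this rule does nothing until a block list is configured, a sketch of wiring it up may help. This is an assumption-laden example (the nested config dict mirrors a hypothetical `[sqlfluff:rules:convention.blocked_words]` section and is not from the upstream sources):

    from sqlfluff.core import FluffConfig, Linter

    config = FluffConfig(
        configs={
            "core": {"dialect": "ansi"},
            "rules": {
                "convention.blocked_words": {
                    # Flag any reference to a deprecated table.
                    "blocked_regex": r"\bdeprecated_table\b",
                    # Also scan the pre-templating source text (new in 2.x).
                    "match_source": True,
                },
            },
        }
    )
    linted = Linter(config=config).lint_string("SELECT * FROM deprecated_table\n")
    print([v.rule_code() for v in linted.get_violations()])  # expect ['CV09']
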
diff --git a/src/sqlfluff/rules/L064.py b/src/sqlfluff/rules/convention/CV10.py
similarity index 97%
rename from src/sqlfluff/rules/L064.py
rename to src/sqlfluff/rules/convention/CV10.py
index bd1c62d..c477d2c 100644
--- a/src/sqlfluff/rules/L064.py
+++ b/src/sqlfluff/rules/convention/CV10.py
@@ -1,4 +1,4 @@
-"""Implementation of Rule L064."""
+"""Implementation of Rule CV10."""
 
 from typing import Optional
 
@@ -6,20 +6,12 @@ import regex
 
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
 from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
 from sqlfluff.utils.functional import rsp, FunctionalContext
 from sqlfluff.core.parser.markers import PositionMarker
 from sqlfluff.dialects.dialect_ansi import LiteralSegment
 
 
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L064(BaseRule):
+class Rule_CV10(BaseRule):
     r"""Consistent usage of preferred quotes for quoted literals.
 
     Some databases allow quoted literals to use either single or double quotes.
@@ -34,7 +26,8 @@ class Rule_L064(BaseRule):
        cannot interchange single and double quotes
 
        This rule is only enabled for dialects that allow single *and* double quotes for
-       quoted literals (currently ``bigquery``, ``hive``, ``mysql``, ``sparksql``).
+       quoted literals
+       (currently ``bigquery``, ``databricks``, ``hive``, ``mysql``, ``sparksql``).
        It can be enabled for other dialects with the ``force_enable = True`` flag.
 
     **Anti-pattern**
@@ -66,12 +59,16 @@ class Rule_L064(BaseRule):
 
     """
 
-    groups = ("all",)
+    name = "convention.quoted_literals"
+    aliases = ("L064",)
+    groups = ("all", "convention")
     config_keywords = ["preferred_quoted_literal_style", "force_enable"]
     crawl_behaviour = SegmentSeekerCrawler({"literal"})
     targets_templated = True
+    is_fix_compatible = True
     _dialects_with_double_quoted_strings = [
         "bigquery",
+        "databricks",
         "hive",
         "mysql",
         "sparksql",
diff --git a/src/sqlfluff/rules/L067.py b/src/sqlfluff/rules/convention/CV11.py
similarity index 88%
rename from src/sqlfluff/rules/L067.py
rename to src/sqlfluff/rules/convention/CV11.py
index 21403e9..7a2a117 100644
--- a/src/sqlfluff/rules/L067.py
+++ b/src/sqlfluff/rules/convention/CV11.py
@@ -1,16 +1,11 @@
-"""Implementation of Rule L067."""
+"""Implementation of Rule CV11."""
 
-from typing import Optional, List
+from typing import Optional, List, Iterable
 
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
 from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext
 from sqlfluff.utils.functional import sp, FunctionalContext, Segments
 
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
 from sqlfluff.core.parser import (
     WhitespaceSegment,
     SymbolSegment,
@@ -19,10 +14,7 @@ from sqlfluff.core.parser import (
 )
 
 
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L067(BaseRule):
+class Rule_CV11(BaseRule):
     """Enforce consistent type casting style.
 
     .. note::
@@ -60,9 +52,12 @@ class Rule_L067(BaseRule):
 
     """
 
-    groups = ("all",)
+    name = "convention.casting_style"
+    aliases = ("L067",)
+    groups = ("all", "convention")
     config_keywords = ["preferred_type_casting_style"]
     crawl_behaviour = SegmentSeekerCrawler({"function", "cast_expression"})
+    is_fix_compatible = True
 
     @staticmethod
     def _get_children(segments: Segments) -> Segments:
@@ -86,22 +81,26 @@ class Rule_L067(BaseRule):
     @staticmethod
     def _cast_fix_list(
         context: RuleContext,
-        cast_arg_1: BaseSegment,
+        cast_arg_1: Iterable[BaseSegment],
         cast_arg_2: BaseSegment,
         later_types: Optional[Segments] = None,
     ) -> List[LintFix]:
         """Generate list of fixes to convert CONVERT and ShorthandCast to CAST."""
         # Add cast and opening parenthesis.
-        edits = [
-            KeywordSegment("cast"),
-            SymbolSegment("(", type="start_bracket"),
-            cast_arg_1,
-            WhitespaceSegment(),
-            KeywordSegment("as"),
-            WhitespaceSegment(),
-            cast_arg_2,
-            SymbolSegment(")", type="end_bracket"),
-        ]
+        edits = (
+            [
+                KeywordSegment("cast"),
+                SymbolSegment("(", type="start_bracket"),
+            ]
+            + list(cast_arg_1)
+            + [
+                WhitespaceSegment(),
+                KeywordSegment("as"),
+                WhitespaceSegment(),
+                cast_arg_2,
+                SymbolSegment(")", type="end_bracket"),
+            ]
+        )
 
         if later_types:
             pre_edits: List[BaseSegment] = [
@@ -174,11 +173,20 @@ class Rule_L067(BaseRule):
         context: RuleContext, shorthand_arg_1: BaseSegment, shorthand_arg_2: BaseSegment
     ) -> List[LintFix]:
         """Generate list of fixes to convert CAST and CONVERT to ShorthandCast."""
-        edits = [
-            shorthand_arg_1,
-            SymbolSegment("::", type="casting_operator"),
-            shorthand_arg_2,
-        ]
+        if len(shorthand_arg_1.raw_segments) > 1:
+            edits = [
+                SymbolSegment("(", type="start_bracket"),
+                shorthand_arg_1,
+                SymbolSegment(")", type="end_bracket"),
+            ]
+        else:
+            edits = [shorthand_arg_1]
+        edits.extend(
+            [
+                SymbolSegment("::", type="casting_operator"),
+                shorthand_arg_2,
+            ]
+        )
 
         fixes = [
             LintFix.replace(
@@ -246,7 +254,7 @@ class Rule_L067(BaseRule):
 
                     fixes = self._cast_fix_list(
                         context,
-                        convert_content[1],
+                        [convert_content[1]],
                         convert_content[0],
                     )
                 elif current_type_casting_style == "shorthand":
@@ -258,7 +266,7 @@ class Rule_L067(BaseRule):
                     print(previous_skipped)
                     fixes = self._cast_fix_list(
                         context,
-                        expression_datatype_segment[0],
+                        [expression_datatype_segment[0]],
                         expression_datatype_segment[1],
                         # We can have multiple shorthandcast e.g 1::int::text
                         # in that case, we need to introduce nested CAST()
@@ -345,19 +353,25 @@ class Rule_L067(BaseRule):
 
                     fixes = self._cast_fix_list(
                         context,
-                        convert_content[1],
+                        [convert_content[1]],
                         convert_content[0],
                     )
                 elif current_type_casting_style == "shorthand":
                     expression_datatype_segment = self._get_children(
                         functional_context.segment
                     )
+
+                    for data_type_idx, seg in enumerate(expression_datatype_segment):
+                        if seg.is_type("data_type"):
+                            break
+
                     fixes = self._cast_fix_list(
                         context,
-                        expression_datatype_segment[0],
-                        expression_datatype_segment[1],
-                        expression_datatype_segment[2:],
+                        expression_datatype_segment[:data_type_idx],
+                        expression_datatype_segment[data_type_idx],
+                        expression_datatype_segment[data_type_idx + 1 :],
                     )
+
             elif self.preferred_type_casting_style == "convert":
                 if current_type_casting_style == "cast":
                     cast_content = self._get_children(
@@ -398,24 +412,12 @@ class Rule_L067(BaseRule):
                         convert_content[1],
                         convert_content[0],
                     )
-            if convert_content and len(convert_content) > 2:
-                return LintResult(
-                    anchor=context.segment,
-                    memory=context.memory,
-                    description=(
-                        "Used type casting style is different from"
-                        " the preferred type casting style."
-                    ),
-                )
-            elif cast_content and len(cast_content) > 2:
-                return LintResult(
-                    anchor=context.segment,
-                    memory=context.memory,
-                    description=(
-                        "Used type casting style is different from"
-                        " the preferred type casting style."
-                    ),
-                )
+
+            # Don't fix if there's too much content.
+            if (convert_content and len(convert_content) > 2) or (
+                cast_content and len(cast_content) > 2
+            ):
+                fixes = []
 
             return LintResult(
                 anchor=context.segment,
diff --git a/src/sqlfluff/rules/convention/__init__.py b/src/sqlfluff/rules/convention/__init__.py
new file mode 100644
index 0000000..b8837cd
--- /dev/null
+++ b/src/sqlfluff/rules/convention/__init__.py
@@ -0,0 +1,33 @@
+"""The convention plugin bundle."""
+
+from sqlfluff.core.plugin import hookimpl
+
+from sqlfluff.rules.convention.CV01 import Rule_CV01
+from sqlfluff.rules.convention.CV02 import Rule_CV02
+from sqlfluff.rules.convention.CV03 import Rule_CV03
+from sqlfluff.rules.convention.CV04 import Rule_CV04
+from sqlfluff.rules.convention.CV05 import Rule_CV05
+from sqlfluff.rules.convention.CV06 import Rule_CV06
+from sqlfluff.rules.convention.CV07 import Rule_CV07
+from sqlfluff.rules.convention.CV08 import Rule_CV08
+from sqlfluff.rules.convention.CV09 import Rule_CV09
+from sqlfluff.rules.convention.CV10 import Rule_CV10
+from sqlfluff.rules.convention.CV11 import Rule_CV11
+
+
+@hookimpl
+def get_rules():
+    """Get plugin rules."""
+    return [
+        Rule_CV01,
+        Rule_CV02,
+        Rule_CV03,
+        Rule_CV04,
+        Rule_CV05,
+        Rule_CV06,
+        Rule_CV07,
+        Rule_CV08,
+        Rule_CV09,
+        Rule_CV10,
+        Rule_CV11,
+    ]
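The same hookimpl pattern can be reused by third-party plugins. A minimal sketch under that assumption (``Rule_MyL01`` is a placeholder stub, not a real sqlfluff rule, and a complete plugin would also declare its configuration):

    from sqlfluff.core.plugin import hookimpl
    from sqlfluff.core.rules import BaseRule
    from sqlfluff.core.rules.crawlers import RootOnlyCrawler


    class Rule_MyL01(BaseRule):
        """A do-nothing placeholder rule."""

        name = "custom.example"
        groups = ("all",)
        crawl_behaviour = RootOnlyCrawler()

        def _eval(self, context):
            return None


    @hookimpl
    def get_rules():
        """Expose this bundle's rules to the plugin manager."""
        return [Rule_MyL01]
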
diff --git a/src/sqlfluff/rules/L046.py b/src/sqlfluff/rules/jinja/JJ01.py
similarity index 85%
rename from src/sqlfluff/rules/L046.py
rename to src/sqlfluff/rules/jinja/JJ01.py
index ec6489c..dca9f70 100644
--- a/src/sqlfluff/rules/L046.py
+++ b/src/sqlfluff/rules/jinja/JJ01.py
@@ -1,4 +1,4 @@
-"""Implementation of Rule L046."""
+"""Implementation of Rule JJ01."""
 from typing import Tuple
 from sqlfluff.core.parser.segments import SourceFix
 
@@ -11,11 +11,9 @@ from sqlfluff.core.rules import (
 )
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
 from sqlfluff.utils.functional import rsp, FunctionalContext
-from sqlfluff.core.rules.doc_decorators import document_groups
 
 
-@document_groups
-class Rule_L046(BaseRule):
+class Rule_JJ01(BaseRule):
     """Jinja tags should have a single whitespace on either side.
 
     **Anti-pattern**
@@ -23,7 +21,7 @@ class Rule_L046(BaseRule):
     Jinja tags with either no whitespace or very long whitespace
     are hard to read.
 
-    .. code-block:: sql
+    .. code-block:: jinja
        :force:
 
         SELECT {{    a     }} from {{ref('foo')}}
@@ -33,7 +31,7 @@ class Rule_L046(BaseRule):
     A single whitespace surrounding Jinja tags, alternatively
     longer gaps containing newlines are acceptable.
 
-    .. code-block:: sql
+    .. code-block:: jinja
        :force:
 
         SELECT {{ a }} from {{ ref('foo') }};
@@ -42,12 +40,15 @@ class Rule_L046(BaseRule):
         }};
     """
 
-    groups = ("all", "core")
+    name = "jinja.padding"
+    aliases = ("L046",)
+    groups = ("all", "core", "jinja")
     # Crawling for "raw" isn't a great way of filtering but it will
     # do for now. TODO: Make a more efficient crawler for templated
     # sections.
     crawl_behaviour = SegmentSeekerCrawler({"raw"})
     targets_templated = True
+    is_fix_compatible = True
 
     @staticmethod
     def _get_whitespace_ends(s: str) -> Tuple[str, str, str, str, str]:
@@ -55,19 +56,19 @@ class Rule_L046(BaseRule):
 
         This function assumes that we've already trimmed the string
         to just the tag, and will raise an AssertionError if not.
-        >>> Rule_L046._get_whitespace_ends('  {{not_trimmed}}   ')
+        >>> Rule_JJ01._get_whitespace_ends('  {{not_trimmed}}   ')
         Traceback (most recent call last):
             ...
         AssertionError
 
         In essence it divides up a tag into the end tokens, any
         leading or trailing whitespace and the inner content
-        >>> Rule_L046._get_whitespace_ends('{{ my_content }}')
+        >>> Rule_JJ01._get_whitespace_ends('{{ my_content }}')
         ('{{', ' ', 'my_content', ' ', '}}')
 
         It also works with block tags and more complicated content
         and end markers.
-        >>> Rule_L046._get_whitespace_ends('{%+if a + b is True     -%}')
+        >>> Rule_JJ01._get_whitespace_ends('{%+if a + b is True     -%}')
         ('{%+', '', 'if a + b is True', '     ', '-%}')
         """
         assert s[0] == "{" and s[-1] == "}"
@@ -117,7 +118,7 @@ class Rule_L046(BaseRule):
                     "Tag found @ %s: %r ", context.segment.pos_marker, stripped
                 )
 
-                # Dedupe using a memory of source indexes.
+                # Deduplicate using a memory of source indexes.
                 # This is important because several positions in the
                 # templated file may refer to the same position in the
                 # source file and we only want to get one violation.
@@ -126,6 +127,19 @@ class Rule_L046(BaseRule):
                     continue
                 memory.add(src_idx)
 
+                # Does the segment already have a source fix associated with it?
+                # NOTE: because we're fetching the raw slices, even if we've
+                # already fixed an issue, we won't know that. To make sure we don't
+                # double-apply fixes, we check for fixes already present. We do this
+                # _after_ adding it to memory, because on a second pass through
+                # the file, the memory will have been wiped.
+                if context.segment.source_fixes:
+                    self.logger.debug(
+                        "Segment already has source fixes. Skipping for safety: %s",
+                        context.segment.source_fixes,
+                    )
+                    continue
+
                 # Partition and Position
                 tag_pre, ws_pre, inner, ws_post, tag_post = self._get_whitespace_ends(
                     stripped
diff --git a/src/sqlfluff/rules/jinja/__init__.py b/src/sqlfluff/rules/jinja/__init__.py
new file mode 100644
index 0000000..89c6355
--- /dev/null
+++ b/src/sqlfluff/rules/jinja/__init__.py
@@ -0,0 +1,11 @@
+"""The jinja rules plugin bundle."""
+
+from sqlfluff.core.plugin import hookimpl
+
+from sqlfluff.rules.jinja.JJ01 import Rule_JJ01
+
+
+@hookimpl
+def get_rules():
+    """Get plugin rules."""
+    return [Rule_JJ01]
diff --git a/src/sqlfluff/rules/layout/LT01.py b/src/sqlfluff/rules/layout/LT01.py
new file mode 100644
index 0000000..bfc6ce4
--- /dev/null
+++ b/src/sqlfluff/rules/layout/LT01.py
@@ -0,0 +1,70 @@
+"""Implementation of Rule LT01."""
+from typing import List, Optional
+
+from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
+from sqlfluff.core.rules.crawlers import RootOnlyCrawler
+from sqlfluff.utils.reflow.sequence import ReflowSequence
+
+
+class Rule_LT01(BaseRule):
+    """Inappropriate Spacing.
+
+    This rule checks for and enforces the spacing as configured in
+    :ref:`layoutconfig`. This includes excessive whitespace,
+    trailing whitespace at the end of a line, and incorrect
+    spacing between elements on the line. Because of this wide reach
+    you may find that you wish to add specific configuration in your
+    project to tweak how specific elements are treated. Rather than
+    configuring this specific rule, use the `sqlfluff.layout`
+    section of your configuration file to customise how this rule
+    operates.
+
+    The ``•`` character represents a space in the examples below.
+
+    **Anti-pattern**
+
+    .. code-block:: sql
+        :force:
+
+        SELECT
+            a,        b(c) as d••
+        FROM foo••••
+        JOIN bar USING(a)
+
+    **Best practice**
+
+    * Unless an indent or preceding a comment, whitespace should
+      be a single space.
+
+    * There should also be no trailing whitespace at the ends of lines.
+
+    * There should be a space after :code:`USING` so that it's not confused
+      for a function.
+
+    .. code-block:: sql
+
+        SELECT
+            a, b(c) as d
+        FROM foo
+        JOIN bar USING (a)
+    """
+
+    name = "layout.spacing"
+    # NOTE: This rule combines the following legacy rules:
+    # - L001: Trailing Whitespace
+    # - L005 & L008: Space around commas
+    # - L006: Space around operators
+    # - L023: Space after AS in WITH clause
+    # - L024: Space immediately after USING
+    # - L039: Unnecessary Whitespace
+    # - L048: Spacing around quoted literals
+    # - L071: Spacing around brackets
+    aliases = ("L001", "L005", "L006", "L008", "L023", "L024", "L039", "L048", "L071")
+    groups = ("all", "core", "layout")
+    crawl_behaviour = RootOnlyCrawler()
+    is_fix_compatible = True
+
+    def _eval(self, context: RuleContext) -> Optional[List[LintResult]]:
+        """Unnecessary whitespace."""
+        sequence = ReflowSequence.from_root(context.segment, config=context.config)
+        return sequence.respace().get_results()
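A quick hedged illustration (not part of the diff; it assumes the sqlfluff 2.x simple API) of the kinds of spacing issues the consolidated rule now handles in one pass:

    import sqlfluff

    # Doubled spaces, a missing space after the comma, and trailing whitespace.
    sql = "SELECT  a,b  \nFROM foo\n"
    print(sqlfluff.fix(sql, dialect="ansi", rules=["LT01"]))
    # Expected output: SELECT a, b\nFROM foo
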
diff --git a/src/sqlfluff/rules/layout/LT02.py b/src/sqlfluff/rules/layout/LT02.py
new file mode 100644
index 0000000..47e0ab8
--- /dev/null
+++ b/src/sqlfluff/rules/layout/LT02.py
@@ -0,0 +1,65 @@
+"""Implementation of Rule LT02."""
+from typing import List
+
+from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
+from sqlfluff.core.rules.crawlers import RootOnlyCrawler
+from sqlfluff.utils.reflow.sequence import ReflowSequence
+
+
+class Rule_LT02(BaseRule):
+    """Incorrect Indentation.
+
+    **Anti-pattern**
+
+    The ``•`` character represents a space and the ``→`` character represents a tab.
+    In this example, the third line contains five spaces instead of four and
+    the second line contains two spaces and one tab.
+
+    .. code-block:: sql
+       :force:
+
+        SELECT
+        ••→a,
+        •••••b
+        FROM foo
+
+
+    **Best practice**
+
+    Change the indentation to use a multiple of four spaces. This example also
+    assumes that the ``indent_unit`` config value is set to ``space``. If it
+    had instead been set to ``tab``, then the indents would be tabs instead.
+
+    .. code-block:: sql
+       :force:
+
+        SELECT
+        ••••a,
+        ••••b
+        FROM foo
+
+    """
+
+    name = "layout.indent"
+    # NOTE: We're combining three legacy rules here into one.
+    aliases = ("L002", "L003", "L004")
+    groups = ("all", "core", "layout")
+    crawl_behaviour = RootOnlyCrawler()
+    is_fix_compatible = True
+    targets_templated = True
+    template_safe_fixes = True
+    _adjust_anchors = True
+
+    def _eval(self, context: RuleContext) -> List[LintResult]:
+        """Indentation not consistent with previous lines.
+
+        To set the default tab size, set the `tab_space_size` value
+        in the appropriate configuration. To use tabs for indentation,
+        set the `indent_unit` value to `tab`.
+
+        """
+        return (
+            ReflowSequence.from_root(context.segment, context.config)
+            .reindent()
+            .get_results()
+        )
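A hedged sketch (not from the upstream sources) showing the reindent in action with the default ``indent_unit = space`` and ``tab_space_size = 4``:

    import sqlfluff

    sql = "SELECT\n  a,\n     b\nFROM foo\n"
    print(sqlfluff.fix(sql, dialect="ansi", rules=["LT02"]))
    # Both select targets should come back indented by four spaces.
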
diff --git a/src/sqlfluff/rules/layout/LT03.py b/src/sqlfluff/rules/layout/LT03.py
new file mode 100644
index 0000000..2c3c710
--- /dev/null
+++ b/src/sqlfluff/rules/layout/LT03.py
@@ -0,0 +1,144 @@
+"""Implementation of Rule LT03."""
+
+from typing import List, Sequence
+
+from sqlfluff.core.parser import BaseSegment
+from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
+from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
+from sqlfluff.utils.reflow import ReflowSequence
+
+
+class Rule_LT03(BaseRule):
+    """Operators should follow a standard for being before/after newlines.
+
+    **Anti-pattern**
+
+    In this example, if ``operator_new_lines = after`` (or unspecified, as is the
+    default), then the operator ``+`` should not be at the end of the second line.
+
+    .. code-block:: sql
+
+        SELECT
+            a +
+            b
+        FROM foo
+
+
+    **Best practice**
+
+    If ``operator_new_lines = after`` (or unspecified, as this is the default),
+    place the operator after the newline.
+
+    .. code-block:: sql
+
+        SELECT
+            a
+            + b
+        FROM foo
+
+    If ``operator_new_lines = before``, place the operator before the newline.
+
+    .. code-block:: sql
+
+        SELECT
+            a +
+            b
+        FROM foo
+    """
+
+    name = "layout.operators"
+    aliases = ("L007",)
+    groups = ("all", "layout")
+    crawl_behaviour = SegmentSeekerCrawler({"binary_operator", "comparison_operator"})
+    is_fix_compatible = True
+
+    def _seek_newline(self, segments: Sequence[BaseSegment], idx: int, dir: int):
+        """Seek in a direction, looking for newlines.
+
+        Args:
+            segments: A sequence of segments to seek within.
+            idx: The index of the "current" segment.
+            dir: The direction to seek in (+1 for forward, -1 for backward)
+        """
+        assert dir in (1, -1)
+        for segment in segments[idx + dir :: dir]:
+            if segment.is_type("newline"):
+                # It's definitely leading. No problems.
+                self.logger.debug(
+                    "Shortcut (dir = %s) OK. Found newline: %s", dir, segment
+                )
+                return True
+            elif not segment.is_type("whitespace", "indent", "comment"):
+                # We found something before it which suggests it's not leading.
+                # We should run the full reflow routine to check.
+                break
+        return False
+
+    def _check_trail_lead_shortcut(
+        self, segment: BaseSegment, parent: BaseSegment, line_position: str
+    ) -> bool:
+        """Check to see whether we should pass the rule and shortcut.
+
+        Args:
+            segment: The target segment.
+            parent: The parent segment (must contain `segment`).
+            line_position: The `line_position` config for the segment.
+        """
+        idx = parent.segments.index(segment)
+        # Shortcut #1: Leading.
+        if line_position == "leading":
+            if self._seek_newline(parent.segments, idx, dir=-1):
+                return True
+            # If we didn't find a newline before, and there's _also_ no newline
+            # after, then we can shortcut: the element is "mid line".
+            if not self._seek_newline(parent.segments, idx, dir=1):
+                return True
+
+        # Shortcut #2: Trailing.
+        elif line_position == "trailing":
+            if self._seek_newline(parent.segments, idx, dir=1):
+                return True
+            # If we didn't find a newline after, and there's _also_ no newline
+            # before, then we can shortcut: the element is "mid line".
+            if not self._seek_newline(parent.segments, idx, dir=-1):
+                return True
+        return False
+
+    def _eval(self, context: RuleContext) -> List[LintResult]:
+        """Operators should follow a standard for being before/after newlines.
+
+        For the fixing routines we delegate to the reflow utils. However
+        for performance reasons we have some initial shortcuts to quickly
+        identify situations which are _ok_ to avoid the overhead of the
+        full reflow path.
+        """
+        # NOTE: These shortcuts assume that any newlines will be direct
+        # siblings of the operator in question. This isn't _always_ the case
+        # but is true often enough to have meaningful upside from early
+        # detection.
+        if context.segment.is_type("comparison_operator"):
+            comparison_positioning = context.config.get(
+                "line_position", ["layout", "type", "comparison_operator"]
+            )
+            if self._check_trail_lead_shortcut(
+                context.segment, context.parent_stack[-1], comparison_positioning
+            ):
+                return [LintResult()]
+        elif context.segment.is_type("binary_operator"):
+            binary_positioning = context.config.get(
+                "line_position", ["layout", "type", "binary_operator"]
+            )
+            if self._check_trail_lead_shortcut(
+                context.segment, context.parent_stack[-1], binary_positioning
+            ):
+                return [LintResult()]
+
+        return (
+            ReflowSequence.from_around_target(
+                context.segment,
+                root_segment=context.parent_stack[0],
+                config=context.config,
+            )
+            .rebreak()
+            .get_results()
+        )
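A minimal sketch of the default behaviour (not part of the diff; it assumes the default layout config, which places binary operators after the newline):

    import sqlfluff

    sql = "SELECT\n    a +\n    b\nFROM foo\n"
    print(sqlfluff.fix(sql, dialect="ansi", rules=["LT03"]))
    # Expected: the "+" moves to lead the following line, i.e. "a\n    + b".
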
diff --git a/src/sqlfluff/rules/L019.py b/src/sqlfluff/rules/layout/LT04.py
similarity index 56%
rename from src/sqlfluff/rules/L019.py
rename to src/sqlfluff/rules/layout/LT04.py
index 6d51a83..4746a40 100644
--- a/src/sqlfluff/rules/L019.py
+++ b/src/sqlfluff/rules/layout/LT04.py
@@ -1,21 +1,15 @@
-"""Implementation of Rule L019."""
+"""Implementation of Rule LT04."""
 
 from typing import List
 
-from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
+from sqlfluff.core.rules import LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
 from sqlfluff.utils.reflow import ReflowSequence
 
+from sqlfluff.rules.layout.LT03 import Rule_LT03
 
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L019(BaseRule):
+
+class Rule_LT04(Rule_LT03):
     """Leading/Trailing comma enforcement.
 
     **Anti-pattern**
@@ -54,22 +48,33 @@ class Rule_L019(BaseRule):
         FROM foo
     """
 
-    groups = ("all",)
+    name = "layout.commas"
+    aliases = ("L019",)
+    groups = ("all", "layout")
     crawl_behaviour = SegmentSeekerCrawler({"comma"})
     _adjust_anchors = True
+    is_fix_compatible = True
 
     def _eval(self, context: RuleContext) -> List[LintResult]:
         """Enforce comma placement.
 
-        For leading commas we're looking for trailing commas, so
-        we look for newline segments. For trailing commas we're
-        looking for leading commas, so we look for the comma itself.
-
-        We also want to handle proper whitespace removal/addition. We remove
-        any trailing whitespace after the leading comma, when converting a
-        leading comma to a trailing comma. We add whitespace after the leading
-        comma when converting a trailing comma to a leading comma.
+        For the fixing routines we delegate to the reflow utils. However
+        for performance reasons we have some initial shortcuts to quickly
+        identify situations which are _ok_ to avoid the overhead of the
+        full reflow path.
         """
+        comma_positioning = context.config.get(
+            "line_position", ["layout", "type", "comma"]
+        )
+        # NOTE: These shortcuts assume that any newlines will be direct
+        # siblings of the comma in question. This isn't _always_ the case
+        # but is true often enough to have meaningful upside from early
+        # detection.
+        if self._check_trail_lead_shortcut(
+            context.segment, context.parent_stack[-1], comma_positioning
+        ):
+            return [LintResult()]
+
         return (
             ReflowSequence.from_around_target(
                 context.segment,
diff --git a/src/sqlfluff/rules/layout/LT05.py b/src/sqlfluff/rules/layout/LT05.py
new file mode 100644
index 0000000..601d2ca
--- /dev/null
+++ b/src/sqlfluff/rules/layout/LT05.py
@@ -0,0 +1,135 @@
+"""Implementation of Rule LT05."""
+
+from typing import List, cast
+
+from sqlfluff.core.parser.segments import TemplateSegment
+
+from sqlfluff.core.rules import LintResult, RuleContext
+from sqlfluff.core.rules.base import BaseRule
+from sqlfluff.core.rules.crawlers import RootOnlyCrawler
+
+from sqlfluff.utils.reflow.sequence import ReflowSequence
+
+
+class Rule_LT05(BaseRule):
+    """Line is too long."""
+
+    name = "layout.long_lines"
+    aliases = ("L016",)
+    groups = ("all", "core", "layout")
+    crawl_behaviour = RootOnlyCrawler()
+    targets_templated = True
+    template_safe_fixes = True
+    _adjust_anchors = True
+    _check_docstring = False
+    is_fix_compatible = True
+
+    config_keywords = [
+        "ignore_comment_lines",
+        "ignore_comment_clauses",
+    ]
+
+    def _eval(self, context: RuleContext) -> List[LintResult]:
+        """Line is too long."""
+        self.ignore_comment_lines: bool
+        self.ignore_comment_clauses: bool
+        # Reflow and generate fixes.
+        results = (
+            ReflowSequence.from_root(context.segment, context.config)
+            .break_long_lines()
+            .get_results()
+        )
+
+        # Ignore any comment line if appropriate.
+        if self.ignore_comment_lines:
+            raw_segments = context.segment.raw_segments
+            for res in results[:]:
+                # First handle the easy case that the anchor (i.e. the start
+                # of the line is a comment).
+                if res.anchor.is_type("comment"):
+                    self.logger.debug(
+                        "Purging result on long line starting with comment: %s",
+                        res.anchor.pos_marker.working_line_no,
+                    )
+                    results.remove(res)
+                    continue
+                # Then look for comments on the rest of the line:
+                raw_idx = raw_segments.index(res.anchor)
+                for seg in raw_segments[raw_idx:]:
+                    if (
+                        seg.pos_marker.working_line_no
+                        != res.anchor.pos_marker.working_line_no
+                    ):
+                        # We've gone past the end of the line. Stop looking.
+                        break  # pragma: no cover
+                    # Is it a comment?
+                    if seg.is_type("comment"):
+                        self.logger.debug(
+                            "Purging result on long line containing comment: %s",
+                            res.anchor.pos_marker.working_line_no,
+                        )
+                        results.remove(res)
+                        break
+                    # Is it a template comment?
+                    elif (
+                        seg.is_type("placeholder")
+                        and cast(TemplateSegment, seg).block_type == "comment"
+                    ):
+                        self.logger.debug(
+                            "Purging result with template comment line: %s",
+                            res.anchor.pos_marker.working_line_no,
+                        )
+                        results.remove(res)
+                        break
+
+        # Ignore any comment clauses if present.
+        if self.ignore_comment_clauses:
+            raw_segments = context.segment.raw_segments
+            for res in results[:]:
+                # The anchor should be the first raw on the line. Work forward
+                # until we're not on the line. Check if any have a parent which
+                # is a comment_clause.
+                raw_idx = raw_segments.index(res.anchor)
+                for seg in raw_segments[raw_idx:]:
+                    if (
+                        seg.pos_marker.working_line_no
+                        != res.anchor.pos_marker.working_line_no
+                    ):
+                        # We've gone past the end of the line. Stop looking.
+                        break
+                    # Look to see if any are in comment clauses
+                    for ps in context.segment.path_to(seg):
+                        if ps.segment.is_type(
+                            "comment_clause", "comment_equals_clause"
+                        ):
+                            # It IS! Ok, purge this result from results, unless
+                            # the line is already too long without the comment.
+                            # We'll know that based on the line position of
+                            # the comment.
+                            # We can fairly confidently assert that the segment
+                            # will have a position marker at this stage.
+                            assert ps.segment.pos_marker
+                            line_pos = ps.segment.pos_marker.working_line_pos
+                            if line_pos < context.config.get("max_line_length"):
+                                # OK purge it.
+                                self.logger.debug(
+                                    "Purging result on long line with comment "
+                                    "clause: %s",
+                                    res.anchor.pos_marker.working_line_no,
+                                )
+                                results.remove(res)
+                                break
+                            self.logger.debug(
+                                "Keeping result on long line with comment clause. "
+                                "Still too long without comment: %s",
+                                res.anchor.pos_marker.working_line_no,
+                            )
+                    # If we finish the loop without breaking, we didn't find a
+                    # comment. Keep looking.
+                    else:
+                        continue
+                    # If we did finish with a break, we should break the outer
+                    # loop too.
+                    break
+
+        return results
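A short hedged example (not from the diff; it assumes the default ``max_line_length`` of 80) showing the rule firing on an over-long line:

    import sqlfluff

    sql = "SELECT " + ", ".join(f"col_{i}" for i in range(20)) + " FROM foo\n"
    print([v["code"] for v in sqlfluff.lint(sql, dialect="ansi", rules=["LT05"])])
    # Expect ['LT05'] for the single long line.
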
diff --git a/src/sqlfluff/rules/L017.py b/src/sqlfluff/rules/layout/LT06.py
similarity index 83%
rename from src/sqlfluff/rules/L017.py
rename to src/sqlfluff/rules/layout/LT06.py
index 56dc9bf..fd77afb 100644
--- a/src/sqlfluff/rules/L017.py
+++ b/src/sqlfluff/rules/layout/LT06.py
@@ -1,14 +1,11 @@
-"""Implementation of Rule L017."""
+"""Implementation of Rule LT06."""
 
 from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
 from sqlfluff.utils.functional import sp, FunctionalContext
 
 
-@document_groups
-@document_fix_compatible
-class Rule_L017(BaseRule):
+class Rule_LT06(BaseRule):
     """Function name not immediately followed by parenthesis.
 
     **Anti-pattern**
@@ -33,14 +30,22 @@ class Rule_L017(BaseRule):
 
     """
 
-    groups = ("all", "core")
+    name = "layout.functions"
+    aliases = ("L017",)
+    groups = ("all", "core", "layout")
     crawl_behaviour = SegmentSeekerCrawler({"function"})
+    is_fix_compatible = True
 
     def _eval(self, context: RuleContext) -> LintResult:
         """Function name not immediately followed by bracket.
 
         Look for Function Segment with anything other than the
         function name before brackets
+
+        NOTE: This hasn't been combined with LT01 because it has
+        some special treatment for comments. That might be something
+        we revisit at a later point if duplicate errors become
+        problematic.
         """
         segment = FunctionalContext(context).segment
         # We only trigger on start_bracket (open parenthesis)
diff --git a/src/sqlfluff/rules/L018.py b/src/sqlfluff/rules/layout/LT07.py
similarity index 50%
rename from src/sqlfluff/rules/L018.py
rename to src/sqlfluff/rules/layout/LT07.py
index 33e5734..20dd799 100644
--- a/src/sqlfluff/rules/L018.py
+++ b/src/sqlfluff/rules/layout/LT07.py
@@ -1,27 +1,16 @@
-"""Implementation of Rule L018."""
-
-from typing import cast
+"""Implementation of Rule LT07."""
 
 from sqlfluff.core.parser import (
     IdentitySet,
     NewlineSegment,
-    PositionMarker,
 )
 
 from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
 from sqlfluff.utils.functional import sp, FunctionalContext
 
 
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L018(BaseRule):
+class Rule_LT07(BaseRule):
     """``WITH`` clause closing bracket should be on a new line.
 
     **Anti-pattern**
@@ -50,10 +39,13 @@ class Rule_L018(BaseRule):
 
     """
 
-    groups = ("all", "core")
+    name = "layout.cte_bracket"
+    aliases = ("L018",)
+    groups = ("all", "core", "layout")
     crawl_behaviour = SegmentSeekerCrawler(
         {"with_compound_statement"}, provide_raw_stack=True
     )
+    is_fix_compatible = True
 
     def _eval(self, context: RuleContext):
         """WITH clause closing bracket should be aligned with WITH keyword.
@@ -62,20 +54,6 @@ class Rule_L018(BaseRule):
         """
         # We only trigger on start_bracket (open parenthesis)
         assert context.segment.is_type("with_compound_statement")
-        raw_stack_buff = list(context.raw_stack)
-        # Look for the with keyword
-        for seg in context.segment.segments:
-            if seg.raw_upper == "WITH":
-                seg_line_no = seg.pos_marker.line_no
-                break
-        else:  # pragma: no cover
-            # This *could* happen if the with statement is unparsable,
-            # in which case then the user will have to fix that first.
-            if any(s.is_type("unparsable") for s in context.segment.segments):
-                return LintResult()
-            # If it's parsable but we still didn't find a with, then
-            # we should raise that.
-            raise RuntimeError("Didn't find WITH keyword!")
 
         # Find the end brackets for the CTE *query* (i.e. ignore optional
         # list of CTE columns).
@@ -85,43 +63,53 @@ class Rule_L018(BaseRule):
             .segment.children(sp.is_type("common_table_expression"))
             .iterate_segments()
         ):
+            cte_start_bracket = (
+                cte.children()
+                .last(sp.is_type("bracketed"))
+                .children()
+                .first(sp.is_type("start_bracket"))
+            )
             cte_end_bracket = (
                 cte.children()
                 .last(sp.is_type("bracketed"))
                 .children()
                 .last(sp.is_type("end_bracket"))
             )
-            if cte_end_bracket:
+            if cte_start_bracket and cte_end_bracket:
+                self.logger.debug(
+                    "Found CTE with brackets: %s & %s",
+                    cte_start_bracket,
+                    cte_end_bracket,
+                )
+                # Are they on the same line?
+                # NOTE: This assertion should be fairly safe because
+                # there aren't many reasons for a bracket to not yet
+                # be positioned.
+                assert cte_start_bracket[0].pos_marker
+                assert cte_end_bracket[0].pos_marker
+                if (
+                    cte_start_bracket[0].pos_marker.line_no
+                    == cte_end_bracket[0].pos_marker.line_no
+                ):
+                    # Same line
+                    self.logger.debug("Skipping because on same line.")
+                    continue
+                # Otherwise add to the ones to check.
                 cte_end_brackets.add(cte_end_bracket[0])
-        for seg in context.segment.iter_segments(
-            expanding=["common_table_expression", "bracketed"], pass_through=True
-        ):
-            if seg not in cte_end_brackets:
-                if not seg.is_type("start_bracket"):
-                    raw_stack_buff.append(seg)
-                continue
-
-            if seg.pos_marker.line_no == seg_line_no:
-                # Skip if it's the one-line version. That's ok
-                continue
-
-            # Is it all whitespace before the bracket on this line?
-            assert seg.pos_marker
 
+        for seg in cte_end_brackets:
             contains_non_whitespace = False
-            for elem in context.segment.raw_segments:
-                if (
-                    cast(PositionMarker, elem.pos_marker).line_no
-                    == seg.pos_marker.line_no
-                    and cast(PositionMarker, elem.pos_marker).line_pos
-                    <= seg.pos_marker.line_pos
-                ):
-                    if elem is seg:
-                        break
-                    elif elem.is_type("newline"):
-                        contains_non_whitespace = False
-                    elif not elem.is_type("dedent") and not elem.is_type("whitespace"):
-                        contains_non_whitespace = True
+            idx = context.segment.raw_segments.index(seg)
+            self.logger.debug("End bracket %s has idx %s", seg, idx)
+            # Search backward through the raw segments from just before
+            # the location of the bracket.
+            for elem in context.segment.raw_segments[idx - 1 :: -1]:
+                if elem.is_type("newline"):
+                    break
+                elif not elem.is_type("indent", "whitespace"):
+                    self.logger.debug("Found non-whitespace: %s", elem)
+                    contains_non_whitespace = True
+                    break
 
             if contains_non_whitespace:
                 # We have to move it to a newline
diff --git a/src/sqlfluff/rules/L022.py b/src/sqlfluff/rules/layout/LT08.py
similarity index 96%
rename from src/sqlfluff/rules/L022.py
rename to src/sqlfluff/rules/layout/LT08.py
index 84633c7..22af770 100644
--- a/src/sqlfluff/rules/L022.py
+++ b/src/sqlfluff/rules/layout/LT08.py
@@ -1,21 +1,13 @@
-"""Implementation of Rule L022."""
+"""Implementation of Rule LT08."""
 
 from typing import Optional, List
 from sqlfluff.core.parser import NewlineSegment
 
 from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
 
 
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L022(BaseRule):
+class Rule_LT08(BaseRule):
     """Blank line expected but not found after CTE closing bracket.
 
     **Anti-pattern**
@@ -44,8 +36,11 @@ class Rule_L022(BaseRule):
 
     """
 
-    groups = ("all", "core")
+    name = "layout.cte_newline"
+    aliases = ("L022",)
+    groups = ("all", "core", "layout")
     crawl_behaviour = SegmentSeekerCrawler({"with_compound_statement"})
+    is_fix_compatible = True
 
     def _eval(self, context: RuleContext) -> Optional[List[LintResult]]:
         """Blank line expected but not found after CTE definition."""
diff --git a/src/sqlfluff/rules/layout/LT09.py b/src/sqlfluff/rules/layout/LT09.py
new file mode 100644
index 0000000..e110e38
--- /dev/null
+++ b/src/sqlfluff/rules/layout/LT09.py
@@ -0,0 +1,447 @@
+"""Implementation of Rule LT09."""
+
+from typing import List, NamedTuple, Optional, Sequence
+
+from sqlfluff.core.parser import WhitespaceSegment
+
+from sqlfluff.core.parser import BaseSegment, NewlineSegment
+from sqlfluff.core.parser.segments.base import IdentitySet
+from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext
+from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
+from sqlfluff.utils.functional import Segments, sp, FunctionalContext
+
+
+class SelectTargetsInfo(NamedTuple):
+    """Info about select targets and nearby whitespace."""
+
+    select_idx: int
+    first_new_line_idx: int
+    first_select_target_idx: int
+    first_whitespace_idx: int
+    comment_after_select_idx: int
+    select_targets: Sequence[BaseSegment]
+    from_segment: Optional[BaseSegment]
+    pre_from_whitespace: List[BaseSegment]
+
+
+class Rule_LT09(BaseRule):
+    """Select targets should be on a new line unless there is only one select target.
+
+    .. note::
+       By default, a wildcard (e.g. ``SELECT *``) is considered a single select target.
+       If you want it to be treated as multiple select targets, configure
+       ``wildcard_policy = multiple``.
+
+    **Anti-pattern**
+
+    Multiple select targets on the same line, or a single target alone on its own line.
+
+    .. code-block:: sql
+
+        select a, b
+        from foo;
+
+        -- Single select target on its own line.
+
+        SELECT
+            a
+        FROM foo;
+
+
+    **Best practice**
+
+    Multiple select targets each on their own line.
+
+    .. code-block:: sql
+
+        select
+            a,
+            b
+        from foo;
+
+        -- Single select target on the same line as the ``SELECT``
+        -- keyword.
+
+        SELECT a
+        FROM foo;
+
+        -- A single select target that spans multiple lines,
+        -- however, may still start on a new line.
+
+        SELECT
+            SUM(
+                1 + SUM(
+                    2 + 3
+                )
+            ) AS col
+        FROM test_table;
+
+    """
+
+    name = "layout.select_targets"
+    aliases = ("L036",)
+    groups = ("all", "layout")
+    config_keywords = ["wildcard_policy"]
+    crawl_behaviour = SegmentSeekerCrawler({"select_clause"})
+    is_fix_compatible = True
+
+    def _eval(self, context: RuleContext):
+        self.wildcard_policy: str
+        assert context.segment.is_type("select_clause")
+        select_targets_info = self._get_indexes(context)
+        select_clause = FunctionalContext(context).segment
+        wildcards = select_clause.children(
+            sp.is_type("select_clause_element")
+        ).children(sp.is_type("wildcard_expression"))
+        has_wildcard = bool(wildcards)
+        if len(select_targets_info.select_targets) == 1 and (
+            not has_wildcard or self.wildcard_policy == "single"
+        ):
+            return self._eval_single_select_target_element(
+                select_targets_info,
+                context,
+            )
+        elif len(select_targets_info.select_targets):
+            return self._eval_multiple_select_target_elements(
+                select_targets_info, context.segment
+            )
+
+    @staticmethod
+    def _get_indexes(context: RuleContext):
+        children = FunctionalContext(context).segment.children()
+        select_targets = children.select(sp.is_type("select_clause_element"))
+        first_select_target_idx = children.find(select_targets.get())
+        selects = children.select(sp.is_keyword("select"))
+        select_idx = children.find(selects.get()) if selects else -1
+        newlines = children.select(sp.is_type("newline"))
+        first_new_line_idx = children.find(newlines.get()) if newlines else -1
+        comment_after_select_idx = -1
+        if newlines:
+            comment_after_select = children.select(
+                sp.is_type("comment"),
+                start_seg=selects.get(),
+                stop_seg=newlines.get(),
+                loop_while=sp.or_(
+                    sp.is_type("comment"), sp.is_type("whitespace"), sp.is_meta()
+                ),
+            )
+            if comment_after_select:
+                comment_after_select_idx = (
+                    children.find(comment_after_select.get())
+                    if comment_after_select
+                    else -1
+                )
+        first_whitespace_idx = -1
+        if first_new_line_idx != -1:
+            # TRICKY: Ignore whitespace prior to the first newline, e.g. if
+            # the line with "SELECT" (before any select targets) has trailing
+            # whitespace.
+            segments_after_first_line = children.select(
+                sp.is_type("whitespace"), start_seg=children[first_new_line_idx]
+            )
+            first_whitespace_idx = children.find(segments_after_first_line.get())
+
+        siblings_post = FunctionalContext(context).siblings_post
+        from_segment = siblings_post.first(sp.is_type("from_clause")).first().get()
+        pre_from_whitespace = siblings_post.select(
+            sp.is_type("whitespace"), stop_seg=from_segment
+        )
+        return SelectTargetsInfo(
+            select_idx,
+            first_new_line_idx,
+            first_select_target_idx,
+            first_whitespace_idx,
+            comment_after_select_idx,
+            select_targets,
+            from_segment,
+            list(pre_from_whitespace),
+        )
+
+    def _eval_multiple_select_target_elements(self, select_targets_info, segment):
+        """Multiple select targets. Ensure each is on a separate line."""
+        # Insert newline before every select target.
+        fixes = []
+        for i, select_target in enumerate(select_targets_info.select_targets):
+            base_segment = (
+                segment if not i else select_targets_info.select_targets[i - 1]
+            )
+            if (
+                base_segment.pos_marker.working_line_no
+                == select_target.pos_marker.working_line_no
+            ):
+                # Find and delete any whitespace before the select target.
+                start_seg = select_targets_info.select_idx
+                # If any select modifier (e.g. DISTINCT) is present, start
+                # there rather than at the beginning.
+                modifier = segment.get_child("select_clause_modifier")
+                if modifier:
+                    start_seg = segment.segments.index(modifier)
+
+                ws_to_delete = segment.select_children(
+                    start_seg=segment.segments[start_seg]
+                    if not i
+                    else select_targets_info.select_targets[i - 1],
+                    select_if=lambda s: s.is_type("whitespace"),
+                    loop_while=lambda s: s.is_type("whitespace", "comma") or s.is_meta,
+                )
+                fixes += [LintFix.delete(ws) for ws in ws_to_delete]
+                fixes.append(LintFix.create_before(select_target, [NewlineSegment()]))
+
+            # If we are at the last select target check if the FROM clause
+            # is on the same line, and if so move it to its own line.
+            if select_targets_info.from_segment:
+                if (i + 1 == len(select_targets_info.select_targets)) and (
+                    select_target.pos_marker.working_line_no
+                    == select_targets_info.from_segment.pos_marker.working_line_no
+                ):
+                    fixes.extend(
+                        [
+                            LintFix.delete(ws)
+                            for ws in select_targets_info.pre_from_whitespace
+                        ]
+                    )
+                    fixes.append(
+                        LintFix.create_before(
+                            select_targets_info.from_segment,
+                            [NewlineSegment()],
+                        )
+                    )
+
+        if fixes:
+            return LintResult(anchor=segment, fixes=fixes)
+
+    def _eval_single_select_target_element(
+        self, select_targets_info, context: RuleContext
+    ):
+        select_clause = FunctionalContext(context).segment
+        parent_stack = context.parent_stack
+
+        # If it's all on one line, then there's no issue.
+        if not (
+            select_targets_info.select_idx
+            < select_targets_info.first_new_line_idx
+            < select_targets_info.first_select_target_idx
+        ):
+            self.logger.info(
+                "Target at index %s is already on a single line.",
+                select_targets_info.first_select_target_idx,
+            )
+            return None
+
+        # Do we have a modifier?
+        select_children = select_clause.children()
+        modifier: Optional[Segments]
+        modifier = select_children.first(sp.is_type("select_clause_modifier"))
+
+        # Does the target contain a newline?
+        # i.e. even if it's a single element, does it already span more than
+        # one line?
+        if (
+            "newline"
+            in select_children[
+                select_targets_info.first_select_target_idx
+            ].descendant_type_set
+        ):
+            self.logger.info(
+                "Target at index %s spans multiple lines so ignoring.",
+                select_targets_info.first_select_target_idx,
+            )
+            return None
+
+        # Prepare the select clause which will be inserted
+        insert_buff = [
+            WhitespaceSegment(),
+            select_children[select_targets_info.first_select_target_idx],
+        ]
+
+        # Check if the modifier is one we care about
+        if modifier:
+            # If it's already on the first line, ignore it.
+            if (
+                select_children.index(modifier.get())
+                < select_targets_info.first_new_line_idx
+            ):
+                modifier = None
+        fixes = [
+            # Delete the first select target from its original location.
+            # We'll add it to the right section at the end, once we know
+            # what to add.
+            LintFix.delete(
+                select_children[select_targets_info.first_select_target_idx],
+            ),
+        ]
+
+        # If we have a modifier to move:
+        if modifier:
+            # Add it to the insert
+            insert_buff = [WhitespaceSegment(), modifier[0]] + insert_buff
+
+            modifier_idx = select_children.index(modifier.get())
+            # Delete the whitespace after it (which is two after, thanks to indent)
+            if (
+                len(select_children) > modifier_idx + 2
+                and select_children[modifier_idx + 2].is_whitespace
+            ):
+                fixes += [
+                    LintFix.delete(
+                        select_children[modifier_idx + 2],
+                    ),
+                ]
+
+            # Delete the modifier itself
+            fixes += [
+                LintFix.delete(
+                    modifier[0],
+                ),
+            ]
+
+            # Set the position marker for removing the preceding
+            # whitespace and newline, which we'll use below.
+            start_idx = modifier_idx
+        else:
+            # Set the position marker for removing the preceding
+            # whitespace and newline, which we'll use below.
+            start_idx = select_targets_info.first_select_target_idx
+
+        if parent_stack and parent_stack[-1].is_type("select_statement"):
+            select_stmt = parent_stack[-1]
+            select_clause_idx = select_stmt.segments.index(select_clause.get())
+            after_select_clause_idx = select_clause_idx + 1
+            if len(select_stmt.segments) > after_select_clause_idx:
+
+                def _fixes_for_move_after_select_clause(
+                    stop_seg: BaseSegment,
+                    delete_segments: Optional[Segments] = None,
+                    add_newline: bool = True,
+                ) -> List[LintFix]:
+                    """Cleans up by moving leftover select_clause segments.
+
+                    Context: Some of the other fixes we make in
+                    _eval_single_select_target_element() leave leftover
+                    child segments that need to be moved to become
+                    *siblings* of the select_clause.
+                    """
+                    start_seg = (
+                        modifier[0]
+                        if modifier
+                        else select_children[select_targets_info.first_new_line_idx]
+                    )
+                    move_after_select_clause = select_children.select(
+                        start_seg=start_seg,
+                        stop_seg=stop_seg,
+                    )
+                    # :TRICKY: Below, we have a couple places where we
+                    # filter to guard against deleting the same segment
+                    # multiple times -- this is illegal.
+                    # :TRICKY: Use IdentitySet rather than set() since
+                    # different segments may compare as equal.
+                    all_deletes = IdentitySet(
+                        fix.anchor for fix in fixes if fix.edit_type == "delete"
+                    )
+                    fixes_ = []
+                    for seg in delete_segments or []:
+                        if seg not in all_deletes:
+                            fixes.append(LintFix.delete(seg))
+                            all_deletes.add(seg)
+                    fixes_ += [
+                        LintFix.delete(seg)
+                        for seg in move_after_select_clause
+                        if seg not in all_deletes
+                    ]
+                    fixes_.append(
+                        LintFix.create_after(
+                            select_clause[0],
+                            ([NewlineSegment()] if add_newline else [])
+                            + list(move_after_select_clause),
+                        )
+                    )
+                    return fixes_
+
+                if select_stmt.segments[after_select_clause_idx].is_type("newline"):
+                    # Since we're deleting the newline, we should also delete all
+                    # whitespace before it or it will add random whitespace to
+                    # following statements. So walk back through the segment
+                    # deleting whitespace until you get the previous newline, or
+                    # something else.
+                    to_delete = select_children.reversed().select(
+                        loop_while=sp.is_type("whitespace"),
+                        start_seg=select_children[start_idx],
+                    )
+                    if to_delete:
+                        # The select_clause is immediately followed by a
+                        # newline. Delete the newline in order to avoid leaving
+                        # behind an empty line after fix, *unless* we stopped
+                        # due to something other than a newline.
+                        delete_last_newline = select_children[
+                            start_idx - len(to_delete) - 1
+                        ].is_type("newline")
+
+                        # Delete the newline if we decided to.
+                        if delete_last_newline:
+                            fixes.append(
+                                LintFix.delete(
+                                    select_stmt.segments[after_select_clause_idx],
+                                )
+                            )
+
+                        fixes += _fixes_for_move_after_select_clause(
+                            to_delete[-1], to_delete
+                        )
+                elif select_stmt.segments[after_select_clause_idx].is_type(
+                    "whitespace"
+                ):
+                    # The select_clause has stuff after (most likely a comment)
+                    # Delete the whitespace immediately after the select clause
+                    # so the other stuff aligns nicely based on where the select
+                    # clause started.
+                    fixes += [
+                        LintFix.delete(
+                            select_stmt.segments[after_select_clause_idx],
+                        ),
+                    ]
+                    fixes += _fixes_for_move_after_select_clause(
+                        select_children[select_targets_info.first_select_target_idx],
+                    )
+                elif select_stmt.segments[after_select_clause_idx].is_type("dedent"):
+                    # Again let's strip back the whitespace, but simpler
+                    # as don't need to worry about new line so just break
+                    # if see non-whitespace
+                    to_delete = select_children.reversed().select(
+                        loop_while=sp.is_type("whitespace"),
+                        start_seg=select_children[select_clause_idx - 1],
+                    )
+                    if to_delete:
+                        fixes += _fixes_for_move_after_select_clause(
+                            to_delete[-1],
+                            to_delete,
+                            # If we deleted a newline, create a newline.
+                            any(seg for seg in to_delete if seg.is_type("newline")),
+                        )
+                else:
+                    fixes += _fixes_for_move_after_select_clause(
+                        select_children[select_targets_info.first_select_target_idx],
+                    )
+
+        if select_targets_info.comment_after_select_idx == -1:
+            fixes += [
+                # Insert the select_clause in place of the first newline in the
+                # Select statement
+                LintFix.replace(
+                    select_children[select_targets_info.first_new_line_idx],
+                    insert_buff,
+                ),
+            ]
+        else:
+            # The SELECT is followed by a comment on the same line. In order
+            # to autofix this, we'd need to move the select target between
+            # SELECT and the comment and potentially delete the entire line
+            # where the select target was (if it is now empty). This is
+            # *fairly tricky and complex*, in part because the newline on
+            # the select target's line is several levels higher in the
+            # parser tree. Hence, we currently don't autofix this. Could be
+            # autofixed in the future if/when we have the time.
+            fixes = []
+        return LintResult(
+            anchor=select_clause.get(),
+            fixes=fixes,
+        )
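
For reference, the renamed rule can still be driven by either its new code or its alias. A short sketch using sqlfluff's simple Python API (assuming the documented sqlfluff.lint and sqlfluff.fix helpers; the expected outputs in comments are illustrative):

    import sqlfluff

    # Two select targets on one line: LT09 wants one per line.
    sql = "select a, b from foo\n"
    violations = sqlfluff.lint(sql, dialect="ansi", rules=["LT09"])
    print([v["code"] for v in violations])  # expected: ['LT09']

    # The rule is fix-compatible, so fix() splits the targets.
    print(sqlfluff.fix(sql, dialect="ansi", rules=["LT09"]))
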
diff --git a/src/sqlfluff/rules/L041.py b/src/sqlfluff/rules/layout/LT10.py
similarity index 95%
rename from src/sqlfluff/rules/L041.py
rename to src/sqlfluff/rules/layout/LT10.py
index 0043b58..089cf58 100644
--- a/src/sqlfluff/rules/L041.py
+++ b/src/sqlfluff/rules/layout/LT10.py
@@ -1,17 +1,14 @@
-"""Implementation of Rule L041."""
+"""Implementation of Rule LT10."""
 from typing import Optional
 
 from sqlfluff.core.parser import NewlineSegment, WhitespaceSegment
 
 from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
 from sqlfluff.utils.functional import sp, FunctionalContext
 
 
-@document_groups
-@document_fix_compatible
-class Rule_L041(BaseRule):
+class Rule_LT10(BaseRule):
     """``SELECT`` modifiers (e.g. ``DISTINCT``) must be on the same line as ``SELECT``.
 
     **Anti-pattern**
@@ -35,8 +32,11 @@ class Rule_L041(BaseRule):
 
     """
 
-    groups = ("all", "core")
+    name = "layout.select_modifiers"
+    aliases = ("L041",)
+    groups = ("all", "core", "layout")
     crawl_behaviour = SegmentSeekerCrawler({"select_clause"})
+    is_fix_compatible = True
 
     def _eval(self, context: RuleContext) -> Optional[LintResult]:
         """Select clause modifiers must appear on same line as SELECT."""
diff --git a/src/sqlfluff/rules/L065.py b/src/sqlfluff/rules/layout/LT11.py
similarity index 85%
rename from src/sqlfluff/rules/L065.py
rename to src/sqlfluff/rules/layout/LT11.py
index 7075934..1ae2554 100644
--- a/src/sqlfluff/rules/L065.py
+++ b/src/sqlfluff/rules/layout/LT11.py
@@ -1,15 +1,12 @@
-"""Implementation of Rule L065."""
+"""Implementation of Rule LT11."""
 from typing import List
 
 from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
 from sqlfluff.utils.reflow.sequence import ReflowSequence
 
 
-@document_groups
-@document_fix_compatible
-class Rule_L065(BaseRule):
+class Rule_LT11(BaseRule):
     """Set operators should be surrounded by newlines.
 
     **Anti-pattern**
@@ -31,8 +28,10 @@ class Rule_L065(BaseRule):
 
     """
 
-    groups = ("all",)
-
+    name = "layout.set_operators"
+    aliases = ("L065",)
+    groups = ("all", "core", "layout")
+    is_fix_compatible = True
     crawl_behaviour = SegmentSeekerCrawler({"set_operator"})
 
     def _eval(self, context: RuleContext) -> List[LintResult]:
diff --git a/src/sqlfluff/rules/L009.py b/src/sqlfluff/rules/layout/LT12.py
similarity index 95%
rename from src/sqlfluff/rules/L009.py
rename to src/sqlfluff/rules/layout/LT12.py
index 949c0f6..7f1f570 100644
--- a/src/sqlfluff/rules/L009.py
+++ b/src/sqlfluff/rules/layout/LT12.py
@@ -1,10 +1,9 @@
-"""Implementation of Rule L009."""
+"""Implementation of Rule LT12."""
 from typing import List, Optional, Tuple
 
 from sqlfluff.core.parser import BaseSegment, NewlineSegment
 from sqlfluff.core.rules import BaseRule, LintResult, LintFix, RuleContext
 from sqlfluff.core.rules.crawlers import RootOnlyCrawler
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
 from sqlfluff.utils.functional import Segments, sp, tsp, FunctionalContext
 
 
@@ -31,9 +30,7 @@ def get_last_segment(segment: Segments) -> Tuple[List[BaseSegment], Segments]:
             return parent_stack, segment
 
 
-@document_groups
-@document_fix_compatible
-class Rule_L009(BaseRule):
+class Rule_LT12(BaseRule):
     """Files must end with a single trailing newline.
 
     **Anti-pattern**
@@ -105,12 +102,15 @@ class Rule_L009(BaseRule):
 
     """
 
-    groups = ("all", "core")
+    name = "layout.end-of-file"
+    aliases = ("L009",)
+    groups = ("all", "core", "layout")
 
     targets_templated = True
     # Use the RootOnlyCrawler to only call _eval() ONCE, with the root segment.
     crawl_behaviour = RootOnlyCrawler()
     lint_phase = "post"
+    is_fix_compatible = True
 
     def _eval(self, context: RuleContext) -> Optional[LintResult]:
         """Files must end with a single trailing newline.
diff --git a/src/sqlfluff/rules/L050.py b/src/sqlfluff/rules/layout/LT13.py
similarity index 93%
rename from src/sqlfluff/rules/L050.py
rename to src/sqlfluff/rules/layout/LT13.py
index c57581e..13200f9 100644
--- a/src/sqlfluff/rules/L050.py
+++ b/src/sqlfluff/rules/layout/LT13.py
@@ -1,15 +1,12 @@
-"""Implementation of Rule L050."""
+"""Implementation of Rule LT13."""
 from typing import Optional
 
 from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import RootOnlyCrawler
 from sqlfluff.utils.functional import Segments, sp, rsp
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
 
 
-@document_groups
-@document_fix_compatible
-class Rule_L050(BaseRule):
+class Rule_LT13(BaseRule):
     """Files must not begin with newlines or whitespace.
 
     **Anti-pattern**
@@ -66,11 +63,14 @@ class Rule_L050(BaseRule):
             foo
     """
 
-    groups = ("all",)
+    name = "layout.start_of_file"
+    aliases = ("L050",)
+    groups = ("all", "layout")
     targets_templated = True
     # Use the RootOnlyCrawler to only call _eval() ONCE, with the root segment.
     crawl_behaviour = RootOnlyCrawler()
     lint_phase = "post"
+    is_fix_compatible = True
 
     def _eval(self, context: RuleContext) -> Optional[LintResult]:
         """Files must not begin with newlines or whitespace."""
diff --git a/src/sqlfluff/rules/layout/__init__.py b/src/sqlfluff/rules/layout/__init__.py
new file mode 100644
index 0000000..9842d4e
--- /dev/null
+++ b/src/sqlfluff/rules/layout/__init__.py
@@ -0,0 +1,37 @@
+"""The aliasing plugin bundle."""
+
+from sqlfluff.core.plugin import hookimpl
+
+from sqlfluff.rules.layout.LT01 import Rule_LT01
+from sqlfluff.rules.layout.LT02 import Rule_LT02
+from sqlfluff.rules.layout.LT03 import Rule_LT03
+from sqlfluff.rules.layout.LT04 import Rule_LT04
+from sqlfluff.rules.layout.LT05 import Rule_LT05
+from sqlfluff.rules.layout.LT06 import Rule_LT06
+from sqlfluff.rules.layout.LT07 import Rule_LT07
+from sqlfluff.rules.layout.LT08 import Rule_LT08
+from sqlfluff.rules.layout.LT09 import Rule_LT09
+from sqlfluff.rules.layout.LT10 import Rule_LT10
+from sqlfluff.rules.layout.LT11 import Rule_LT11
+from sqlfluff.rules.layout.LT12 import Rule_LT12
+from sqlfluff.rules.layout.LT13 import Rule_LT13
+
+
+@hookimpl
+def get_rules():
+    """Get plugin rules."""
+    return [
+        Rule_LT01,
+        Rule_LT02,
+        Rule_LT03,
+        Rule_LT04,
+        Rule_LT05,
+        Rule_LT06,
+        Rule_LT07,
+        Rule_LT08,
+        Rule_LT09,
+        Rule_LT10,
+        Rule_LT11,
+        Rule_LT12,
+        Rule_LT13,
+    ]
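
The bundle module above is also the template for third-party plugins: a module exposes its rule classes through pluggy's get_rules hook. A sketch of a minimal external bundle (the rule class below is hypothetical and deliberately does nothing):

    """A hypothetical third-party rule bundle, mirroring the bundles above."""

    from sqlfluff.core.plugin import hookimpl
    from sqlfluff.core.rules import BaseRule
    from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler


    class Rule_MyPlugin_MP01(BaseRule):
        """Hypothetical rule: flags nothing, shown only for shape."""

        name = "myplugin.example"
        groups = ("all",)
        crawl_behaviour = SegmentSeekerCrawler({"select_statement"})

        def _eval(self, context):
            return None


    @hookimpl
    def get_rules():
        """Hand this bundle's rule classes to sqlfluff's plugin manager."""
        return [Rule_MyPlugin_MP01]
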
diff --git a/src/sqlfluff/rules/L026.py b/src/sqlfluff/rules/references/RF01.py
similarity index 89%
rename from src/sqlfluff/rules/L026.py
rename to src/sqlfluff/rules/references/RF01.py
index ddf6547..7ae7f06 100644
--- a/src/sqlfluff/rules/L026.py
+++ b/src/sqlfluff/rules/references/RF01.py
@@ -1,4 +1,4 @@
-"""Implementation of Rule L026."""
+"""Implementation of Rule RF01."""
 from dataclasses import dataclass, field
 from typing import cast, List, Optional, Tuple
 
@@ -15,7 +15,6 @@ from sqlfluff.core.rules import (
     EvalResultType,
 )
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_configuration, document_groups
 from sqlfluff.utils.functional import sp, FunctionalContext
 from sqlfluff.core.rules.reference import object_ref_matches_table
 
@@ -29,22 +28,22 @@ _START_TYPES = [
 
 
 @dataclass
-class L026Query(SelectCrawlerQuery):
-    """SelectCrawler Query with custom L026 info."""
+class RF01Query(SelectCrawlerQuery):
+    """SelectCrawler Query with custom RF01 info."""
 
     aliases: List[AliasInfo] = field(default_factory=list)
     standalone_aliases: List[str] = field(default_factory=list)
 
 
-@document_groups
-@document_configuration
-class Rule_L026(BaseRule):
+class Rule_RF01(BaseRule):
     """References cannot reference objects not present in ``FROM`` clause.
 
     .. note::
-       This rule is disabled by default for BigQuery, Hive, Redshift, SOQL, and SparkSQL
-       due to the support of things like structs and lateral views which trigger false
-       positives. It can be enabled with the ``force_enable = True`` flag.
+
+       This rule is disabled by default for BigQuery, Databricks, Hive,
+       Redshift, SOQL and SparkSQL due to the support of things like
+       structs and lateral views which trigger false positives. It can be
+       enabled with the ``force_enable = True`` flag.
 
     **Anti-pattern**
 
@@ -68,10 +67,19 @@ class Rule_L026(BaseRule):
 
     """
 
-    groups = ("all", "core")
+    name = "references.from"
+    aliases = ("L026",)
+    groups = ("all", "core", "references")
     config_keywords = ["force_enable"]
     crawl_behaviour = SegmentSeekerCrawler(set(_START_TYPES))
-    _dialects_disabled_by_default = ["bigquery", "hive", "redshift", "soql", "sparksql"]
+    _dialects_disabled_by_default = [
+        "bigquery",
+        "databricks",
+        "hive",
+        "redshift",
+        "soql",
+        "sparksql",
+    ]
 
     def _eval(self, context: RuleContext) -> EvalResultType:
         # Config type hints
@@ -100,9 +108,9 @@ class Rule_L026(BaseRule):
             # Verify table references in any SELECT statements found in or
             # below context.segment in the parser tree.
             crawler = SelectCrawler(
-                context.segment, context.dialect, query_class=L026Query
+                context.segment, context.dialect, query_class=RF01Query
             )
-            query: L026Query = cast(L026Query, crawler.query_tree)
+            query: RF01Query = cast(RF01Query, crawler.query_tree)
             if query:
                 self._analyze_table_references(
                     query, dml_target_table, context.dialect, violations
@@ -124,7 +132,7 @@ class Rule_L026(BaseRule):
 
     def _analyze_table_references(
         self,
-        query: L026Query,
+        query: RF01Query,
         dml_target_table: Optional[Tuple[str, ...]],
         dialect: Dialect,
         violations: List[LintResult],
@@ -160,7 +168,7 @@ class Rule_L026(BaseRule):
         # Visit children.
         for child in query.children:
             self._analyze_table_references(
-                cast(L026Query, child), dml_target_table, dialect, violations
+                cast(RF01Query, child), dml_target_table, dialect, violations
             )
 
     @staticmethod
@@ -207,7 +215,7 @@ class Rule_L026(BaseRule):
         return tbl_refs
 
     def _resolve_reference(
-        self, r, tbl_refs, dml_target_table: Optional[Tuple[str, ...]], query: L026Query
+        self, r, tbl_refs, dml_target_table: Optional[Tuple[str, ...]], query: RF01Query
     ):
         # Does this query define the referenced table?
         possible_references = [tbl_ref[1] for tbl_ref in tbl_refs]
@@ -220,7 +228,7 @@ class Rule_L026(BaseRule):
             # No. Check the parent query, if there is one.
             if query.parent:
                 return self._resolve_reference(
-                    r, tbl_refs, dml_target_table, cast(L026Query, query.parent)
+                    r, tbl_refs, dml_target_table, cast(RF01Query, query.parent)
                 )
             # No parent query. If there's a DML statement at the root, check its
             # target table or alias.
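
RF01's effect, including the per-dialect default, can be checked from Python. A sketch with the simple API (expected values in comments are illustrative):

    import sqlfluff

    # "b" is never brought into scope by the FROM clause, so RF01 fires.
    sql = "select b.x from a\n"
    print([v["code"] for v in sqlfluff.lint(sql, dialect="ansi", rules=["RF01"])])
    # expected: ['RF01']

    # For the dialects listed above (e.g. BigQuery) the rule is off by
    # default and only runs with force_enable = True in configuration.
    print(sqlfluff.lint(sql, dialect="bigquery", rules=["RF01"]))
    # expected: []
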
diff --git a/src/sqlfluff/rules/L027.py b/src/sqlfluff/rules/references/RF02.py
similarity index 92%
rename from src/sqlfluff/rules/L027.py
rename to src/sqlfluff/rules/references/RF02.py
index d79b42b..6ef3cd1 100644
--- a/src/sqlfluff/rules/L027.py
+++ b/src/sqlfluff/rules/references/RF02.py
@@ -1,15 +1,12 @@
-"""Implementation of Rule L027."""
+"""Implementation of Rule RF02."""
 import regex
 from typing import List, Optional
 
 from sqlfluff.core.rules import LintResult
-from sqlfluff.rules.L020 import Rule_L020
-from sqlfluff.core.rules.doc_decorators import document_configuration, document_groups
+from sqlfluff.rules.aliasing.AL04 import Rule_AL04
 
 
-@document_groups
-@document_configuration
-class Rule_L027(Rule_L020):
+class Rule_RF02(Rule_AL04):
     """References should be qualified if select has more than one referenced table/view.
 
     .. note::
@@ -37,8 +34,10 @@ class Rule_L027(Rule_L020):
         LEFT JOIN vee ON vee.a = foo.a
     """
 
-    groups = ("all",)
-    # Crawl behaviour is defined in L020
+    name = "references.qualification"
+    aliases = ("L027",)
+    groups = ("all", "references")
+    # Crawl behaviour is defined in AL04
 
     def _lint_references_and_aliases(
         self,
diff --git a/src/sqlfluff/rules/L028.py b/src/sqlfluff/rules/references/RF03.py
similarity index 97%
rename from src/sqlfluff/rules/L028.py
rename to src/sqlfluff/rules/references/RF03.py
index 607f9c4..5c185de 100644
--- a/src/sqlfluff/rules/L028.py
+++ b/src/sqlfluff/rules/references/RF03.py
@@ -1,4 +1,4 @@
-"""Implementation of Rule L028."""
+"""Implementation of Rule RF03."""
 
 from typing import Iterator, List, Optional, Set
 
@@ -16,21 +16,13 @@ from sqlfluff.core.rules import (
 )
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
 from sqlfluff.utils.functional import sp, FunctionalContext
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
 from sqlfluff.dialects.dialect_ansi import IdentifierSegment
 
 
 _START_TYPES = ["select_statement", "set_expression", "with_compound_statement"]
 
 
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L028(BaseRule):
+class Rule_RF03(BaseRule):
     """References should be consistent in statements with a single table.
 
     .. note::
@@ -72,7 +64,9 @@ class Rule_L028(BaseRule):
 
     """
 
-    groups = ("all",)
+    name = "references.consistent"
+    aliases = ("L028",)
+    groups = ("all", "references")
     config_keywords = [
         "single_table_references",
         "force_enable",
@@ -82,6 +76,7 @@ class Rule_L028(BaseRule):
     _dialects_with_structs = ["bigquery", "hive", "redshift"]
     # This could be turned into an option
     _fix_inconsistent_to = "qualified"
+    is_fix_compatible = True
 
     def _eval(self, context: RuleContext) -> EvalResultType:
         """Override base class for dialects that use structs, or SELECT aliases."""
diff --git a/src/sqlfluff/rules/L029.py b/src/sqlfluff/rules/references/RF04.py
similarity index 93%
rename from src/sqlfluff/rules/L029.py
rename to src/sqlfluff/rules/references/RF04.py
index 54eaa8c..f0acb30 100644
--- a/src/sqlfluff/rules/L029.py
+++ b/src/sqlfluff/rules/references/RF04.py
@@ -1,16 +1,13 @@
-"""Implementation of Rule L029."""
+"""Implementation of Rule RF04."""
 import regex
 from typing import Optional
 
 from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_configuration, document_groups
-from sqlfluff.rules.L014 import identifiers_policy_applicable
+from sqlfluff.utils.identifers import identifiers_policy_applicable
 
 
-@document_groups
-@document_configuration
-class Rule_L029(BaseRule):
+class Rule_RF04(BaseRule):
     """Keywords should not be used as identifiers.
 
     Although `unreserved` keywords `can` be used as identifiers,
@@ -44,7 +41,9 @@ class Rule_L029(BaseRule):
 
     """
 
-    groups = ("all",)
+    name = "references.keywords"
+    aliases = ("L029",)
+    groups = ("all", "references")
     crawl_behaviour = SegmentSeekerCrawler({"naked_identifier", "quoted_identifier"})
     config_keywords = [
         "unquoted_identifiers_policy",
diff --git a/src/sqlfluff/rules/L057.py b/src/sqlfluff/rules/references/RF05.py
similarity index 92%
rename from src/sqlfluff/rules/L057.py
rename to src/sqlfluff/rules/references/RF05.py
index 551f0c8..4cac030 100644
--- a/src/sqlfluff/rules/L057.py
+++ b/src/sqlfluff/rules/references/RF05.py
@@ -1,17 +1,14 @@
-"""Implementation of Rule L057."""
+"""Implementation of Rule RF05."""
 from typing import Optional, Set, List
 
 import regex
 
 from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_configuration, document_groups
-from sqlfluff.rules.L014 import identifiers_policy_applicable
+from sqlfluff.utils.identifers import identifiers_policy_applicable
 
 
-@document_groups
-@document_configuration
-class Rule_L057(BaseRule):
+class Rule_RF05(BaseRule):
     """Do not use special characters in identifiers.
 
     **Anti-pattern**
@@ -44,7 +41,9 @@ class Rule_L057(BaseRule):
 
     """
 
-    groups = ("all",)
+    name = "references.special_chars"
+    aliases = ("L057",)
+    groups = ("all", "references")
     config_keywords = [
         "quoted_identifiers_policy",
         "unquoted_identifiers_policy",
@@ -134,7 +133,7 @@ class Rule_L057(BaseRule):
                     identifier = identifier[:-1]
                 identifier = identifier.replace(".", "")
 
-            # SparkSQL file references for direct file query
+            # Databricks & SparkSQL file references for direct file query
             # are quoted in back ticks to allow for identifiers common
             # in file paths and regex patterns for path globbing
             # https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-file.html
@@ -143,8 +142,11 @@ class Rule_L057(BaseRule):
             # https://spark.apache.org/docs/latest/sql-data-sources-generic-options.html#path-global-filter
             #
 
-            if context.dialect.name in ["sparksql"] and context.parent_stack:
-                # SparkSQL file references for direct file query
+            if (
+                context.dialect.name in ["databricks", "sparksql"]
+                and context.parent_stack
+            ):
+                # Databricks & SparkSQL file references for direct file query
                 # are quoted in back ticks to allow for identifiers common
                 # in file paths and regex patterns for path globbing
                 # https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-file.html
@@ -155,7 +157,8 @@ class Rule_L057(BaseRule):
                 if context.parent_stack[-1].is_type("file_reference"):
                     return None
 
-                # SparkSQL properties keys used for setting table and runtime
+                # Databricks & SparkSQL properties keys
+                # used for setting table and runtime
                 # configurations denote namespace using dots, so these are
                 # removed before testing L057 to not trigger false positives
                 # Runtime configurations:
diff --git a/src/sqlfluff/rules/L059.py b/src/sqlfluff/rules/references/RF06.py
similarity index 84%
rename from src/sqlfluff/rules/L059.py
rename to src/sqlfluff/rules/references/RF06.py
index 5c54b83..c59d0dd 100644
--- a/src/sqlfluff/rules/L059.py
+++ b/src/sqlfluff/rules/references/RF06.py
@@ -1,4 +1,4 @@
-"""Implementation of Rule L059."""
+"""Implementation of Rule RF06."""
 
 from typing import List, Optional, cast, Type
 
@@ -7,18 +7,10 @@ import regex
 from sqlfluff.core.parser.segments.raw import CodeSegment
 from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
 from sqlfluff.utils.functional import sp, FunctionalContext
 
 
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L059(BaseRule):
+class Rule_RF06(BaseRule):
     """Unnecessary quoted identifier.
 
     This rule will fail if the quotes used to quote an identifier are (un)necessary
@@ -80,15 +72,19 @@ class Rule_L059(BaseRule):
 
     """
 
-    groups = ("all",)
+    name = "references.quoting"
+    aliases = ("L059",)
+    groups = ("all", "references")
     config_keywords = [
         "prefer_quoted_identifiers",
+        "prefer_quoted_keywords",
         "ignore_words",
         "ignore_words_regex",
         "force_enable",
     ]
     crawl_behaviour = SegmentSeekerCrawler({"quoted_identifier", "naked_identifier"})
     _dialects_allowing_quotes_in_column_names = ["postgres", "snowflake"]
+    is_fix_compatible = True
 
     # Ignore "password_auth" type to allow quotes around passwords within
     # `CREATE USER` statements in Exasol dialect.
@@ -98,6 +94,7 @@ class Rule_L059(BaseRule):
         """Unnecessary quoted identifier."""
         # Config type hints
         self.prefer_quoted_identifiers: bool
+        self.prefer_quoted_keywords: bool
         self.ignore_words: str
         self.ignore_words_regex: str
         self.force_enable: bool
@@ -116,12 +113,22 @@ class Rule_L059(BaseRule):
         if FunctionalContext(context).parent_stack.any(sp.is_type(*self._ignore_types)):
             return None
 
+        identifier_is_quoted = not regex.search(
+            r'^[^"\'].+[^"\']$', context.segment.raw
+        )
+
+        identifier_contents = context.segment.raw
+        if identifier_is_quoted:
+            identifier_contents = identifier_contents[1:-1]
+
+        identifier_is_keyword = identifier_contents.upper() in context.dialect.sets(
+            "reserved_keywords"
+        ) or identifier_contents.upper() in context.dialect.sets("unreserved_keywords")
+
         if self.prefer_quoted_identifiers:
             context_policy = "naked_identifier"
-            identifier_contents = context.segment.raw
         else:
             context_policy = "quoted_identifier"
-            identifier_contents = context.segment.raw[1:-1]
 
         # Get the ignore_words_list configuration.
         try:
@@ -141,6 +148,16 @@ class Rule_L059(BaseRule):
         ):
             return LintResult(memory=context.memory)
 
+        if self.prefer_quoted_keywords and identifier_is_keyword:
+            if not identifier_is_quoted:
+                return LintResult(
+                    context.segment,
+                    description=(
+                        f"Missing quoted keyword identifier {identifier_contents}."
+                    ),
+                )
+            return None
+
         # Ignore the segments that are not of the same type as the defined policy above.
         # Also TSQL has a keyword called QUOTED_IDENTIFIER which maps to the name so
         # need to explicitly check for that.
@@ -165,12 +182,9 @@ class Rule_L059(BaseRule):
         # Now we only deal with NOT forced quoted identifiers configuration
         # (meaning prefer_quoted_identifiers=False).
 
-        # Extract contents of outer quotes.
-        quoted_identifier_contents = context.segment.raw[1:-1]
-
         # Retrieve NakedIdentifierSegment RegexParser for the dialect.
         naked_identifier_parser = context.dialect._library["NakedIdentifierSegment"]
-        IdentifierSegment = cast(
+        NakedIdentifierSegment = cast(
             Type[CodeSegment], context.dialect.get_segment("IdentifierSegment")
         )
 
@@ -179,14 +193,14 @@ class Rule_L059(BaseRule):
         if (
             regex.fullmatch(
                 naked_identifier_parser.template,
-                quoted_identifier_contents,
+                identifier_contents,
                 regex.IGNORECASE,
             )
             is not None
         ) and (
             regex.fullmatch(
                 naked_identifier_parser.anti_template,
-                quoted_identifier_contents,
+                identifier_contents,
                 regex.IGNORECASE,
             )
             is None
@@ -197,8 +211,8 @@ class Rule_L059(BaseRule):
                     LintFix.replace(
                         context.segment,
                         [
-                            IdentifierSegment(
-                                raw=quoted_identifier_contents,
+                            NakedIdentifierSegment(
+                                raw=identifier_contents,
                                 type="naked_identifier",
                             )
                         ],
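
The reworked RF06 logic above first strips the outer quotes, then tests the contents against the dialect's naked-identifier template and, newly, its keyword sets. A usage sketch with the simple API (expected outputs are illustrative):

    import sqlfluff

    # Neither identifier needs quoting in ANSI, so fix() unquotes both.
    print(sqlfluff.fix('select "foo" from "bar"\n', rules=["RF06"]))
    # expected: select foo from bar

    # A reserved keyword such as "from" keeps its quotes.
    print(sqlfluff.fix('select "from" from tbl\n', rules=["RF06"]))
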
diff --git a/src/sqlfluff/rules/references/__init__.py b/src/sqlfluff/rules/references/__init__.py
new file mode 100644
index 0000000..eccd443
--- /dev/null
+++ b/src/sqlfluff/rules/references/__init__.py
@@ -0,0 +1,16 @@
+"""The references plugin bundle."""
+
+from sqlfluff.core.plugin import hookimpl
+
+from sqlfluff.rules.references.RF01 import Rule_RF01
+from sqlfluff.rules.references.RF02 import Rule_RF02
+from sqlfluff.rules.references.RF03 import Rule_RF03
+from sqlfluff.rules.references.RF04 import Rule_RF04
+from sqlfluff.rules.references.RF05 import Rule_RF05
+from sqlfluff.rules.references.RF06 import Rule_RF06
+
+
+@hookimpl
+def get_rules():
+    """Get plugin rules."""
+    return [Rule_RF01, Rule_RF02, Rule_RF03, Rule_RF04, Rule_RF05, Rule_RF06]
diff --git a/src/sqlfluff/rules/L035.py b/src/sqlfluff/rules/structure/ST01.py
similarity index 91%
rename from src/sqlfluff/rules/L035.py
rename to src/sqlfluff/rules/structure/ST01.py
index 8011514..f17eeb3 100644
--- a/src/sqlfluff/rules/L035.py
+++ b/src/sqlfluff/rules/structure/ST01.py
@@ -1,15 +1,12 @@
-"""Implementation of Rule L035."""
-from typing import Optional
+"""Implementation of Rule ST01."""
+from typing import Optional, Tuple
 
 from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
 from sqlfluff.utils.functional import sp, FunctionalContext
 
 
-@document_groups
-@document_fix_compatible
-class Rule_L035(BaseRule):
+class Rule_ST01(BaseRule):
     """Do not specify ``else null`` in a case when statement (redundant).
 
     **Anti-pattern**
@@ -38,8 +35,11 @@ class Rule_L035(BaseRule):
         from x
     """
 
-    groups = ("all",)
+    name = "structure.else_null"
+    aliases = ("L035",)
+    groups: Tuple[str, ...] = ("all", "structure")
     crawl_behaviour = SegmentSeekerCrawler({"case_expression"})
+    is_fix_compatible = True
 
     def _eval(self, context: RuleContext) -> Optional[LintResult]:
         """Find rule violations and provide fixes.
diff --git a/src/sqlfluff/rules/L043.py b/src/sqlfluff/rules/structure/ST02.py
similarity index 97%
rename from src/sqlfluff/rules/L043.py
rename to src/sqlfluff/rules/structure/ST02.py
index 8378688..ab88d1b 100644
--- a/src/sqlfluff/rules/L043.py
+++ b/src/sqlfluff/rules/structure/ST02.py
@@ -1,5 +1,5 @@
-"""Implementation of Rule L043."""
-from typing import List, Optional
+"""Implementation of Rule ST02."""
+from typing import List, Optional, Tuple
 
 from sqlfluff.core.parser import (
     WhitespaceSegment,
@@ -10,13 +10,10 @@ from sqlfluff.core.parser.segments.base import BaseSegment
 
 from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
 from sqlfluff.utils.functional import Segments, sp, FunctionalContext
 
 
-@document_groups
-@document_fix_compatible
-class Rule_L043(BaseRule):
+class Rule_ST02(BaseRule):
     """Unnecessary ``CASE`` statement.
 
     **Anti-pattern**
@@ -78,8 +75,11 @@ class Rule_L043(BaseRule):
 
     """
 
-    groups = ("all",)
+    name = "structure.simple_case"
+    aliases = ("L043",)
+    groups: Tuple[str, ...] = ("all", "structure")
     crawl_behaviour = SegmentSeekerCrawler({"case_expression"})
+    is_fix_compatible = True
 
     @staticmethod
     def _coalesce_fix_list(
diff --git a/src/sqlfluff/rules/L045.py b/src/sqlfluff/rules/structure/ST03.py
similarity index 93%
rename from src/sqlfluff/rules/L045.py
rename to src/sqlfluff/rules/structure/ST03.py
index 34e988c..dccce9b 100644
--- a/src/sqlfluff/rules/L045.py
+++ b/src/sqlfluff/rules/structure/ST03.py
@@ -1,14 +1,12 @@
-"""Implementation of Rule L045."""
+"""Implementation of Rule ST03."""
 from typing import Iterator
 
 from sqlfluff.core.rules import BaseRule, EvalResultType, LintResult, RuleContext
 from sqlfluff.utils.analysis.select_crawler import Query, SelectCrawler
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_groups
 
 
-@document_groups
-class Rule_L045(BaseRule):
+class Rule_ST03(BaseRule):
     """Query defines a CTE (common-table expression) but does not use it.
 
     **Anti-pattern**
@@ -45,7 +43,9 @@ class Rule_L045(BaseRule):
         FROM cte1
     """
 
-    groups = ("all", "core")
+    name = "structure.unused_cte"
+    aliases = ("L045",)
+    groups = ("all", "core", "structure")
     crawl_behaviour = SegmentSeekerCrawler({"statement"})
 
     @classmethod
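
ST03 only reports (it carries no fixes), flagging CTEs that are defined but never referenced. A quick sketch via the simple API (expected output is illustrative):

    import sqlfluff

    sql = (
        "with cte1 as (select 1 as a),\n"
        "cte2 as (select 2 as b)\n"
        "select a from cte1\n"
    )
    print([v["code"] for v in sqlfluff.lint(sql, rules=["ST03"])])
    # expected: ['ST03'], because cte2 is never used
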
diff --git a/src/sqlfluff/rules/L058.py b/src/sqlfluff/rules/structure/ST04.py
similarity index 86%
rename from src/sqlfluff/rules/L058.py
rename to src/sqlfluff/rules/structure/ST04.py
index 7611026..4982d58 100644
--- a/src/sqlfluff/rules/L058.py
+++ b/src/sqlfluff/rules/structure/ST04.py
@@ -1,15 +1,14 @@
-"""Implementation of Rule L058."""
+"""Implementation of Rule ST04."""
 
 from sqlfluff.core.parser import NewlineSegment, WhitespaceSegment
 from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
 from sqlfluff.utils.functional import sp, FunctionalContext
 
+from sqlfluff.utils.reflow.reindent import construct_single_indent
 
-@document_groups
-@document_fix_compatible
-class Rule_L058(BaseRule):
+
+class Rule_ST04(BaseRule):
     """Nested ``CASE`` statement in ``ELSE`` clause could be flattened.
 
     **Anti-pattern**
@@ -43,8 +42,11 @@ class Rule_L058(BaseRule):
 
     """
 
-    groups = ("all",)
+    name = "structure.nested_case"
+    aliases = ("L058",)
+    groups = ("all", "structure")
     crawl_behaviour = SegmentSeekerCrawler({"case_expression"})
+    is_fix_compatible = True
 
     def _eval(self, context: RuleContext) -> LintResult:
         """Nested CASE statement in ELSE clause could be flattened."""
@@ -82,6 +84,9 @@ class Rule_L058(BaseRule):
         # Delete the nested "CASE" expression.
         fixes = case1_to_delete.apply(lambda seg: LintFix.delete(seg))
 
+        tab_space_size: int = context.config.get("tab_space_size", ["indentation"])
+        indent_unit: str = context.config.get("indent_unit", ["indentation"])
+
         # Determine the indentation to use when we move the nested "WHEN"
         # and "ELSE" clauses, based on the indentation of case1_last_when.
         # If no whitespace segments found, use default indent.
@@ -90,7 +95,11 @@ class Rule_L058(BaseRule):
             .reversed()
             .select(sp.is_type("whitespace"))
         )
-        indent_str = "".join(seg.raw for seg in indent) if indent else self.indent
+        indent_str = (
+            "".join(seg.raw for seg in indent)
+            if indent
+            else construct_single_indent(indent_unit, tab_space_size)
+        )
 
         # Move the nested "when" and "else" clauses after the last outer
         # "when".
diff --git a/src/sqlfluff/rules/L042.py b/src/sqlfluff/rules/structure/ST05.py
similarity index 98%
rename from src/sqlfluff/rules/L042.py
rename to src/sqlfluff/rules/structure/ST05.py
index 62c6b1a..6bc01a0 100644
--- a/src/sqlfluff/rules/L042.py
+++ b/src/sqlfluff/rules/structure/ST05.py
@@ -1,5 +1,4 @@
-"""Implementation of Rule L042."""
-import copy
+"""Implementation of Rule ST05."""
 from functools import partial
 from typing import (
     Iterator,
@@ -33,11 +32,6 @@ from sqlfluff.core.rules import (
 from sqlfluff.utils.analysis.select import get_select_statement_info
 from sqlfluff.utils.analysis.select_crawler import Query, Selectable, SelectCrawler
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
-    document_fix_compatible,
-    document_groups,
-)
 from sqlfluff.utils.functional.segment_predicates import (
     is_keyword,
     is_type,
@@ -67,10 +61,7 @@ class _NestedSubQuerySummary(NamedTuple):
     select_source_names: Set[str]
 
 
-@document_groups
-@document_fix_compatible
-@document_configuration
-class Rule_L042(BaseRule):
+class Rule_ST05(BaseRule):
     """Join/From clauses should not contain subqueries. Use CTEs instead.
 
     By default this rule is configured to allow subqueries within ``FROM``
@@ -107,7 +98,9 @@ class Rule_L042(BaseRule):
 
     """
 
-    groups = ("all",)
+    name = "structure.subquery"
+    aliases = ("L042",)
+    groups = ("all", "structure")
     config_keywords = ["forbid_subquery_in"]
     crawl_behaviour = SegmentSeekerCrawler(set(_SELECT_TYPES))
 
@@ -116,6 +109,7 @@ class Rule_L042(BaseRule):
         "from": ["from_expression_element"],
         "both": ["join_clause", "from_expression_element"],
     }
+    is_fix_compatible = True
 
     def _eval(self, context: RuleContext) -> EvalResultType:
         """Join/From clauses should not contain subqueries. Use CTEs instead."""
@@ -562,7 +556,7 @@ class SegmentCloneMap:
     """Clones a segment tree, maps from original segments to their clones."""
 
     def __init__(self, segment: BaseSegment):
-        segment_copy = copy.deepcopy(segment)
+        segment_copy = segment.copy()
         self.segment_map = {}
         for old_segment, new_segment in zip(
             segment.recursive_crawl_all(),
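
The SegmentCloneMap change above swaps copy.deepcopy for the segment's own copy() method; either way, the trick is that the clone iterates in the same order as the original, so the two traversals can be zipped into an old-to-new mapping. A generic sketch of that idea with a toy tree (not sqlfluff's API):

    import copy
    from dataclasses import dataclass, field
    from typing import Iterator, List


    @dataclass
    class Node:
        name: str
        children: List["Node"] = field(default_factory=list)

        def walk(self) -> Iterator["Node"]:
            """Depth-first traversal, like recursive_crawl_all()."""
            yield self
            for child in self.children:
                yield from child.walk()


    tree = Node("root", [Node("a"), Node("b", [Node("c")])])
    clone = copy.deepcopy(tree)  # stand-in for segment.copy()

    # Matching traversal orders give an original -> clone mapping.
    segment_map = {id(old): new for old, new in zip(tree.walk(), clone.walk())}
    assert segment_map[id(tree.children[1])].name == "b"
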
diff --git a/src/sqlfluff/rules/L034.py b/src/sqlfluff/rules/structure/ST06.py
similarity index 97%
rename from src/sqlfluff/rules/L034.py
rename to src/sqlfluff/rules/structure/ST06.py
index 73ba405..1fca91d 100644
--- a/src/sqlfluff/rules/L034.py
+++ b/src/sqlfluff/rules/structure/ST06.py
@@ -1,4 +1,4 @@
-"""Implementation of Rule L034."""
+"""Implementation of Rule ST06."""
 from typing import Iterator, List, Optional
 
 from sqlfluff.core.parser import BaseSegment
@@ -10,12 +10,9 @@ from sqlfluff.core.rules import (
     RuleContext,
 )
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
 
 
-@document_groups
-@document_fix_compatible
-class Rule_L034(BaseRule):
+class Rule_ST06(BaseRule):
     """Select wildcards then simple targets before calculations and aggregates.
 
     **Anti-pattern**
@@ -45,8 +42,11 @@ class Rule_L034(BaseRule):
 
     """
 
-    groups = ("all",)
+    name = "structure.column_order"
+    aliases = ("L034",)
+    groups = ("all", "structure")
     crawl_behaviour = SegmentSeekerCrawler({"select_clause"})
+    is_fix_compatible = True
 
     def _validate(self, i: int, segment: BaseSegment) -> None:
         # Check if we've seen a more complex select target element already
@@ -218,7 +218,7 @@ class Rule_L034(BaseRule):
     def _implicit_column_references(cls, segment: BaseSegment) -> Iterator[BaseSegment]:
         """Yield any implicit ORDER BY or GROUP BY column references.
 
-        This function was adapted from similar code in L054.
+        This function was adapted from similar code in AM06.
         """
         _ignore_types: List[str] = ["withingroup_clause", "window_specification"]
         if not segment.is_type(*_ignore_types):  # Ignore Windowing clauses
diff --git a/src/sqlfluff/rules/L032.py b/src/sqlfluff/rules/structure/ST07.py
similarity index 92%
rename from src/sqlfluff/rules/L032.py
rename to src/sqlfluff/rules/structure/ST07.py
index be857a6..4b78512 100644
--- a/src/sqlfluff/rules/L032.py
+++ b/src/sqlfluff/rules/structure/ST07.py
@@ -1,4 +1,4 @@
-"""Implementation of Rule L032."""
+"""Implementation of Rule ST07."""
 from typing import List, Optional, Tuple
 from sqlfluff.core.parser.segments.base import BaseSegment
 from sqlfluff.core.parser.segments.raw import (
@@ -8,15 +8,12 @@ from sqlfluff.core.parser.segments.raw import (
 )
 from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
 from sqlfluff.utils.functional import Segments, sp, FunctionalContext
 from sqlfluff.utils.analysis.select import get_select_statement_info
 from sqlfluff.dialects.dialect_ansi import ColumnReferenceSegment, IdentifierSegment
 
 
-@document_groups
-@document_fix_compatible
-class Rule_L032(BaseRule):
+class Rule_ST07(BaseRule):
     """Prefer specifying join keys instead of using ``USING``.
 
     .. note::
@@ -32,6 +29,11 @@ class Rule_L032(BaseRule):
        rule, so for now we will keep it in SQLFluff, but encourage those that
        do not find value in the rule, to turn it off.
 
+    .. note::
+
+       This rule is disabled for ClickHouse, which allows ``USING`` without
+       brackets, a form this rule does not handle.
+
     **Anti-pattern**
 
     .. code-block:: sql
@@ -59,10 +61,19 @@ class Rule_L032(BaseRule):
 
     """
 
-    groups = ("all",)
+    name = "structure.using"
+    aliases = ("L032",)
+    groups: Tuple[str, ...] = ("all", "structure")
     crawl_behaviour = SegmentSeekerCrawler({"join_clause"})
+    is_fix_compatible = True
+    _dialects_disabled_by_default = [
+        "clickhouse",
+    ]
 
     def _eval(self, context: RuleContext) -> Optional[LintResult]:
         """Look for USING in a join clause."""
+        if context.dialect.name in self._dialects_disabled_by_default:
+            return LintResult()
+
         segment = FunctionalContext(context).segment
         parent_stack = FunctionalContext(context).parent_stack
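
The `_dialects_disabled_by_default` guard above means the rule returns an empty result for ClickHouse before any analysis runs. One way to observe the difference is through the simple API (a sketch; the shape of the returned dicts may vary between versions):

import sqlfluff

sql = "SELECT a.id, b.val FROM a JOIN b USING (id)\n"
for dialect in ("ansi", "clickhouse"):
    # ST07 fires for ansi, but the guard suppresses it for clickhouse.
    print(dialect, sqlfluff.lint(sql, dialect=dialect, rules=["ST07"]))
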
diff --git a/src/sqlfluff/rules/L015.py b/src/sqlfluff/rules/structure/ST08.py
similarity index 95%
rename from src/sqlfluff/rules/L015.py
rename to src/sqlfluff/rules/structure/ST08.py
index b85714b..7bca24e 100644
--- a/src/sqlfluff/rules/L015.py
+++ b/src/sqlfluff/rules/structure/ST08.py
@@ -1,17 +1,14 @@
-"""Implementation of Rule L015."""
+"""Implementation of Rule ST08."""
 from typing import Optional
 
 from sqlfluff.core.parser import KeywordSegment, WhitespaceSegment
 from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_fix_compatible, document_groups
 from sqlfluff.utils.functional import sp, FunctionalContext
 from sqlfluff.utils.reflow.sequence import ReflowSequence
 
 
-@document_groups
-@document_fix_compatible
-class Rule_L015(BaseRule):
+class Rule_ST08(BaseRule):
     """``DISTINCT`` used with parentheses.
 
     **Anti-pattern**
@@ -35,8 +32,11 @@ class Rule_L015(BaseRule):
 
     """
 
-    groups = ("all", "core")
+    name = "structure.distinct"
+    aliases = ("L015",)
+    groups = ("all", "structure", "core")
     crawl_behaviour = SegmentSeekerCrawler({"select_clause", "function"})
+    is_fix_compatible = True
 
     def _eval(self, context: RuleContext) -> Optional[LintResult]:
         """Looking for DISTINCT before a bracket.
diff --git a/src/sqlfluff/rules/structure/__init__.py b/src/sqlfluff/rules/structure/__init__.py
new file mode 100644
index 0000000..00205c1
--- /dev/null
+++ b/src/sqlfluff/rules/structure/__init__.py
@@ -0,0 +1,27 @@
+"""The structure plugin bundle."""
+
+from sqlfluff.core.plugin import hookimpl
+
+from sqlfluff.rules.structure.ST01 import Rule_ST01
+from sqlfluff.rules.structure.ST02 import Rule_ST02
+from sqlfluff.rules.structure.ST03 import Rule_ST03
+from sqlfluff.rules.structure.ST04 import Rule_ST04
+from sqlfluff.rules.structure.ST05 import Rule_ST05
+from sqlfluff.rules.structure.ST06 import Rule_ST06
+from sqlfluff.rules.structure.ST07 import Rule_ST07
+from sqlfluff.rules.structure.ST08 import Rule_ST08
+
+
+@hookimpl
+def get_rules():
+    """Get plugin rules."""
+    return [
+        Rule_ST01,
+        Rule_ST02,
+        Rule_ST03,
+        Rule_ST04,
+        Rule_ST05,
+        Rule_ST06,
+        Rule_ST07,
+        Rule_ST08,
+    ]
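
Rules are now grouped into per-theme bundles registered through pluggy. An external package can plug into the same `get_rules` hook; a hedged sketch of the packaging side, assuming the standard `sqlfluff` entry point group (package and module names hypothetical):

# In the hypothetical plugin package's setup.cfg:
#
#   [options.entry_points]
#   sqlfluff =
#       sqlfluff_myplugin = sqlfluff_myplugin
#
# and in sqlfluff_myplugin/__init__.py:

from sqlfluff.core.plugin import hookimpl


@hookimpl
def get_rules():
    """Get plugin rules."""
    # Imported lazily, mirroring the bundle modules above.
    from sqlfluff_myplugin.rules import Rule_MY01  # hypothetical rule

    return [Rule_MY01]
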
diff --git a/src/sqlfluff/rules/L056.py b/src/sqlfluff/rules/tsql/TQ01.py
similarity index 93%
rename from src/sqlfluff/rules/L056.py
rename to src/sqlfluff/rules/tsql/TQ01.py
index abb44b0..943a493 100644
--- a/src/sqlfluff/rules/L056.py
+++ b/src/sqlfluff/rules/tsql/TQ01.py
@@ -1,13 +1,11 @@
-"""Implementation of Rule L056."""
+"""Implementation of Rule TQ01."""
 from typing import Optional
 
 from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
 from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler
-from sqlfluff.core.rules.doc_decorators import document_groups
 
 
-@document_groups
-class Rule_L056(BaseRule):
+class Rule_TQ01(BaseRule):
     r"""``SP_`` prefix should not be used for user-defined stored procedures in T-SQL.
 
     **Anti-pattern**
@@ -54,7 +52,9 @@ class Rule_L056(BaseRule):
         FROM table1
     """
 
-    groups = ("all",)
+    name = "tsql.sp_prefix"
+    aliases = ("L056",)
+    groups = ("all", "tsql")
     crawl_behaviour = SegmentSeekerCrawler({"create_procedure_statement"})
 
     def _eval(self, context: RuleContext) -> Optional[LintResult]:
diff --git a/src/sqlfluff/rules/tsql/__init__.py b/src/sqlfluff/rules/tsql/__init__.py
new file mode 100644
index 0000000..1817e41
--- /dev/null
+++ b/src/sqlfluff/rules/tsql/__init__.py
@@ -0,0 +1,17 @@
+"""The tsql rules plugin bundle.
+
+This plugin bundles linting rules which apply exclusively to TSQL. At some
+point in the future it might be useful to spin this off into a separate
+installable python package, but so long as the number of rules remains
+low, it makes sense to keep it bundled with SQLFluff core.
+"""
+
+from sqlfluff.core.plugin import hookimpl
+
+from sqlfluff.rules.tsql.TQ01 import Rule_TQ01
+
+
+@hookimpl
+def get_rules():
+    """Get plugin rules."""
+    return [Rule_TQ01]
diff --git a/src/sqlfluff/utils/functional/raw_file_slices.py b/src/sqlfluff/utils/functional/raw_file_slices.py
index afa298e..b1e167c 100644
--- a/src/sqlfluff/utils/functional/raw_file_slices.py
+++ b/src/sqlfluff/utils/functional/raw_file_slices.py
@@ -49,7 +49,8 @@ class RawFileSlices(tuple):
         buff = []
         for slice_ in self[start_index + 1 : stop_index]:
             if loop_while is not None and not loop_while(slice_):
-                break
+                # NOTE: This likely needs more tests.
+                break  # pragma: no cover
             if select_if is None or select_if(slice_):
                 buff.append(slice_)
         return RawFileSlices(*buff, templated_file=self.templated_file)
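
For orientation, `select` here walks the slices strictly between `start_index` and `stop_index`: `loop_while` aborts the whole scan on its first failure, while `select_if` merely filters. The same control flow on a plain list (a standalone sketch, not SQLFluff API):

from typing import Callable, List, Optional, TypeVar

T = TypeVar("T")


def select_between(
    items: List[T],
    start_index: int,
    stop_index: int,
    select_if: Optional[Callable[[T], bool]] = None,
    loop_while: Optional[Callable[[T], bool]] = None,
) -> List[T]:
    """Collect items strictly between two indices, with optional predicates."""
    buff = []
    for item in items[start_index + 1 : stop_index]:
        if loop_while is not None and not loop_while(item):
            break  # abort the whole scan, as in the hunk above
        if select_if is None or select_if(item):
            buff.append(item)
    return buff


# Take even numbers until the first value >= 7:
print(select_between(list(range(10)), 0, 9,
                     select_if=lambda n: n % 2 == 0,
                     loop_while=lambda n: n < 7))  # -> [2, 4, 6]
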
diff --git a/src/sqlfluff/utils/functional/segment_predicates.py b/src/sqlfluff/utils/functional/segment_predicates.py
index 7b22161..25679f6 100644
--- a/src/sqlfluff/utils/functional/segment_predicates.py
+++ b/src/sqlfluff/utils/functional/segment_predicates.py
@@ -13,10 +13,7 @@ from typing import Callable, Optional
 from sqlfluff.core.parser import BaseSegment
 from sqlfluff.utils.functional.raw_file_slices import RawFileSlices
 from sqlfluff.utils.functional.templated_file_slices import TemplatedFileSlices
-from sqlfluff.core.templaters.base import (
-    RawFileSlice,
-    TemplatedFile,
-)
+from sqlfluff.core.templaters.base import TemplatedFile
 
 
 def raw_is(*raws: str) -> Callable[[BaseSegment], bool]:  # pragma: no cover
@@ -188,7 +185,7 @@ def templated_slices(
         )  # pragma: no cover
     # :TRICKY: We don't use _find_slice_indices_of_templated_pos() here because
     # it treats TemplatedFileSlice.templated_slice.stop as inclusive, not
-    # exclusive. Other parts of SQLFluff rely on this behavior, but we don't
+    # exclusive. Other parts of SQLFluff rely on this behaviour, but we don't
     # want it. It's easy enough to do this ourselves.
     start = segment.pos_marker.templated_slice.start
     stop = segment.pos_marker.templated_slice.stop
@@ -198,22 +195,3 @@ def templated_slices(
         if (stop > slice_.templated_slice.start and start < slice_.templated_slice.stop)
     ]
     return TemplatedFileSlices(*templated_slices, templated_file=templated_file)
-
-
-def raw_slice(segment: BaseSegment, raw_slice_: RawFileSlice) -> str:
-    """Return the portion of a segment's source provided by raw_slice."""
-    result = ""
-    if not segment.pos_marker:
-        raise ValueError(
-            'raw_slice: "segment" parameter must have pos_marker set.'
-        )  # pragma: no cover
-    seg_start = segment.pos_marker.source_slice.start
-    seg_stop = segment.pos_marker.source_slice.stop
-    if seg_start != seg_stop:
-        start = max(seg_start, raw_slice_.source_idx)
-        stop = min(
-            seg_stop,
-            raw_slice_.source_idx + len(raw_slice_.raw),
-        )
-        result = segment.pos_marker.templated_file.source_str[slice(start, stop)]
-    return result
diff --git a/src/sqlfluff/utils/identifers.py b/src/sqlfluff/utils/identifers.py
new file mode 100644
index 0000000..0357be5
--- /dev/null
+++ b/src/sqlfluff/utils/identifers.py
@@ -0,0 +1,32 @@
+"""Helper utilities for identifiers.
+
+These are primarily common functions used by multiple rule
+bundles. Defined here to avoid duplication, but also to avoid
+circular imports.
+"""
+
+from typing import Tuple
+
+from sqlfluff.core.parser import BaseSegment
+
+
+def identifiers_policy_applicable(
+    policy: str, parent_stack: Tuple[BaseSegment, ...]
+) -> bool:
+    """Does `(un)quoted_identifiers_policy` apply to this segment?
+
+    This method is used in CP02, RF04 and RF05.
+    """
+    if policy == "all":
+        return True
+    if policy == "none":
+        return False
+    is_alias = parent_stack and parent_stack[-1].is_type(
+        "alias_expression", "column_definition", "with_compound_statement"
+    )
+    if policy == "aliases" and is_alias:
+        return True
+    is_inside_from = any(p.is_type("from_clause") for p in parent_stack)
+    if policy == "column_aliases" and is_alias and not is_inside_from:
+        return True
+    return False
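
Since `identifiers_policy_applicable` only calls `is_type` on the parent stack, its decision table is easy to exercise with a duck-typed stub (a sketch; `FakeSeg` is invented for illustration, and the import path uses the module name as spelled upstream):

from sqlfluff.utils.identifers import identifiers_policy_applicable


class FakeSeg:
    """Minimal stand-in exposing only the is_type API used above."""

    def __init__(self, *types):
        self._types = set(types)

    def is_type(self, *seg_type):
        return bool(self._types.intersection(seg_type))


alias_stack = (FakeSeg("select_statement"), FakeSeg("alias_expression"))
from_stack = (FakeSeg("from_clause"), FakeSeg("alias_expression"))

print(identifiers_policy_applicable("all", ()))                      # True
print(identifiers_policy_applicable("none", alias_stack))            # False
print(identifiers_policy_applicable("aliases", alias_stack))         # True
print(identifiers_policy_applicable("column_aliases", alias_stack))  # True
print(identifiers_policy_applicable("column_aliases", from_stack))   # False
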
diff --git a/src/sqlfluff/utils/reflow/config.py b/src/sqlfluff/utils/reflow/config.py
index f58eca0..c2ecbe0 100644
--- a/src/sqlfluff/utils/reflow/config.py
+++ b/src/sqlfluff/utils/reflow/config.py
@@ -4,7 +4,7 @@
 # Until we have a proper structure this will work.
 # TODO: Migrate this to the config file.
 from dataclasses import dataclass
-from typing import AbstractSet, Dict, Set, Optional
+from typing import AbstractSet, Dict, FrozenSet, Set, Optional
 
 from sqlfluff.core.config import FluffConfig
 from sqlfluff.utils.reflow.depthmap import DepthInfo
@@ -57,9 +57,19 @@ class ReflowConfig:
 
     _config_dict: ConfigDictType
     config_types: Set[str]
+    # In production, these values are almost _always_ set because we
+    # use `.from_fluff_config`, but the defaults are here to aid in
+    # testing.
+    tab_space_size: int = 4
+    indent_unit: str = "space"
+    max_line_length: int = 80
+    hanging_indents: bool = False
+    skip_indentation_in: FrozenSet[str] = frozenset()
+    allow_implicit_indents: bool = False
+    trailing_comments: str = "before"
 
     @classmethod
-    def from_dict(cls, config_dict: ConfigDictType):
+    def from_dict(cls, config_dict: ConfigDictType, **kwargs):
         """Construct a ReflowConfig from a dict."""
         config_types = set(config_dict.keys())
         # Enrich any of the "align" keys with what they're aligning with.
@@ -74,12 +84,25 @@ class ReflowConfig:
                         if config_dict[seg_type].get("align_scope", None):
                             new_key += ":" + config_dict[seg_type]["align_scope"]
                     config_dict[seg_type][key] = new_key
-        return cls(_config_dict=config_dict, config_types=config_types)
+        return cls(_config_dict=config_dict, config_types=config_types, **kwargs)
 
     @classmethod
     def from_fluff_config(cls, config: FluffConfig):
         """Constructs a ReflowConfig from a FluffConfig."""
-        return cls.from_dict(config.get_section(["layout", "type"]))
+        return cls.from_dict(
+            config.get_section(["layout", "type"]),
+            indent_unit=config.get("indent_unit", ["indentation"]),
+            tab_space_size=config.get("tab_space_size", ["indentation"]),
+            hanging_indents=config.get("hanging_indents", ["indentation"]),
+            max_line_length=config.get("max_line_length"),
+            skip_indentation_in=frozenset(
+                config.get("skip_indentation_in", ["indentation"]).split(",")
+            ),
+            allow_implicit_indents=config.get(
+                "allow_implicit_indents", ["indentation"]
+            ),
+            trailing_comments=config.get("trailing_comments", ["indentation"]),
+        )
 
     def get_block_config(
         self,
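
With these new keyword arguments the indentation settings travel alongside the per-segment spacing dict, instead of being looked up separately. A hedged sketch of building a config directly, with illustrative values:

from sqlfluff.utils.reflow.config import ReflowConfig

# Per-segment spacing config, as would come from [layout:type:...] sections;
# the keyword arguments override the new indentation-related defaults.
cfg = ReflowConfig.from_dict(
    {"comma": {"spacing_before": "touch", "line_position": "trailing"}},
    tab_space_size=2,
    max_line_length=120,
    allow_implicit_indents=True,
)
print(cfg.tab_space_size, cfg.max_line_length, cfg.allow_implicit_indents)
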
diff --git a/src/sqlfluff/utils/reflow/depthmap.py b/src/sqlfluff/utils/reflow/depthmap.py
index 4cbdce1..92ce5bb 100644
--- a/src/sqlfluff/utils/reflow/depthmap.py
+++ b/src/sqlfluff/utils/reflow/depthmap.py
@@ -23,11 +23,16 @@ class StackPosition:
     @staticmethod
     def _stack_pos_interpreter(path_step: PathStep) -> str:
         """Interpret a path step for stack_positions."""
-        if path_step.idx == 0 and path_step.idx == path_step.len - 1:
+        # If no code, then no.
+        if not path_step.code_idxs:
+            return ""
+        # If there's only one code element, this must be it.
+        elif len(path_step.code_idxs) == 1:
             return "solo"
-        elif path_step.idx == 0:
+        # Check for whether first or last code element
+        elif path_step.idx == min(path_step.code_idxs):
             return "start"
-        elif path_step.idx == path_step.len - 1:
+        elif path_step.idx == max(path_step.code_idxs):
             return "end"
         else:
             return ""  # NOTE: Empty string evaluates as falsy.
diff --git a/src/sqlfluff/utils/reflow/elements.py b/src/sqlfluff/utils/reflow/elements.py
index 62ee9e2..30ed225 100644
--- a/src/sqlfluff/utils/reflow/elements.py
+++ b/src/sqlfluff/utils/reflow/elements.py
@@ -5,11 +5,15 @@ import logging
 from dataclasses import dataclass
 from typing import Dict, List, Optional, Sequence, Set, Tuple, Type, Union, cast
 
-from sqlfluff.core.parser import BaseSegment, RawSegment
+from sqlfluff.core.parser import PositionMarker
 from sqlfluff.core.parser.segments import (
+    BaseSegment,
+    RawSegment,
     NewlineSegment,
     WhitespaceSegment,
     TemplateSegment,
+    Indent,
+    SourceFix,
 )
 from sqlfluff.core.rules.base import LintFix, LintResult
 
@@ -30,6 +34,27 @@ from sqlfluff.utils.reflow.respace import (
 reflow_logger = logging.getLogger("sqlfluff.rules.reflow")
 
 
+def get_consumed_whitespace(segment: Optional[RawSegment]) -> Optional[str]:
+    """A helper function to extract possible consumed whitespace.
+
+    Args:
+        segment (:obj:`RawSegment`, optional): A segment to test for
+            suitability and extract the source representation of if
+            appropriate. If passed None, then returns None.
+
+    Returns:
+        Returns the :code:`source_str` if the segment is of type
+        :code:`placeholder` and has a :code:`block_type` of
+        :code:`literal`. Otherwise None.
+    """
+    if not segment or not segment.is_type("placeholder"):
+        return None
+    placeholder = cast(TemplateSegment, segment)
+    if placeholder.block_type != "literal":
+        return None
+    return placeholder.source_str
+
+
 @dataclass(frozen=True)
 class ReflowElement:
     """Base reflow element class."""
@@ -53,9 +78,26 @@ class ReflowElement:
         """Get the current raw representation."""
         return "".join(seg.raw for seg in self.segments)
 
+    @property
+    def pos_marker(self) -> Optional[PositionMarker]:
+        """Get the first position marker of the element."""
+        for seg in self.segments:
+            if seg.pos_marker:
+                return seg.pos_marker
+        return None
+
     def num_newlines(self) -> int:
-        """Return the number of newlines in this element."""
-        return sum(bool("newline" in seg.class_types) for seg in self.segments)
+        """Return the number of newlines in this element.
+
+        These newlines are either newline segments or contained
+        within consumed sections of whitespace. This counts
+        both.
+        """
+        return sum(
+            bool("newline" in seg.class_types)
+            + (get_consumed_whitespace(seg) or "").count("\n")
+            for seg in self.segments
+        )
 
 
 @dataclass(frozen=True)
@@ -133,9 +175,21 @@ class ReflowBlock(ReflowElement):
 
 
 def _indent_description(indent: str):
-    """Construct a human readable description of the indent."""
+    """Construct a human readable description of the indent.
+
+    NOTE: We operate assuming that the "correct" indent is
+    never a mix of tabs and spaces. That means if the provided
+    indent *does* contain both, this is likely a case where we
+    are matching a pre-existing indent, and we can assume that
+    the *description* of that indent is non-critical. To handle
+    that situation gracefully we just return "mixed indent".
+
+    See: https://github.com/sqlfluff/sqlfluff/issues/4255
+    """
     if indent == "":
         return "no indent"
+    elif " " in indent and "\t" in indent:
+        return "mixed indent"
     elif indent[0] == " ":
         assert all(c == " " for c in indent)
         return f"indent of {len(indent)} spaces"
@@ -146,6 +200,53 @@ def _indent_description(indent: str):
         raise NotImplementedError(f"Invalid indent construction: {indent!r}")
 
 
+@dataclass(frozen=True)
+class IndentStats:
+    """Dataclass to hold summary of indents in a point.
+
+    Attributes:
+        impulse (int): The net change when summing the impulses
+            of all the consecutive indent or dedent segments in
+            a point.
+        trough (int): The lowest point reached when summing the
+            impulses (in order) of all the consecutive indent or
+            dedent segments in a point.
+        implicit_indents (tuple of int): The indent balance
+            corresponding to any detected (and enabled) implicit
+            indents. This follows the usual convention that indents
+            are identified by their "uphill" side. A positive indent
+            is identified by the indent balance _after_ and a negative
+            indent is identified by the indent balance _before_.
+    """
+
+    impulse: int
+    trough: int
+    # Defaults to an empty tuple if unset.
+    implicit_indents: Tuple[int, ...] = ()
+
+    @classmethod
+    def from_combination(cls, first: Optional["IndentStats"], second: "IndentStats"):
+        """Create IndentStats from two consecutive IndentStats.
+
+        This is mostly used for combining the effects of indent and dedent
+        tokens either side of a comment.
+
+        NOTE: The *first* is considered optional, because if we're
+        calling this function, we're assuming that there's always
+        a second.
+        """
+        # First check for the trivial case that we only have one.
+        if not first:
+            return second
+
+        # Otherwise, combine the two into one.
+        return cls(
+            first.impulse + second.impulse,
+            min(first.trough, first.impulse + second.trough),
+            second.implicit_indents,
+        )
+
+
 @dataclass(frozen=True)
 class ReflowPoint(ReflowElement):
     """Class for keeping track of editable elements in reflow.
@@ -164,26 +265,96 @@ class ReflowPoint(ReflowElement):
     """
 
     def _get_indent_segment(self) -> Optional[RawSegment]:
-        """Get the current indent segment (if there)."""
+        """Get the current indent segment (if there).
+
+        NOTE: This only returns _untemplated_ indents. If templated
+        newline or whitespace segments are found they are skipped.
+        """
         indent = None
         for seg in reversed(self.segments):
-            if seg.is_type("newline"):
+            if seg.pos_marker and not seg.pos_marker.is_literal():
+                # Skip any templated elements.
+                # NOTE: It must _have_ a position marker at this
+                # point however to take this route. A segment
+                # without a position marker at all is an edit
+                # or insertion, and so should still be considered.
+                continue
+            elif seg.is_type("newline"):
                 return indent
             elif seg.is_type("whitespace"):
                 indent = seg
+            elif "\n" in (get_consumed_whitespace(seg) or ""):
+                # Consumed whitespace case.
+                # NOTE: In this situation, we're not looking for
+                # separate newline and indent segments, we're
+                # making the assumption that they'll be together
+                # which I think is a safe one for now.
+                return seg
         # i.e. if we never find a newline, it's not an indent.
         return None
 
     def get_indent(self) -> Optional[str]:
         """Get the current indent (if there)."""
+        # If no newlines, it's not an indent. Return None.
+        if not self.num_newlines():
+            return None
+        # If there are newlines but no indent segment, return "".
         seg = self._get_indent_segment()
-        return seg.raw if seg else None
+        consumed_whitespace = get_consumed_whitespace(seg)
+        if consumed_whitespace:  # pragma: no cover
+            # Return last bit after newline.
+            # NOTE: Not tested, because usually this would happen
+            # directly via _get_indent_segment.
+            return consumed_whitespace.split("\n")[-1]
+        return seg.raw if seg else ""
+
+    def get_indent_impulse(
+        self,
+        allow_implicit_indents: bool = False,
+        following_class_types: Set[str] = set(),
+    ) -> IndentStats:
+        """Get the change in intended indent balance from this point.
+
+        NOTE: The reason we check `following_class_types` is because
+        bracketed expressions behave a little differently and are an
+        exception to the normal implicit indent rules. For implicit
+        indents which precede bracketed expressions, the implicit indent
+        is treated as a normal indent.
+
+        Returns:
+            :obj:`IndentStats`: The ``impulse`` is the raw net
+                impulse. The ``trough`` is the deepest trough in the
+                indent balance through the values, to allow wiping of buffers.
+        """
+        trough = 0
+        running_sum = 0
+        implicit_indents = []
+        for seg in self.segments:
+            if seg.is_type("indent"):
+                indent_seg = cast(Indent, seg)
+                running_sum += indent_seg.indent_val
+                # Do we need to add a new implicit indent?
+                if (
+                    allow_implicit_indents
+                    and indent_seg.is_implicit
+                    and "start_bracket" not in following_class_types
+                ):
+                    implicit_indents.append(running_sum)
+                # NOTE: We don't check for removal of implicit indents
+                # because it's unlikely that one would be opened, and then
+                # closed within the same point. That would probably be the
+                # sign of a bug in the dialect.
+            if running_sum < trough:
+                trough = running_sum
+        return IndentStats(running_sum, trough, tuple(implicit_indents))
 
     def indent_to(
         self,
         desired_indent: str,
         after: Optional[BaseSegment] = None,
         before: Optional[BaseSegment] = None,
+        description: Optional[str] = None,
+        source: Optional[str] = None,
     ) -> Tuple[List[LintResult], "ReflowPoint"]:
         """Coerce a point to have a particular indent.
 
@@ -194,7 +365,11 @@ class ReflowPoint(ReflowElement):
         More specifically, the newline is *inserted before* the existing
         whitespace, with the new indent being a *replacement* for that
         same whitespace.
+
+        For placeholder newlines or indents we generate appropriate
+        source fixes.
         """
+        assert "\n" not in desired_indent, "Newline found in desired indent."
         # Get the indent (or in the case of no newline, the last whitespace)
         indent_seg = self._get_indent_segment()
         reflow_logger.debug(
@@ -203,22 +378,79 @@ class ReflowPoint(ReflowElement):
             desired_indent,
             self.num_newlines(),
         )
-        if self.num_newlines():
-            # There is already a newline.
+
+        if indent_seg and indent_seg.is_type("placeholder"):
+            # Handle the placeholder case.
+            indent_seg = cast(TemplateSegment, indent_seg)
+            # There should always be a newline, so assert that.
+            assert "\n" in indent_seg.source_str
+            # We should always replace the section _containing_ the
+            # newline, rather than just bluntly inserting. This
+            # makes slicing later easier.
+            current_indent = indent_seg.source_str.split("\n")[-1]
+            source_slice = slice(
+                # Minus _one more_ to cover the newline too.
+                indent_seg.pos_marker.source_slice.stop - len(current_indent) - 1,
+                indent_seg.pos_marker.source_slice.stop,
+            )
+
+            new_source_fix = SourceFix(
+                "\n" + desired_indent,
+                source_slice,
+                # The templated slice is going to be a zero slice _anyway_.
+                indent_seg.pos_marker.templated_slice,
+            )
+
+            if new_source_fix in indent_seg.source_fixes:  # pragma: no cover
+                # NOTE: If we're trying to reapply the same fix, don't.
+                # Just return an error without the fixes. This is probably
+                # a bug if we're taking this route, but this clause will help
+                # catch bugs faster if they occur.
+                reflow_logger.warning(
+                    "Attempted to apply a duplicate source fix to %r. "
+                    "Returning this time without fix.",
+                    indent_seg.pos_marker.source_str(),
+                )
+                fixes = []
+                new_segments = self.segments
+            else:
+                new_placeholder = indent_seg.edit(
+                    source_fixes=[new_source_fix],
+                    source_str=indent_seg.source_str[: -len(current_indent) + 1]
+                    + desired_indent,
+                )
+                fixes = [LintFix.replace(indent_seg, [new_placeholder])]
+                new_segments = tuple(
+                    new_placeholder if seg is indent_seg else seg
+                    for seg in self.segments
+                )
+
+            return [
+                LintResult(
+                    indent_seg,
+                    fixes,
+                    description=description
+                    or f"Expected {_indent_description(desired_indent)}.",
+                    source=source,
+                )
+            ], ReflowPoint(new_segments)
+
+        elif self.num_newlines():
+            # There is already a newline. Is there an indent?
             if indent_seg:
                 # Coerce existing indent to desired.
                 if indent_seg.raw == desired_indent:
                     # Trivial case. Indent already correct
                     return [], self
                 elif desired_indent == "":
-                    # Coerce to no indent. We don't want the indent. Delete it.
-                    new_indent = indent_seg.edit(desired_indent)
                     idx = self.segments.index(indent_seg)
                     return [
                         LintResult(
                             indent_seg,
+                            # Coerce to no indent. We don't want the indent. Delete it.
                             [LintFix.delete(indent_seg)],
-                            description="Line should not be indented.",
+                            description=description or "Line should not be indented.",
+                            source=source,
                         )
                     ], ReflowPoint(self.segments[:idx] + self.segments[idx + 1 :])
 
@@ -229,7 +461,9 @@ class ReflowPoint(ReflowElement):
                     LintResult(
                         indent_seg,
                         [LintFix.replace(indent_seg, [new_indent])],
-                        description=f"Expected {_indent_description(desired_indent)}.",
+                        description=description
+                        or f"Expected {_indent_description(desired_indent)}.",
+                        source=source,
                     )
                 ], ReflowPoint(
                     self.segments[:idx] + (new_indent,) + self.segments[idx + 1 :]
@@ -237,16 +471,45 @@ class ReflowPoint(ReflowElement):
 
             else:
                 # There is a newline, but no indent. Make one after the newline
-                # Find the index of the last newline.
-                for idx in range(len(self.segments) - 1, 0, -1):
+                # Find the index of the last newline (there _will_ be one because
+                # we checked self.num_newlines() above).
+
+                # Before going further, check we have a non-zero indent.
+                if not desired_indent:
+                    # We're trying to coerce a non-existent indent to zero. This
+                    # means we're already ok.
+                    return [], self
+
+                for idx in range(len(self.segments) - 1, -1, -1):
+                    # NOTE: Must be a _literal_ newline, not a templated one.
+                    # https://github.com/sqlfluff/sqlfluff/issues/4367
                     if self.segments[idx].is_type("newline"):
-                        break
+                        if self.segments[idx].pos_marker.is_literal():
+                            break
+
                 new_indent = WhitespaceSegment(desired_indent)
                 return [
                     LintResult(
-                        self.segments[idx],
-                        [LintFix.create_after(self.segments[idx], [new_indent])],
-                        description=f"Expected {_indent_description(desired_indent)}.",
+                        # The anchor for the *result* should be the segment
+                        # *after* the newline, otherwise the location of the fix
+                        # is confusing.
+                        # For this method, `before` is optional, but normally
+                        # passed. If it is there, use that as the anchor
+                        # instead. We fall back to the last newline if not.
+                        before if before else self.segments[idx],
+                        # Rather than doing a `create_after` here, we're
+                        # going to do a replace. This is effectively to give a hint
+                        # to the linter that this is safe to do before a templated
+                        # placeholder. This solves some potential bugs - although
+                        # it feels a bit like a workaround.
+                        [
+                            LintFix.replace(
+                                self.segments[idx], [self.segments[idx], new_indent]
+                            )
+                        ],
+                        description=description
+                        or f"Expected {_indent_description(desired_indent)}.",
+                        source=source,
                     )
                 ], ReflowPoint(
                     self.segments[: idx + 1] + (new_indent,) + self.segments[idx + 1 :]
@@ -282,7 +545,7 @@ class ReflowPoint(ReflowElement):
                         before,
                         [new_newline, new_indent],
                     )
-                    description = (
+                    description = description or (
                         "Expected line break and "
                         f"{_indent_description(desired_indent)} "
                         f"before {before_raw!r}."
@@ -298,7 +561,7 @@ class ReflowPoint(ReflowElement):
                         after,
                         [new_newline, new_indent],
                     )
-                    description = (
+                    description = description or (
                         "Expected line break and "
                         f"{_indent_description(desired_indent)} "
                         f"after {after_raw!r}."
@@ -316,34 +579,37 @@ class ReflowPoint(ReflowElement):
                 else:
                     new_segs = [new_newline, ws_seg.edit(desired_indent)]
                 idx = self.segments.index(ws_seg)
-                # Prefer before, because it makes the anchoring better.
-                if before:
-                    description = (
-                        "Expected line break and "
-                        f"{_indent_description(desired_indent)} "
-                        f"before {before.raw!r}."
-                    )
-                elif after:
-                    description = (
-                        "Expected line break and "
-                        f"{_indent_description(desired_indent)} "
-                        f"after {after.raw!r}."
-                    )
-                else:  # pragma: no cover
-                    # NOTE: Doesn't have test coverage because there's
-                    # normally an `after` or `before` value, so this
-                    # clause is unused.
-                    description = (
-                        "Expected line break and "
-                        f"{_indent_description(desired_indent)}."
-                    )
+                if not description:
+                    # Prefer before, because it makes the anchoring better.
+                    if before:
+                        description = (
+                            "Expected line break and "
+                            f"{_indent_description(desired_indent)} "
+                            f"before {before.raw!r}."
+                        )
+                    elif after:
+                        description = (
+                            "Expected line break and "
+                            f"{_indent_description(desired_indent)} "
+                            f"after {after.raw!r}."
+                        )
+                    else:  # pragma: no cover
+                        # NOTE: Doesn't have test coverage because there's
+                        # normally an `after` or `before` value, so this
+                        # clause is unused.
+                        description = (
+                            "Expected line break and "
+                            f"{_indent_description(desired_indent)}."
+                        )
                 fix = LintFix.replace(ws_seg, new_segs)
                 new_point = ReflowPoint(
                     self.segments[:idx] + tuple(new_segs) + self.segments[idx + 1 :]
                 )
                 anchor = ws_seg
 
-            return [LintResult(anchor, fixes=[fix], description=description)], new_point
+            return [
+                LintResult(anchor, fixes=[fix], description=description, source=source)
+            ], new_point
 
     def respace_point(
         self,
@@ -371,7 +637,7 @@ class ReflowPoint(ReflowElement):
             prev_block, next_block, strip_newlines
         )
 
-        reflow_logger.debug("Respacing: %s", self)
+        reflow_logger.debug("* Respacing: %r @ %s", self.raw, self.pos_marker)
 
         # The buffer is used to create the new reflow point to return
         segment_buffer, last_whitespace, new_results = process_spacing(
@@ -405,7 +671,13 @@ class ReflowPoint(ReflowElement):
             if last_whitespace:
                 ws_idx = self.segments.index(last_whitespace)
                 if ws_idx > 0:
-                    prev_seg = self.segments[ws_idx - 1]
+                    # NOTE: Iterate by index so that we don't slice the full range.
+                    for prev_seg_idx in range(ws_idx - 1, -1, -1):
+                        prev_seg = self.segments[prev_seg_idx]
+                        # Skip past any indents
+                        if not prev_seg.is_type("indent"):
+                            break
+
                     if (
                         prev_seg.is_type("newline")
                         # Not just unequal. Must be actively _before_.
@@ -441,7 +713,7 @@ class ReflowPoint(ReflowElement):
             # Return the results.
             return existing_results + new_results, ReflowPoint(tuple(segment_buffer))
 
-        # Otherwise this is this an inline case? (i.e. no newline)
+        # Otherwise is this an inline case? (i.e. no newline)
         reflow_logger.debug(
             "    Inline case. Constraints: %s <-> %s.",
             pre_constraint,
@@ -454,6 +726,7 @@ class ReflowPoint(ReflowElement):
             segment_buffer, results = handle_respace__inline_with_space(
                 pre_constraint,
                 post_constraint,
+                prev_block,
                 next_block,
                 root_segment,
                 segment_buffer,
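
The trough arithmetic in `IndentStats.from_combination` deserves a worked example: the combined trough is the lower of the first trough and the first impulse plus the second trough. A small sketch with invented values:

from sqlfluff.utils.reflow.elements import IndentStats

# e.g. a point before a comment which indents once (impulse +1, trough 0),
# combined with one after it which dedents twice then indents once
# (impulse -1, trough -2):
first = IndentStats(impulse=1, trough=0)
second = IndentStats(impulse=-1, trough=-2)

combined = IndentStats.from_combination(first, second)
# impulse: 1 + (-1) = 0; trough: min(0, 1 + (-2)) = -1
print(combined)  # IndentStats(impulse=0, trough=-1, implicit_indents=())

# With no first point, the second passes through unchanged.
print(IndentStats.from_combination(None, second))
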
diff --git a/src/sqlfluff/utils/reflow/helpers.py b/src/sqlfluff/utils/reflow/helpers.py
index 253c171..39d9f44 100644
--- a/src/sqlfluff/utils/reflow/helpers.py
+++ b/src/sqlfluff/utils/reflow/helpers.py
@@ -1,10 +1,16 @@
 """Helper utilities for reflow."""
 
 from itertools import chain
+import logging
 from typing import Iterable, List
 
 from sqlfluff.core.rules.base import LintFix, LintResult
-from sqlfluff.core.parser import BaseSegment
+from sqlfluff.core.parser import RawSegment, BaseSegment
+
+# We're in the utils module, but users will expect reflow
+# logs to appear in the context of rules. Hence it's a subset
+# of the rules logger.
+reflow_logger = logging.getLogger("sqlfluff.rules.reflow")
 
 
 def fixes_from_results(results: Iterable[LintResult]) -> List[LintFix]:
@@ -25,3 +31,21 @@ def pretty_segment_name(segment: BaseSegment) -> str:
         # Reference other segments just by their type.
         # (With underscores as spaces)
         return segment.get_type().replace("_", " ")
+
+
+def deduce_line_indent(raw_segment: RawSegment, root_segment: BaseSegment) -> str:
+    """Given a raw segment, deduce the indent of its line."""
+    seg_idx = root_segment.raw_segments.index(raw_segment)
+    indent_seg = None
+    # Use range and a lookup here because it's more efficient than slicing
+    # as we only need a subset of the long series.
+    for idx in range(seg_idx, -1, -1):
+        seg = root_segment.raw_segments[idx]
+        if seg.is_code:
+            indent_seg = None
+        elif seg.is_type("whitespace"):
+            indent_seg = seg
+        elif seg.is_type("newline"):
+            break
+    reflow_logger.debug("Deduced indent for %s as %s", raw_segment, indent_seg)
+    return indent_seg.raw if indent_seg else ""
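
The backward scan in `deduce_line_indent` can be modelled on a flat token stream: walk left from the target, keep the most recent whitespace as the candidate indent, reset it whenever code intervenes, and stop at a newline. A simplified standalone version using plain `(type, raw)` tuples (illustrative only):

def deduce_line_indent_model(tokens, target_idx):
    """Return the indent string of the line containing tokens[target_idx]."""
    indent = None
    for idx in range(target_idx, -1, -1):
        tok_type, raw = tokens[idx]
        if tok_type == "code":
            indent = None  # code resets any candidate indent
        elif tok_type == "whitespace":
            indent = raw   # candidate indent for this line
        elif tok_type == "newline":
            break          # reached the start of the line
    return indent or ""


tokens = [
    ("code", "SELECT"), ("newline", "\n"),
    ("whitespace", "    "), ("code", "a"),
]
print(repr(deduce_line_indent_model(tokens, 3)))  # '    '
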
diff --git a/src/sqlfluff/utils/reflow/rebreak.py b/src/sqlfluff/utils/reflow/rebreak.py
index 913e18c..93f8afc 100644
--- a/src/sqlfluff/utils/reflow/rebreak.py
+++ b/src/sqlfluff/utils/reflow/rebreak.py
@@ -9,8 +9,11 @@ from sqlfluff.core.parser import BaseSegment
 from sqlfluff.core.rules import LintFix, LintResult
 
 from sqlfluff.utils.reflow.elements import ReflowBlock, ReflowPoint, ReflowSequenceType
-from sqlfluff.utils.reflow.reindent import deduce_line_indent
-from sqlfluff.utils.reflow.helpers import fixes_from_results, pretty_segment_name
+from sqlfluff.utils.reflow.helpers import (
+    fixes_from_results,
+    pretty_segment_name,
+    deduce_line_indent,
+)
 
 
 # We're in the utils module, but users will expect reflow
@@ -182,11 +185,25 @@ def identify_rebreak_spans(
             if elem.depth_info.stack_positions[key].idx != 0:
                 continue
             # Can we find the end?
-            for end_idx in range(idx, len(element_buffer) - 2):
+            # NOTE: It's safe to look right to the end here rather than up to
+            # -2 because we're going to end up stepping back by two in the
+            # complicated cases.
+            for end_idx in range(idx, len(element_buffer)):
                 end_elem = element_buffer[end_idx]
+                final_idx = None
+
                 if not isinstance(end_elem, ReflowBlock):
                     continue
-                if end_elem.depth_info.stack_positions[key].type in ("end", "solo"):
+                elif key not in end_elem.depth_info.stack_positions:
+                    # If we get here, it means the last block was the end.
+                    # NOTE: This feels a little hacky, but it's because of a limitation
+                    # in detecting the "end" and "solo" markers effectively in larger
+                    # sections.
+                    final_idx = end_idx - 2  # pragma: no cover
+                elif end_elem.depth_info.stack_positions[key].type in ("end", "solo"):
+                    final_idx = end_idx
+
+                if final_idx is not None:
                     # Found the end. Add it to the stack.
                     # We reference the appropriate element from the parent stack.
                     target_depth = elem.depth_info.stack_hashes.index(key)
@@ -197,7 +214,7 @@ def identify_rebreak_spans(
                         _RebreakSpan(
                             target,
                             idx,
-                            end_idx,
+                            final_idx,
                             # NOTE: this isn't pretty but until it needs to be more
                             # complex, this works.
                             elem.line_position_configs[key].split(":")[0],
@@ -205,6 +222,7 @@ def identify_rebreak_spans(
                         )
                     )
                     break
+
             # If we find the start, but not the end, it's not a problem, but
             # we won't be rebreaking this span. This is important so that we
             # don't rebreak part of something without the context of what's
@@ -243,7 +261,16 @@ def rebreak_sequence(
     # to handle comments differently. There are two other important points:
     # 1. The next newline outward before code (but passing over comments).
     # 2. The point before the next _code_ segment (ditto comments).
-    locations = [_RebreakLocation.from_span(span, elem_buff) for span in spans]
+    locations = []
+    for span in spans:
+        try:
+            locations.append(_RebreakLocation.from_span(span, elem_buff))
+        # If we try to create a location from an incomplete span (i.e. one
+        # where we're unable to find the next newline effectively), then
+        # we'll get an exception. If we do, skip that span - we wouldn't be
+        # able to work with it effectively even if we could construct it.
+        except UnboundLocalError:
+            pass
 
     # Handle each span:
     for loc in locations:
@@ -333,9 +360,24 @@ def rebreak_sequence(
                     lint_results=[],
                     anchor_on="after",
                 )
+
+                # Handle the potential case of an empty point.
+                # https://github.com/sqlfluff/sqlfluff/issues/4184
+                for i in range(loc.next.pre_code_pt_idx):
+                    if elem_buff[loc.next.pre_code_pt_idx - i].segments:
+                        create_anchor = elem_buff[
+                            loc.next.pre_code_pt_idx - i
+                        ].segments[-1]
+                        break
+                else:  # pragma: no cover
+                    # NOTE: We don't test this because we *should* always find
+                    # _something_ to anchor the creation on, even if we're
+                    # unlucky enough not to find it on the first pass.
+                    raise NotImplementedError("Could not find anchor for creation.")
+
                 fixes.append(
                     LintFix.create_after(
-                        elem_buff[loc.next.pre_code_pt_idx].segments[-1],
+                        create_anchor,
                         [loc.target],
                     )
                 )
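
The anchor search added above steps backwards through the points until it finds one that actually contains segments, since an empty `ReflowPoint` cannot anchor a `create_after` fix. The scan pattern, modelled on plain lists (a sketch, not SQLFluff API):

def find_create_anchor(points, start_idx):
    """Step back from start_idx to the first point with any segments."""
    for i in range(start_idx + 1):
        segments = points[start_idx - i]
        if segments:
            return segments[-1]  # anchor on its final segment
    raise NotImplementedError("Could not find anchor for creation.")


points = [["kw"], [], ["ws", "nl"], []]  # the last point is empty
print(find_create_anchor(points, 3))     # skips the empty point -> 'nl'
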
diff --git a/src/sqlfluff/utils/reflow/reindent.py b/src/sqlfluff/utils/reflow/reindent.py
index 66a170f..f5ae4a0 100644
--- a/src/sqlfluff/utils/reflow/reindent.py
+++ b/src/sqlfluff/utils/reflow/reindent.py
@@ -1,8 +1,31 @@
 """Methods for deducing and understanding indents."""
 
+from collections import defaultdict
+from itertools import chain
 import logging
+from typing import Iterator, List, Optional, Set, Tuple, cast, Dict, DefaultDict
+from dataclasses import dataclass
+from sqlfluff.core.errors import SQLFluffUserError
 
-from sqlfluff.core.parser import RawSegment, BaseSegment
+from sqlfluff.core.parser.segments import Indent, SourceFix
+
+from sqlfluff.core.parser import (
+    RawSegment,
+    BaseSegment,
+    NewlineSegment,
+    WhitespaceSegment,
+)
+from sqlfluff.core.parser.segments.meta import MetaSegment, TemplateSegment
+from sqlfluff.core.rules.base import LintFix, LintResult
+from sqlfluff.core.slice_helpers import slice_length
+from sqlfluff.utils.reflow.elements import (
+    ReflowBlock,
+    ReflowPoint,
+    ReflowSequenceType,
+    IndentStats,
+)
+from sqlfluff.utils.reflow.helpers import fixes_from_results
+from sqlfluff.utils.reflow.rebreak import identify_rebreak_spans, _RebreakSpan
 
 
 # We're in the utils module, but users will expect reflow
@@ -11,19 +34,2100 @@ from sqlfluff.core.parser import RawSegment, BaseSegment
 reflow_logger = logging.getLogger("sqlfluff.rules.reflow")
 
 
-def deduce_line_indent(raw_segment: RawSegment, root_segment: BaseSegment) -> str:
-    """Given a raw segment, deduce the indent of it's line."""
-    seg_idx = root_segment.raw_segments.index(raw_segment)
+def has_untemplated_newline(point: ReflowPoint) -> bool:
+    """Determine whether a point contains any literal newlines.
+
+    NOTE: We check for standard literal newlines, but also
+    potential placeholder newlines which have been consumed.
+    """
+    # If there are no newlines (or placeholders) at all - then False.
+    if not point.class_types.intersection({"newline", "placeholder"}):
+        return False
+
+    for seg in point.segments:
+        # Make sure it's not templated.
+        # NOTE: An insertion won't have a pos_marker. But that
+        # also means it's not templated.
+        if seg.is_type("newline") and (
+            not seg.pos_marker or seg.pos_marker.is_literal()
+        ):
+            return True
+        if seg.is_type("placeholder"):
+            seg = cast(TemplateSegment, seg)
+            assert (
+                seg.block_type == "literal"
+            ), "Expected only literal placeholders in ReflowPoint."
+            if "\n" in seg.source_str:
+                return True
+    return False
+
+
+@dataclass(frozen=True)
+class _IndentPoint:
+    """Temporary structure for holding metadata about an indented ReflowPoint.
+
+    We only evaluate points which either *are* line breaks or
+    contain Indent/Dedent segments.
+    """
+
+    idx: int
+    indent_impulse: int
+    indent_trough: int
+    initial_indent_balance: int
+    last_line_break_idx: Optional[int]
+    is_line_break: bool
+    # NOTE: an "untaken indent" is referenced by the value we go *up* to.
+    # i.e. An Indent segment which takes the balance from 1 to 2 but with
+    # no newline is an untaken indent of value 2.
+    # It also only covers untaken indents _before_ this point. If this point
+    # is _also_ an untaken indent, we should be able to infer that ourselves.
+    untaken_indents: Tuple[int, ...]
+
+    @property
+    def closing_indent_balance(self):
+        return self.initial_indent_balance + self.indent_impulse
+
+
+@dataclass
+class _IndentLine:
+    """Temporary structure for handing a line of indent points.
+
+    Mutable so that we can adjust the initial indent balance
+    for things like comments and templated elements, after
+    constructing all the metadata for the points on the line.
+    """
+
+    initial_indent_balance: int
+    indent_points: List[_IndentPoint]
+
+    def __repr__(self):
+        """Compressed repr method to ease logging."""
+        return (
+            f"IndentLine(iib={self.initial_indent_balance}, ipts=["
+            + ", ".join(
+                f"iPt@{ip.idx}({ip.indent_impulse}, {ip.indent_trough}, "
+                f"{ip.initial_indent_balance}, {ip.last_line_break_idx}, "
+                f"{ip.is_line_break}, {ip.untaken_indents})"
+                for ip in self.indent_points
+            )
+            + "])"
+        )
+
+    @classmethod
+    def from_points(cls, indent_points: List[_IndentPoint]):
+        # Catch edge case for first line where we'll start with a
+        # block if no initial indent.
+        if indent_points[-1].last_line_break_idx:
+            starting_balance = indent_points[0].closing_indent_balance
+        else:
+            starting_balance = 0
+        return cls(starting_balance, indent_points)
+
+    def iter_blocks(self, elements: ReflowSequenceType) -> Iterator[ReflowBlock]:
+        # Edge case for initial lines (i.e. where last_line_break is None)
+        if self.indent_points[-1].last_line_break_idx is None:
+            range_slice = slice(None, self.indent_points[-1].idx)
+        else:
+            range_slice = slice(self.indent_points[0].idx, self.indent_points[-1].idx)
+        for element in elements[range_slice]:
+            if isinstance(element, ReflowPoint):
+                continue
+            yield element
+
+    def _iter_block_segments(
+        self, elements: ReflowSequenceType
+    ) -> Iterator[RawSegment]:
+        for block in self.iter_blocks(elements):
+            yield from block.segments
+
+    def is_all_comments(self, elements: ReflowSequenceType) -> bool:
+        """Is this line made up of just comments?"""
+        block_segments = list(self._iter_block_segments(elements))
+        return bool(block_segments) and all(
+            seg.is_type("comment") for seg in block_segments
+        )
+
+    def is_all_templates(self, elements: ReflowSequenceType) -> bool:
+        """Is this line made up of just template elements?"""
+        block_segments = list(self._iter_block_segments(elements))
+        return bool(block_segments) and all(
+            seg.is_type("placeholder", "template_loop") for seg in block_segments
+        )
+
+    def desired_indent_units(self, forced_indents: List[int]):
+        """Calculate the desired indent units.
+
+        This is the heart of the indentation calculations.
+
+        First we work out how many previous indents are untaken.
+        In the easy case, we just use the number of untaken
+        indents from previous points. The more complicated example
+        is where *this point* has both dedents *and* indents. In
+        this case we use the `indent_trough` to prune any
+        previous untaken indents which were above the trough at
+        this point.
+
+        After that we calculate the indent from the incoming
+        balance, minus any relevant untaken events *plus* any
+        previously untaken indents which have been forced (i.e.
+        inserted by the same operation).
+        """
+        if self.indent_points[0].indent_trough:
+            # This says - purge any untaken indents which happened before
+            # the trough (or at least only _keep_ any which would have remained).
+            # NOTE: Minus signs are really hard to get wrong here.
+            relevant_untaken_indents = [
+                i
+                for i in self.indent_points[0].untaken_indents
+                if i
+                <= self.initial_indent_balance
+                - (
+                    self.indent_points[0].indent_impulse
+                    - self.indent_points[0].indent_trough
+                )
+            ]
+        else:
+            relevant_untaken_indents = list(self.indent_points[0].untaken_indents)
+
+        desired_indent = (
+            self.initial_indent_balance
+            - len(relevant_untaken_indents)
+            + len(forced_indents)
+        )
+
+        reflow_logger.debug(
+            "Desired Indent Calculation: IB: %s, RUI: %s, UIL: %s, "
+            "iII: %s, iIT: %s. = %s",
+            self.initial_indent_balance,
+            relevant_untaken_indents,
+            self.indent_points[0].untaken_indents,
+            self.indent_points[0].indent_impulse,
+            self.indent_points[0].indent_trough,
+            desired_indent,
+        )
+        return desired_indent
+
+    def closing_balance(self):
+        """The closing indent balance of the line."""
+        return self.indent_points[-1].closing_indent_balance
+
+    def opening_balance(self):
+        """The opening indent balance of the line.
+
+        NOTE: We use the first point for the starting balance rather than
+        the line starting balance because we're using this to detect missing
+        lines and if the line has been corrected then we don't want to do
+        that.
+        """
+        # Edge case for first line of a file (where starting indent must be zero).
+        if self.indent_points[-1].last_line_break_idx is None:
+            return 0
+        return self.indent_points[0].closing_indent_balance
+
+
+def _revise_templated_lines(lines: List[_IndentLine], elements: ReflowSequenceType):
+    """Given an initial set of individual lines. Revise templated ones.
+
+    NOTE: This mutates the `lines` argument.
+
+    We do this to ensure that templated lines are _somewhat_ consistent.
+
+    Total consistency is very hard, given templated elements
+    can be used in a wide range of places. What we do here is
+    to try and take a somewhat rules based approach, but also
+    one which should fit mostly with user expectations.
+
+    To do this we have three scenarios:
+    1. Template tags are already on the same indent.
+    2. Template tags aren't, but can be hoisted without
+       effectively crossing code to be on the same indent.
+       This effectively does the same as "reshuffling"
+       placeholders, whitespace and indent segments but
+       does so without requiring intervention on the parsed
+       file.
+    3. Template tags which actively cut across the tree (i.e.
+       start and end tags aren't at the same level and can't
+       be hoisted). In this case the tags should be indented
+       at the lowest indent of the matching set.
+
+    In doing this we have to attempt to match up template
+    tags. This might fail. As we battle-test this feature
+    there may be some interesting bugs which come up!
+
+    In addition to properly indenting block tags, we also
+    filter out any jinja tags which contain newlines because
+    if we try and fix them, we'll only fix the *initial*
+    part of it. The rest won't be seen because it's within
+    the tag.
+
+    TODO: This could be an interesting way to extend the
+    indentation algorithm to also cover indentation within
+    jinja tags.
+    """
+    reflow_logger.debug("# Revise templated lines.")
+    # Because we want to modify the original lines, we're going
+    # to use their list index to keep track of them.
+    depths = defaultdict(list)
+    grouped = defaultdict(list)
+    for idx, line in enumerate(lines):
+        if line.is_all_templates(elements):
+            # We can't assume they're all a single block.
+            # But if they _start_ with a block, we should
+            # respect the indent of that block.
+            segment = cast(
+                MetaSegment, elements[line.indent_points[-1].idx - 1].segments[0]
+            )
+            assert segment.is_type("placeholder", "template_loop")
+            # If it's not got a block uuid, it's not a block, so it
+            # should just be indented as usual. No need to revise.
+            # e.g. comments or variables
+            if segment.block_uuid:
+                grouped[segment.block_uuid].append(idx)
+                depths[segment.block_uuid].append(line.initial_indent_balance)
+                reflow_logger.debug(
+                    "  UUID: %s @ %s = %r",
+                    segment.block_uuid,
+                    idx,
+                    segment.pos_marker.source_str(),
+                )
+
+    # Sort through the lines, so we do the *most* indented first.
+    sorted_group_indices = sorted(
+        grouped.keys(), key=lambda x: max(depths[x]), reverse=True
+    )
+    reflow_logger.debug("  Sorted Group UUIDs: %s", sorted_group_indices)
+
+    for group_idx, group_uuid in enumerate(sorted_group_indices):
+        reflow_logger.debug("  Evaluating Group UUID: %s", group_uuid)
+
+        group_lines = grouped[group_uuid]
+
+        # Check for case 1.
+        if len(set(lines[idx].initial_indent_balance for idx in group_lines)) == 1:
+            reflow_logger.debug("    Case 1: All the same")
+            continue
+
+        # Check for case 2.
+        # In this scenario, we only need to check the adjacent points.
+        # If there's any wiggle room, we pick the lowest option.
+        options: List[Set[int]] = []
+        for idx in group_lines:
+            line = lines[idx]
+
+            steps: Set[int] = {line.initial_indent_balance}
+            # Run backward through the pre point.
+            indent_balance = line.initial_indent_balance
+            first_point_idx = line.indent_points[0].idx
+            first_block = elements[first_point_idx + 1]
+
+            assert first_block.segments
+            first_segment = first_block.segments[0]
+            if first_segment.is_type("template_loop"):
+                # For template loops, don't count the line. They behave
+                # strangely.
+                continue
+
+            for seg in elements[first_point_idx].segments[::-1]:
+                if seg.is_type("indent"):
+                    # If it's the one straight away, after a block_end or
+                    # block_mid, skip it. We know this because it will have
+                    # block_uuid.
+                    if cast(Indent, seg).block_uuid:
+                        continue
+                    # Minus because we're going backward.
+                    indent_balance -= cast(Indent, seg).indent_val
+                    steps.add(indent_balance)
+            # Run forward through the post point.
+            indent_balance = line.initial_indent_balance
+            last_point_idx = line.indent_points[-1].idx
+            for seg in elements[last_point_idx].segments:
+                if seg.is_type("indent"):
+                    # If it's the one straight away, after a block_start or
+                    # block_mid, skip it. We know this because it will have
+                    # block_uuid.
+                    if cast(Indent, seg).block_uuid:
+                        continue
+                    # Positive because we're going forward.
+                    indent_balance += cast(Indent, seg).indent_val
+                    steps.add(indent_balance)
+
+            # NOTE: Edge case for consecutive blocks of the same type.
+            # If we're next to another block which is "inner" (i.e. has
+            # already been handled), we can assume all options up to its
+            # new indent are open for use.
+
+            _case_type = None
+            if first_segment.is_type("placeholder"):
+                _case_type = cast(TemplateSegment, first_segment).block_type
+
+            if _case_type in ("block_start", "block_mid"):
+                # Is following _line_ AND element also a block?
+                # i.e. nothing else between.
+                if (
+                    idx + 1 < len(lines)
+                    and first_point_idx + 3 == lines[idx + 1].indent_points[0].idx + 1
+                ):
+                    seg = elements[first_point_idx + 3].segments[0]
+                    if seg.is_type("placeholder"):
+                        if cast(TemplateSegment, seg).block_type == "block_start":
+                            _inter_steps = list(
+                                range(
+                                    line.initial_indent_balance,
+                                    lines[idx + 1].initial_indent_balance,
+                                )
+                            )
+                            reflow_logger.debug(
+                                "      Precedes block. Adding Steps: %s", _inter_steps
+                            )
+                            steps.update(_inter_steps)
+
+            if _case_type in ("block_end", "block_mid"):
+                # Is preceding _line_ AND element also a block?
+                # i.e. nothing else between.
+                if first_point_idx - 1 == lines[idx - 1].indent_points[0].idx + 1:
+                    seg = elements[first_point_idx - 1].segments[0]
+                    if seg.is_type("placeholder"):
+                        if cast(TemplateSegment, seg).block_type == "block_end":
+                            _inter_steps = list(
+                                range(
+                                    line.initial_indent_balance,
+                                    lines[idx - 1].initial_indent_balance,
+                                )
+                            )
+                            reflow_logger.debug(
+                                "      Follows block. Adding Steps: %s", _inter_steps
+                            )
+                            steps.update(_inter_steps)
+
+            reflow_logger.debug(
+                "    Line %s: Initial Balance: %s Options: %s",
+                idx,
+                lines[idx].initial_indent_balance,
+                steps,
+            )
+            options.append(steps)
+
+        # We should also work out what all the indents are _between_
+        # these options and make sure we don't go above that.
+
+        # Because there might be _outer_ loops, we look for spans
+        # between blocks in this group which don't contain any blocks
+        # from _outer_ loops. i.e. we can't just take all the lines from
+        # first to last.
+        last_group_line: Optional[int] = group_lines[0]  # last = previous.
+        net_balance = 0
+        balance_trough: Optional[int] = None
+        temp_balance_trough: Optional[int] = None
+        inner_lines = []
+        reflow_logger.debug("    Intermediate lines:")
+        # NOTE: +1 on the last range to make sure we _do_ process the last one.
+        for idx in range(group_lines[0] + 1, group_lines[-1] + 1):
+            for grp in sorted_group_indices[group_idx + 1 :]:
+                # found an "outer" group line, reset tracker.
+                if idx in grouped[grp]:
+                    last_group_line = None
+                    net_balance = 0
+                    temp_balance_trough = None  # Unset the buffer
+                    break
+
+            # Is it in this group?
+            if idx in group_lines:
+                # Stash the line indices of the inner lines.
+                if last_group_line:
+                    _inner_lines = list(range(last_group_line + 1, idx))
+                    reflow_logger.debug(
+                        "      Extending Intermediates with %s", _inner_lines
+                    )
+                    inner_lines.extend(_inner_lines)
+                # If we have a temp balance, crystallise it.
+                if temp_balance_trough is not None:
+                    balance_trough = (
+                        temp_balance_trough
+                        if balance_trough is None
+                        else min(balance_trough, temp_balance_trough)
+                    )
+                    reflow_logger.debug(
+                        "      + Save Trough: %s (min = %s)",
+                        temp_balance_trough,
+                        balance_trough,
+                    )
+                    temp_balance_trough = None
+                last_group_line = idx
+                net_balance = 0
+            elif last_group_line:
+                # It's not a group line, but we're still tracking. Update with impulses.
+                is_subgroup_line = any(
+                    idx in grouped[grp] for grp in sorted_group_indices[:group_idx]
+                )
+                for ip in lines[idx].indent_points[:-1]:
+                    # Don't count the trough on group lines we've already covered.
+                    if "placeholder" in elements[ip.idx + 1].class_types:
+                        _block_type = cast(
+                            TemplateSegment, elements[ip.idx + 1].segments[0]
+                        ).block_type
+                        if _block_type in ("block_end", "block_mid"):
+                            reflow_logger.debug(
+                                "      Skipping trough before %r", _block_type
+                            )
+                            continue
+                    if ip.indent_trough < 0 and not is_subgroup_line:
+                        # NOTE: We set it temporarily here, because if we're going
+                        # to pass an outer template loop then we should discard it.
+                        # i.e. only count intervals within inner loops.
+                        _this_trough = net_balance + ip.indent_trough
+                        temp_balance_trough = (
+                            _this_trough
+                            if temp_balance_trough is None
+                            else min(temp_balance_trough, _this_trough)
+                        )
+                        reflow_logger.debug(
+                            "      Stash Trough: %s (min = %s) @ %s",
+                            _this_trough,
+                            temp_balance_trough,
+                            idx,
+                        )
+                    # NOTE: We update net_balance _after_ the clause above.
+                    net_balance += ip.indent_impulse
+
+        # Evaluate options.
+        # NOTE: We don't use the _last_ option, because it tends to be trailing
+        # and have strange effects.
+        overlap = set.intersection(*options[:-1])
+        reflow_logger.debug("    Simple Overlap: %s", overlap)
+        # Remove any options above the limit option.
+        # We subtract one from the limit because, if it comes into effect,
+        # we'll effectively remove the effects of the indents between the elements.
+
+        best_indent = max(overlap)
+
+        # Is there a mutually agreeable option?
+        reflow_logger.debug("    Balance Trough: %s", balance_trough)
+        if balance_trough is not None and balance_trough <= 0:
+            # Set the indent to the minimum of the existing ones.
+            best_indent = min(lines[idx].initial_indent_balance for idx in group_lines)
+            reflow_logger.debug(
+                "    Case 3: Best: %s. Inner Lines: %s", best_indent, inner_lines
+            )
+            # Remove one indent from all intermediate lines.
+            # This is because we're effectively saying that these
+            # placeholders shouldn't impact the indentation within them.
+            for idx in inner_lines:
+                # MUTATION
+                lines[idx].initial_indent_balance -= 1
+        else:
+            reflow_logger.debug(
+                "    Case 2: Best: %s, Overlap: %s", best_indent, overlap
+            )
+
+        # Set all the lines to this indent
+        for idx in group_lines:
+            # MUTATION
+            lines[idx].initial_indent_balance = best_indent
+
+    # Finally, look for any of the lines which contain newlines
+    # inside the placeholders. We use a slice to make sure
+    # we're iterating through a copy so that we can safely
+    # modify the underlying list.
+    for line in lines[:]:
+        # Get the first segment.
+        first_seg = elements[line.indent_points[0].idx + 1].segments[0]
+        src_str = first_seg.pos_marker.source_str()
+        if src_str != first_seg.raw and "\n" in src_str:
+            reflow_logger.debug(
+                "    Removing line %s from linting as placeholder "
+                "contains newlines.",
+                first_seg.pos_marker.working_line_no,
+            )
+            lines.remove(line)
+
+
+def _revise_comment_lines(lines: List[_IndentLine], elements: ReflowSequenceType):
+    """Given an initial set of individual lines. Revise comment ones.
+
+    NOTE: This mutates the `lines` argument.
+
+    We do this to ensure that lines with comments are aligned to
+    the following non-comment element.
+    """
+    reflow_logger.debug("# Revise comment lines.")
+    comment_line_buffer: List[int] = []
+
+    # NOTE: Iterate over a shallow copy (we mutate the lines as we go).
+    for idx, line in enumerate(lines[:]):
+        if line.is_all_comments(elements):
+            comment_line_buffer.append(idx)
+        else:
+            # Not a comment-only line. If there's a buffer of comment
+            # lines, anchor them to this one.
+            for comment_line_idx in comment_line_buffer:
+                reflow_logger.debug(
+                    "  Comment Only Line: %s. Anchoring to %s", comment_line_idx, idx
+                )
+                # Mutate reference lines to match this one.
+                lines[
+                    comment_line_idx
+                ].initial_indent_balance = line.initial_indent_balance
+            # Reset the buffer
+            comment_line_buffer = []
+
+    # Any trailing comments should be anchored to the baseline.
+    for comment_line_idx in comment_line_buffer:
+        # Mutate reference lines to match this one.
+        lines[comment_line_idx].initial_indent_balance = 0
+        reflow_logger.debug(
+            "  Comment Only Line: %s. Anchoring to baseline", comment_line_idx
+        )
+
+
+def construct_single_indent(indent_unit: str, tab_space_size: int) -> str:
+    """Construct a single indent unit."""
+    if indent_unit == "tab":
+        return "\t"
+    elif indent_unit == "space":
+        return " " * tab_space_size
+    else:  # pragma: no cover
+        raise SQLFluffUserError(
+            f"Expected indent_unit of 'tab' or 'space', instead got {indent_unit}"
+        )
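+# Illustrative usage (editor's sketch; values follow directly from the
+# branches above):
+#   construct_single_indent("space", 4) == "    "
+#   construct_single_indent("tab", 2) == "\t"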
+
+
+def _prune_untaken_indents(
+    untaken_indents: Tuple[int, ...],
+    incoming_balance: int,
+    indent_stats: IndentStats,
+    has_newline: bool,
+) -> Tuple[int, ...]:
+    """Update the tracking of untaken indents.
+
+    This is an internal helper function for `_crawl_indent_points`.
+
+    We use the `trough` of the given indent stats to remove any untaken
+    indents which are now no longer relevant after balances are taken
+    into account.
+    """
+    # Strip any untaken indents above the new balance.
+    # NOTE: We strip back to the trough, not just the end point
+    # if the trough was lower than the impulse.
+    ui = tuple(
+        x
+        for x in untaken_indents
+        if x
+        <= (
+            incoming_balance + indent_stats.impulse + indent_stats.trough
+            if indent_stats.trough < indent_stats.impulse
+            else incoming_balance + indent_stats.impulse
+        )
+    )
+
+    # After stripping, we may have to add them back in.
+    # NOTE: all the values in the indent_stats are relative to the incoming
+    # indent, so we correct both of them here by using the incoming_balance.
+    if indent_stats.impulse > indent_stats.trough and not has_newline:
+        for i in range(indent_stats.trough, indent_stats.impulse):
+            indent_val = incoming_balance + i + 1
+            if indent_val - incoming_balance not in indent_stats.implicit_indents:
+                ui += (indent_val,)
+
+    return ui
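+# Worked example (editor's sketch): with untaken indents (1, 3), an
+# incoming balance of 2 and a pure dedent of IndentStats(-1, -1), the
+# new ceiling is 2 + (-1) = 1, so the untaken indent at 3 is stripped:
+#   _prune_untaken_indents((1, 3), 2, IndentStats(-1, -1), True) == (1,)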
+
+
+def _update_crawl_balances(
+    untaken_indents: Tuple[int, ...],
+    incoming_balance: int,
+    indent_stats: IndentStats,
+    has_newline: bool,
+) -> Tuple[int, Tuple[int, ...]]:
+    """Update the tracking of untaken indents and balances.
+
+    This is an internal helper function for `_crawl_indent_points`.
+    """
+    new_untaken_indents = _prune_untaken_indents(
+        untaken_indents, incoming_balance, indent_stats, has_newline
+    )
+    new_balance = incoming_balance + indent_stats.impulse
+
+    return new_balance, new_untaken_indents
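+# Continuing the sketch above: the balance itself is just updated by the
+# impulse, so:
+#   _update_crawl_balances((1, 3), 2, IndentStats(-1, -1), True)
+#   == (1, (1,))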
+
+
+def _crawl_indent_points(
+    elements: ReflowSequenceType, allow_implicit_indents: bool = False
+) -> Iterator[_IndentPoint]:
+    """Crawl through a reflow sequence, mapping existing indents.
+
+    This is where *most* of the logic for smart indentation
+    happens. The values returned here have a large impact on
+    exactly how indentation is treated.
+
+    NOTE: If a line ends with a comment, indent impulses are pushed
+    to the point _after_ the comment rather than before to aid with
+    indentation. This saves searching for them later.
+
+    TODO: Once this function *works*, there's definitely headroom
+    for simplification and optimisation. We should do that.
+    """
+    last_line_break_idx = None
+    indent_balance = 0
+    untaken_indents: Tuple[int, ...] = ()
+    cached_indent_stats: Optional[IndentStats] = None
+    cached_point: Optional[_IndentPoint] = None
+    for idx, elem in enumerate(elements):
+        if isinstance(elem, ReflowPoint):
+            # NOTE: The following line should never lead to an index error
+            # because files should always have a trailing IndentBlock containing
+            # an "end_of_file" marker, and so the final IndentPoint should always
+            # have _something_ after it.
+            following_class_types = elements[idx + 1].class_types
+            indent_stats = IndentStats.from_combination(
+                cached_indent_stats,
+                elem.get_indent_impulse(allow_implicit_indents, following_class_types),
+            )
+
+            # Was there a cache?
+            if cached_indent_stats:
+                # If there was we can safely assume there is a cached point.
+                assert cached_point
+                # If there was, this is a signal that we need to yield two points.
+                # The content of those points depends on the newlines that surround the
+                # last segments (which will be a comment block).
+                # _leading_ comments (i.e. those preceded by a newline): Yield _before_
+                # _trailing_ comments (or rare "mid" comments): Yield _after_
+                # TODO: We might want to reconsider the treatment of comments in the
+                # middle of lines eventually, but they're fairly unusual so not well
+                # covered in tests as of writing.
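+                # For example (editor's illustration):
+                #   SELECT
+                #       -- a leading comment (a newline precedes it)
+                #       a
+                # vs:
+                #   SELECT a  -- a trailing comment (no newline before it)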
+
+                # We yield the first of those points here, and then manipulate the
+                # indent_stats object to allow the following code to yield the other.
+
+                # We can refer back to the cached point as a framework. In both
+                # cases we use the combined impulse and trough, but we use the
+                # current indent balance and untaken indents.
+                if cached_point.is_line_break:
+                    # It's a leading comment. Yield all the info in that point.
+                    yield _IndentPoint(
+                        cached_point.idx,
+                        indent_stats.impulse,
+                        indent_stats.trough,
+                        indent_balance,
+                        cached_point.last_line_break_idx,
+                        True,
+                        untaken_indents,
+                    )
+                    # Before zeroing, crystallise any effect on overall balances.
+                    indent_balance, untaken_indents = _update_crawl_balances(
+                        untaken_indents, indent_balance, indent_stats, True
+                    )
+                    # Set indent stats to zero because we've already yielded.
+                    indent_stats = IndentStats(0, 0, indent_stats.implicit_indents)
+                else:
+                    # It's a trailing (or mid) comment. Yield it in the next.
+                    yield _IndentPoint(
+                        cached_point.idx,
+                        0,
+                        0,
+                        indent_balance,
+                        cached_point.last_line_break_idx,
+                        False,
+                        untaken_indents,
+                    )
+                    # No need to reset indent stats. It's already good.
+
+            # Reset caches.
+            cached_indent_stats = None
+            cached_point = None
+
+            # Do we have a newline?
+            has_newline = has_untemplated_newline(elem) and idx != last_line_break_idx
+
+            # Construct the point we may yield
+            indent_point = _IndentPoint(
+                idx,
+                indent_stats.impulse,
+                indent_stats.trough,
+                indent_balance,
+                last_line_break_idx,
+                has_newline,
+                untaken_indents,
+            )
+
+            # Update the last newline index if this is a newline.
+            # NOTE: We used the previous value in the construction of the
+            # _IndentPoint above and we only reset after that construction.
+            if has_newline:
+                last_line_break_idx = idx
+
+            # Is the next element a comment? If so - delay the decision until we've
+            # got any indents from after the comment too.
+            if "comment" in elements[idx + 1].class_types:
+                cached_indent_stats = indent_stats
+                # Create parts of a point to use later.
+                cached_point = indent_point
+                # We loop around so that we don't do the untaken indent calcs yet.
+                continue
+            # Is it meaningful as an indent point?
+            # i.e. Is it a line break? AND not a templated one.
+            # NOTE: a point at idx zero is meaningful because it's like an indent.
+            # NOTE: Last edge case. If we haven't yielded yet, but the
+            # next element is the end of the file - yield.
+            elif (
+                has_newline
+                or indent_stats.impulse
+                or indent_stats.trough
+                or idx == 0
+                or elements[idx + 1].segments[0].is_type("end_of_file")
+            ):
+                yield indent_point
+
+            # Update balances
+            indent_balance, untaken_indents = _update_crawl_balances(
+                untaken_indents, indent_balance, indent_stats, has_newline
+            )
+
+
+def _map_line_buffers(
+    elements: ReflowSequenceType, allow_implicit_indents: bool = False
+) -> Tuple[List[_IndentLine], List[int]]:
+    """Map the existing elements, building up a list of _IndentLine.
+
+    Returns:
+        :obj:`tuple` of a :obj:`list` of :obj:`_IndentLine` and a
+            :obj:`list` of :obj:`int`. The first is the main output
+            and is designed to be used in assessing indents and
+            their effect through a SQL file. The latter is a list of
+            "imbalanced" indent locations, where the positive indent
+            is untaken, but its corresponding negative indent *is*
+            taken.
+
+    """
+    # First build up the buffer of lines.
+    lines = []
+    point_buffer = []
+    # Buffers to keep track of indents which are untaken on the way
+    # up but taken on the way down. We track them explicitly so we
+    # can force them later.
+
+    #: dict of ints: maps indentation balance values to the last
+    #: index location where they were seen. This is a working buffer
+    #: and not directly returned by the function.
+    untaken_indent_locs = {}
+    #: list of ints: a list of element indices which contain untaken
+    #: positive indents, that should be forced later because their
+    #: corresponding negative indent _was_ taken. Several edge cases
+    #: are excluded from this list and so not included. See code below.
+    imbalanced_locs = []
+
+    for indent_point in _crawl_indent_points(
+        elements, allow_implicit_indents=allow_implicit_indents
+    ):
+        # We evaluate all the points in a line at the same time, so
+        # we first build up a buffer.
+        point_buffer.append(indent_point)
+
+        if not indent_point.is_line_break:
+            # If it's not a line break, we should still check whether it's
+            # a positive untaken indent, so that we can track it.
+            if indent_point.indent_impulse > indent_point.indent_trough:
+                untaken_indent_locs[
+                    indent_point.initial_indent_balance + indent_point.indent_impulse
+                ] = indent_point.idx
+            continue
+
+        # If it *is* a line break, then store it.
+        lines.append(_IndentLine.from_points(point_buffer))
+
+        # We should also evaluate whether this point inserts a newline at the close
+        # of an indent which was untaken on the way up.
+        # https://github.com/sqlfluff/sqlfluff/issues/4234
+        # Special case 1:
+        # If we're at the end of the file we shouldn't interpret it as a line break
+        # for problem indents; they're a bit of a special case.
+        # Special case 2:
+        # Bracketed expressions are a bit odd here.
+        # e.g.
+        #   WHERE (
+        #       foo = bar
+        #   )
+        #   LIMIT 1
+        #
+        # Technically there's an untaken indent before the opening bracket
+        # but this layout is common practice so we're not going to force
+        # one there even though there _is_ a line break after the closing
+        # bracket.
+        following_class_types = elements[indent_point.idx + 1].class_types
+        if (
+            indent_point.indent_trough
+            # End of file case. (Special case 1)
+            and "end_of_file" not in following_class_types
+        ):
+            passing_indents = list(
+                range(
+                    indent_point.initial_indent_balance,
+                    indent_point.initial_indent_balance + indent_point.indent_trough,
+                    -1,
+                )
+            )
+            # There might be many indents at this point, but if any match, then
+            # we should still force an indent.
+            if any(i in indent_point.untaken_indents for i in passing_indents):
+                for i in passing_indents:
+                    # If we don't have the location of the untaken indent, then
+                    # skip it for now. TODO: Check this isn't a bug when this happens.
+                    # It seems very rare for now.
+                    if i not in untaken_indent_locs:
+                        continue
+
+                    loc = untaken_indent_locs[i]
+
+                    # First check for bracket special case. It's less about whether
+                    # the section _ends_ with a lone bracket, and more about whether
+                    # the _starting point_ is a bracket which closes a line. If it
+                    # is, then skip this location. (Special case 2).
+                    # NOTE: We can safely "look ahead" here because we know all files
+                    # end with an IndentBlock, and we know here that `loc` refers to
+                    # an IndentPoint.
+                    if "start_bracket" in elements[loc + 1].class_types:
+                        continue
+
+                    # If the location was in the line we're just closing, that's
+                    # not a problem because it's an untaken indent which is closed
+                    # on the same line. Otherwise it is a problem - append it to
+                    # the buffer to sort later.
+                    if not any(ip.idx == loc for ip in point_buffer):
+                        imbalanced_locs.append(loc)
+
+        # Remove any which are now no longer relevant from the working buffer.
+        for k in list(untaken_indent_locs.keys()):
+            if k > indent_point.initial_indent_balance + indent_point.indent_trough:
+                del untaken_indent_locs[k]
+
+        # Reset the buffer
+        point_buffer = [indent_point]
+
+    # Handle potential final line
+    if len(point_buffer) > 1:
+        lines.append(_IndentLine.from_points(point_buffer))
+
+    return lines, imbalanced_locs
+
+
+def _deduce_line_current_indent(
+    elements: ReflowSequenceType, last_line_break_idx: Optional[int] = None
+) -> str:
+    """Deduce the current indent string.
+
+    This method accounts for both literal indents and indents
+    consumed from the source by potential templating tags.
+    """
     indent_seg = None
-    for seg in root_segment.raw_segments[seg_idx::-1]:
-        if seg.is_code:
-            indent_seg = None
-        elif seg.is_type("whitespace"):
-            indent_seg = seg
-        elif seg.is_type("newline"):
-            break
-    reflow_logger.debug("Deduced indent for %s as %s", raw_segment, indent_seg)
-    if indent_seg:
+    if last_line_break_idx:
+        indent_seg = cast(
+            ReflowPoint, elements[last_line_break_idx]
+        )._get_indent_segment()
+    elif isinstance(elements[0], ReflowPoint) and elements[0].segments[
+        0
+    ].pos_marker.working_loc == (1, 1):
+        # No last_line_break_idx, but this is a point. It's the first line.
+
+        # First check whether this is a first line with a leading
+        # placeholder.
+        if elements[0].segments[0].is_type("placeholder"):
+            reflow_logger.debug("    Handling as initial leading placeholder")
+            seg = cast(TemplateSegment, elements[0].segments[0])
+            # Is the placeholder a consumed whitespace?
+            if seg.source_str.startswith((" ", "\t")):
+                indent_seg = seg
+        # Otherwise it's an initial leading literal whitespace.
+        else:
+            reflow_logger.debug("    Handling as initial leading whitespace")
+            for indent_seg in elements[0].segments[::-1]:
+                if indent_seg.is_type("whitespace") and not indent_seg.is_templated:
+                    break
+            # Handle edge case of no whitespace, but with newline.
+            if not indent_seg.is_type("whitespace"):
+                indent_seg = None
+
+    if not indent_seg:
+        return ""
+
+    # We have to check pos marker before checking is templated.
+    # Insertions don't have pos_markers - so aren't templated,
+    # but also don't support calling is_templated.
+    if indent_seg.is_type("placeholder"):
+        # It's a consumed indent.
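+        # e.g. (editor's illustration) a placeholder whose source_str is
+        # "  \n    " consumed a newline plus four spaces, so the text
+        # after the last newline ("    ") is the current indent.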
+        return cast(TemplateSegment, indent_seg).source_str.split("\n")[-1] or ""
+    elif not indent_seg.pos_marker or not indent_seg.is_templated:
+        # It's a literal
+        assert "\n" not in indent_seg.raw, f"Found newline in indent: {indent_seg}"
         return indent_seg.raw
+    else:  # pragma: no cover
+        # It's templated. This shouldn't happen. Segments returned by
+        # _get_indent_segment should be valid indents (i.e. whitespace
+        # or placeholders for consumed whitespace). This is a bug.
+        if indent_seg.pos_marker:
+            reflow_logger.warning(
+                "Segment position marker: %s: [SRC: %s, TMP:%s]",
+                indent_seg.pos_marker,
+                indent_seg.pos_marker.source_slice,
+                indent_seg.pos_marker.templated_slice,
+            )
+        raise NotImplementedError(
+            "Unexpected templated indent. Report this as a bug on "
+            f"GitHub. Segment: {indent_seg}\n"
+            "https://github.com/sqlfluff/sqlfluff/issues/new/choose"
+        )
+
+
+def _lint_line_starting_indent(
+    elements: ReflowSequenceType,
+    indent_line: _IndentLine,
+    single_indent: str,
+    forced_indents: List[int],
+) -> List[LintResult]:
+    """Lint the indent at the start of a line.
+
+    NOTE: This mutates `elements` to avoid lots of copying.
+    """
+    indent_points = indent_line.indent_points
+    # Set up the default anchor
+    initial_point_idx = indent_points[0].idx
+    anchor = {"before": elements[initial_point_idx + 1].segments[0]}
+    # Find initial indent, and deduce appropriate string indent.
+    current_indent = _deduce_line_current_indent(
+        elements, indent_points[-1].last_line_break_idx
+    )
+    desired_indent_units = indent_line.desired_indent_units(forced_indents)
+    desired_starting_indent = desired_indent_units * single_indent
+    initial_point = cast(ReflowPoint, elements[initial_point_idx])
+
+    if current_indent == desired_starting_indent:
+        return []
+
+    # Edge case: Multiline comments. If the previous line was a multiline
+    # comment and this line starts with a multiline comment, then we should
+    # only lint the indent if it's _too small_. Otherwise we risk destroying
+    # indentation which the logic here is not smart enough to handle.
+    if (
+        initial_point_idx > 0
+        and initial_point_idx < len(elements) - 1
+        and "block_comment" in elements[initial_point_idx - 1].class_types
+        and "block_comment" in elements[initial_point_idx + 1].class_types
+    ):
+        if len(current_indent) > len(desired_starting_indent):
+            reflow_logger.debug("    Indent is bigger than required. OK.")
+            return []
+
+    reflow_logger.debug(
+        "    Correcting indent @ line %s. Existing indent: %r -> %r",
+        elements[initial_point_idx + 1].segments[0].pos_marker.working_line_no,
+        current_indent,
+        desired_starting_indent,
+    )
+
+    # Initial point gets special handling if it has no newlines.
+    if indent_points[0].idx == 0 and not indent_points[0].is_line_break:
+        init_seg = elements[indent_points[0].idx].segments[0]
+        if init_seg.is_type("placeholder"):
+            init_seg = cast(TemplateSegment, init_seg)
+            # If it's a placeholder initial indent, then modify the placeholder
+            # to remove the indent from it.
+            src_fix = SourceFix(
+                "",
+                source_slice=slice(0, len(current_indent) + 1),
+                templated_slice=slice(0, 0),
+            )
+            fixes = [
+                LintFix.replace(
+                    init_seg,
+                    [init_seg.edit(source_fixes=[src_fix], source_str="")],
+                )
+            ]
+        else:
+            # Otherwise it's just initial whitespace. Remove it.
+            fixes = [LintFix.delete(seg) for seg in initial_point.segments]
+
+        new_results = [
+            LintResult(
+                initial_point.segments[0],
+                fixes,
+                description="First line should not be indented.",
+                source="reflow.indent.existing",
+            )
+        ]
+        new_point = ReflowPoint(())
+    # Placeholder indents also get special treatment
     else:
-        return ""
+        new_results, new_point = initial_point.indent_to(
+            desired_starting_indent,
+            source="reflow.indent.existing",
+            **anchor,  # type: ignore
+        )
+
+    elements[initial_point_idx] = new_point
+    return new_results
+
+
+def _lint_line_untaken_positive_indents(
+    elements: ReflowSequenceType,
+    indent_line: _IndentLine,
+    single_indent: str,
+    imbalanced_indent_locs: List[int],
+) -> Tuple[List[LintResult], List[int]]:
+    """Check for positive indents which should have been taken."""
+    # First check whether this line contains any of the untaken problem points.
+    for ip in indent_line.indent_points:
+        if ip.idx in imbalanced_indent_locs:
+            # Force it at the relevant position.
+            desired_indent = single_indent * (
+                ip.closing_indent_balance - len(ip.untaken_indents)
+            )
+            reflow_logger.debug(
+                "    Detected imbalanced +ve break @ line %s. Indenting to %r",
+                elements[ip.idx + 1].segments[0].pos_marker.working_line_no,
+                desired_indent,
+            )
+            target_point = cast(ReflowPoint, elements[ip.idx])
+            results, new_point = target_point.indent_to(
+                desired_indent,
+                before=elements[ip.idx + 1].segments[0],
+                source="reflow.indent.imbalance",
+            )
+            elements[ip.idx] = new_point
+            # Keep track of the indent we forced, by returning it.
+            return results, [ip.closing_indent_balance]
+
+    # If the line doesn't close higher than it opened, there won't be any.
+    starting_balance = indent_line.opening_balance()
+    last_ip = indent_line.indent_points[-1]
+    # Check whether it closes the opening indent.
+    if last_ip.initial_indent_balance + last_ip.indent_trough <= starting_balance:
+        return [], []
+    # If we get here, the line opens an indent which it doesn't close.
+    # NOTE: Because trailing comments should always shift any
+    # surrounding indentation effects to _after_ their position, we
+    # should just be able to evaluate them safely from the end of the line.
+
+    indent_points = indent_line.indent_points
+
+    # Account for the closing trough.
+    closing_trough = last_ip.initial_indent_balance + (
+        last_ip.indent_trough or last_ip.indent_impulse
+    )
+
+    # On the way up we're looking for whether the ending balance
+    # was an untaken indent or not. If it *was* untaken, there's
+    # a good chance that we *should* take it.
+    # NOTE: an implicit indent would not force a newline
+    # because it wouldn't be in the untaken_indents. It's
+    # considered _taken_ even if not.
+    if closing_trough not in indent_points[-1].untaken_indents:
+        # If the closing point doesn't correspond to an untaken
+        # indent within the line (i.e. it _was_ taken), then
+        # there won't be an appropriate place to force an indent.
+        return [], []
+
+    # The closing indent balance *does* correspond to an
+    # untaken indent on this line. We *should* force a newline
+    # at that position.
+    for ip in indent_points:
+        if ip.closing_indent_balance == closing_trough:
+            target_point_idx = ip.idx
+            desired_indent = single_indent * (
+                ip.closing_indent_balance - len(ip.untaken_indents)
+            )
+            break
+    else:  # pragma: no cover
+        raise NotImplementedError("We should always find the relevant point.")
+    reflow_logger.debug(
+        "    Detected missing +ve line break @ line %s. Indenting to %r",
+        elements[target_point_idx + 1].segments[0].pos_marker.working_line_no,
+        desired_indent,
+    )
+    target_point = cast(ReflowPoint, elements[target_point_idx])
+    results, new_point = target_point.indent_to(
+        desired_indent,
+        before=elements[target_point_idx + 1].segments[0],
+        source="reflow.indent.positive",
+    )
+    elements[target_point_idx] = new_point
+    # Keep track of the indent we forced, by returning it.
+    return results, [closing_trough]
+
+
+def _lint_line_untaken_negative_indents(
+    elements: ReflowSequenceType,
+    indent_line: _IndentLine,
+    single_indent: str,
+    forced_indents: List[int],
+) -> List[LintResult]:
+    """Check for negative indents which should have been taken."""
+    # If we don't close lower than we start, there won't be any.
+    if indent_line.closing_balance() >= indent_line.opening_balance():
+        return []
+
+    results: List[LintResult] = []
+    # On the way down we're looking for indents which *were* taken on
+    # the way up, but currently aren't on the way down. We slice so
+    # that the _last_ point isn't evaluated, because that's fine.
+    for ip in indent_line.indent_points[:-1]:
+        # Is line break, or positive indent?
+        if ip.is_line_break or ip.indent_impulse >= 0:
+            continue
+
+        # When using implicit indents, we may find untaken negatives which
+        # aren't shallower than the line they're on. This is because they
+        # were implicit on the way up and so not included in `untaken_indents`.
+        # To catch them we also check that we're shallower than the start
+        # of the line.
+        if (
+            ip.initial_indent_balance + ip.indent_trough
+            >= indent_line.opening_balance()
+        ):
+            continue
+
+        # It's negative, is it untaken? In the case of a multi-dedent
+        # they must _all_ be untaken to take this route.
+        covered_indents = set(
+            range(
+                ip.initial_indent_balance,
+                ip.initial_indent_balance + ip.indent_trough,
+                -1,
+            )
+        )
+        untaken_indents = set(ip.untaken_indents).difference(forced_indents)
+        if covered_indents.issubset(untaken_indents):
+            # Yep, untaken.
+            continue
+
+        # Edge Case: Comments. Since introducing the code to push indent effects
+        # to the point _after_ comments, we no longer need to detect an edge case
+        # for them here. If we change that logic again in the future, so that
+        # indent values are allowed before comments - that code should be
+        # reintroduced here.
+
+        # Edge Case: Semicolons. For now, semicolon placement is a little
+        # more complicated than what we do here. For now we don't (by
+        # default) introduce missing -ve indents before semicolons.
+        # TODO: Review whether this is a good idea, or whether this should be
+        # more configurable.
+        # NOTE: This could potentially lead to a weird situation if two
+        # statements are already on the same line. That's a bug to solve later.
+        if elements[ip.idx + 1 :] and elements[ip.idx + 1].class_types.intersection(
+            ("statement_terminator", "comma")
+        ):
+            reflow_logger.debug(
+                "    Detected missing -ve line break @ line %s, before "
+                "semicolon or comma. Ignoring...",
+                elements[ip.idx + 1].segments[0].pos_marker.working_line_no,
+            )
+            continue
+
+        # Edge case: template blocks. These sometimes sit in odd places
+        # in the parse tree, so don't force newlines before them.
+        if elements[ip.idx + 1 :] and "placeholder" in elements[ip.idx + 1].class_types:
+            # are any of those placeholders blocks?
+            if any(
+                cast(TemplateSegment, seg).block_type.startswith("block")
+                for seg in elements[ip.idx + 1].segments
+                if seg.is_type("placeholder")
+            ):
+                reflow_logger.debug(
+                    "    Detected missing -ve line break @ line %s, before "
+                    "block placeholder. Ignoring...",
+                    elements[ip.idx + 1].segments[0].pos_marker.working_line_no,
+                )
+                continue
+
+        # It's negative, not a line break and was taken on the way up.
+        # This *should* be an indent!
+        desired_indent = single_indent * (
+            ip.closing_indent_balance - len(ip.untaken_indents) + len(forced_indents)
+        )
+        reflow_logger.debug(
+            "    Detected missing -ve line break @ line %s. Indenting to %r",
+            elements[ip.idx + 1].segments[0].pos_marker.working_line_no,
+            desired_indent,
+        )
+        target_point = cast(ReflowPoint, elements[ip.idx])
+        new_results, new_point = target_point.indent_to(
+            desired_indent,
+            before=elements[ip.idx + 1].segments[0],
+            source="reflow.indent.negative",
+        )
+        elements[ip.idx] = new_point
+        results += new_results
+
+    return results
+
+
+def _lint_line_buffer_indents(
+    elements: ReflowSequenceType,
+    indent_line: _IndentLine,
+    single_indent: str,
+    forced_indents: List[int],
+    imbalanced_indent_locs: List[int],
+) -> List[LintResult]:
+    """Evaluate a single set of indent points on one line.
+
+    NOTE: This mutates the given `elements` and `forced_indents` input to avoid
+    lots of copying.
+
+    Order of operations:
+    1. Evaluate the starting indent for this line.
+    2. For points which aren't line breaks in the line, we evaluate them
+       to see whether they *should* be. We separately address missing indents
+       on the way *up* and then on the way *down*.
+       - *Up* in this sense means where the indent balance goes up, but isn't
+         closed again within the same line - e.g. :code:`SELECT a + (2 +` where
+         the indent implied by the bracket isn't closed out before the end of the
+         line.
+       - *Down* in this sense means where we've dropped below the starting
+         indent balance of the line - e.g. :code:`1 + 1) FROM foo` where the
+         line starts within a bracket and then closes that *and* closes an
+         apparent SELECT clause without a newline.
+
+    This method returns fixes, including appropriate descriptions, to
+    allow generation of LintResult objects directly from them.
+    """
+    reflow_logger.info(
+        "    Line #%s [source line #%s]. idx=%s:%s. FI %s. UPI: %s.",
+        elements[indent_line.indent_points[0].idx + 1]
+        .segments[0]
+        .pos_marker.working_line_no,
+        elements[indent_line.indent_points[0].idx + 1]
+        .segments[0]
+        .pos_marker.source_position()[0],
+        indent_line.indent_points[0].idx,
+        indent_line.indent_points[-1].idx,
+        forced_indents,
+        imbalanced_indent_locs,
+    )
+    reflow_logger.debug(
+        "   Line Content: %s",
+        [
+            repr(elem.raw)
+            for elem in elements[
+                indent_line.indent_points[0].idx : indent_line.indent_points[-1].idx
+            ]
+        ],
+    )
+    reflow_logger.debug("  Evaluate Line: %s. FI %s", indent_line, forced_indents)
+    results = []
+
+    # First, handle starting indent.
+    results += _lint_line_starting_indent(
+        elements, indent_line, single_indent, forced_indents
+    )
+
+    # Second, handle potential missing positive indents.
+    new_results, new_indents = _lint_line_untaken_positive_indents(
+        elements, indent_line, single_indent, imbalanced_indent_locs
+    )
+    # If we have any, bank them and return. We don't need to check for
+    # negatives because we know we're on the way up.
+    if new_results:
+        results += new_results
+        # Keep track of any indents we forced
+        forced_indents.extend(new_indents)
+        return results
+
+    # Third, handle potential missing negative indents.
+    results += _lint_line_untaken_negative_indents(
+        elements, indent_line, single_indent, forced_indents
+    )
+
+    # Lastly remove any forced indents above the closing balance.
+    # Iterate through a slice so we're not editing the thing
+    # that we're iterating through.
+    for i in forced_indents[:]:
+        if i > indent_line.closing_balance():
+            forced_indents.remove(i)
+
+    return results
+
+
+def lint_indent_points(
+    elements: ReflowSequenceType,
+    single_indent: str,
+    skip_indentation_in: Set[str] = set(),
+    allow_implicit_indents: bool = False,
+) -> Tuple[ReflowSequenceType, List[LintResult]]:
+    """Lint the indent points to check we have line breaks where we should.
+
+    For linting indentation - we *first* need to make sure there are
+    line breaks in all the places there should be. This takes an input
+    set of indent points, and inserts additional line breaks in the
+    necessary places to make sure indentation can be valid.
+
+    Specifically we're addressing two things:
+
+    1. Any untaken indents. An untaken indent is only valid if its
+    corresponding dedent is on the same line. If that is not the case,
+    there should be a line break at the location of the indent and dedent.
+
+    2. The indentation of lines. Given the line breaks are in the right
+    place, is the line indented correctly?
+
+    We do these at the same time, because we can't do the second without
+    having line breaks in the right place, but if we're inserting a line
+    break, we need to also know how much to indent by.
+    """
+    # First map the line buffers.
+    lines: List[_IndentLine]
+    imbalanced_indent_locs: List[int]
+    lines, imbalanced_indent_locs = _map_line_buffers(
+        elements, allow_implicit_indents=allow_implicit_indents
+    )
+
+    # Revise templated indents
+    _revise_templated_lines(lines, elements)
+    # Revise comment indents
+    _revise_comment_lines(lines, elements)
+
+    # Skip elements we're configured to not touch (i.e. scripts)
+    for line in lines[:]:
+        for block in line.iter_blocks(elements):
+            if any(
+                skip_indentation_in.intersection(types)
+                for types in block.depth_info.stack_class_types
+            ):
+                reflow_logger.debug(
+                    "Skipping line %s because it is within one of %s",
+                    line,
+                    skip_indentation_in,
+                )
+                lines.remove(line)
+                break
+
+    reflow_logger.debug("# Evaluate lines for indentation.")
+    # Last: handle each of the lines.
+    results: List[LintResult] = []
+    # NOTE: forced_indents is mutated by _lint_line_buffer_indents
+    # It's used to pass from one call to the next.
+    forced_indents: List[int] = []
+    elem_buffer = elements.copy()  # Make a working copy to mutate.
+    for line in lines:
+        line_results = _lint_line_buffer_indents(
+            elem_buffer, line, single_indent, forced_indents, imbalanced_indent_locs
+        )
+        if line_results:
+            reflow_logger.info("      PROBLEMS:")
+            for res in line_results:
+                reflow_logger.info("        %s @ %s", res.source, res.anchor)
+                reflow_logger.info("          %s", res.description)
+        results += line_results
+
+    return elem_buffer, results
+
+
+def _source_char_len(elements: ReflowSequenceType):
+    """Calculate length in the source file.
+
+    NOTE: This relies heavily on the sequence already being
+    split appropriately. It will raise errors if not.
+
+    TODO: There's a good chance that this might not play well
+    with other fixes. If we find segments without positions
+    then it will probably error. Those will need ironing
+    out.
+
+    TODO: This probably needs more tests. It's already
+    the source of quite a few fiddly sections.
+    """
+    char_len = 0
+    last_source_slice: Optional[slice] = None
+    for seg in chain.from_iterable(elem.segments for elem in elements):
+        # Indent tokens occasionally have strange position markers.
+        # They also don't have length so skip them.
+        # TODO: This is actually caused by bugs and inconsistencies
+        # in how the source_slice is generated for the position markers
+        # of indent and dedent tokens. That's a job for another day
+        # however.
+        if seg.is_type("indent"):
+            continue
+        # Get the source position. If there is no source position then it's
+        # a recent edit or modification. We shouldn't evaluate it until it's
+        # been positioned. Without a source marker we don't know how to treat
+        # it.
+        if not seg.pos_marker:  # pragma: no cover
+            break
+        source_slice = seg.pos_marker.source_slice
+        # Is there a newline in the source string?
+        source_str = seg.pos_marker.source_str()
+        if "\n" in source_str:
+            # There is. Stop here. It's probably a complicated
+            # jinja tag, so it's safer to stop here.
+            # TODO: In future, we should probably be a little
+            # smarter about this, but for now this is ok. Without
+            # an algorithm for laying out code _within_ jinja tags
+            # we won't be able to suggest appropriate fixes.
+            char_len += source_str.index("\n")
+            break
+        slice_len = slice_length(source_slice)
+        # Only update the length if it's a new slice.
+        if source_slice != last_source_slice:
+            # If it's got size in the template but not in the source, it's
+            # probably an insertion.
+            if seg.raw and not slice_len:
+                char_len += len(seg.raw)
+                # NOTE: Don't update the last_source_slice.
+            elif not slice_len:
+                # If it's got no raw and no length, it's
+                # irrelevant. Ignore it. It's probably a meta.
+                continue
+            # Otherwise if we're literal, use the raw length
+            # because it might be an edit.
+            elif seg.pos_marker.is_literal():
+                char_len += len(seg.raw)
+                last_source_slice = source_slice
+            # Otherwise assume it's templated code.
+            else:
+                char_len += slice_length(source_slice)
+                last_source_slice = source_slice
+
+    return char_len
+
+
+def _rebreak_priorities(spans: List[_RebreakSpan]) -> Dict[int, int]:
+    """Process rebreak spans into opportunities to split lines.
+
+    The index to insert a potential indent at depends on the
+    line_position of the span. Infer that here and return a mapping
+    of those indices to their rebreak priority.
+    """
+    rebreak_priority = {}
+    for span in spans:
+        if span.line_position == "leading":
+            rebreak_indices = [span.start_idx - 1]
+        elif span.line_position == "trailing":
+            rebreak_indices = [span.end_idx + 1]
+        elif span.line_position == "alone":
+            rebreak_indices = [span.start_idx - 1, span.end_idx + 1]
+        else:  # pragma: no cover
+            raise NotImplementedError(
+                f"Unexpected line position: {span.line_position}"
+            )
+        # NOTE: Operator precedence here is hard coded. It could be
+        # moved to configuration in the layout section in the future.
+        # Operator precedence is fairly consistent between dialects
+        # so for now it feels ok that it's coded here - it also wouldn't
+        # be a breaking change at that point so no pressure to release
+        # it early.
+        span_raw = span.target.raw_upper
+        priority = 6  # Default to 6 for now i.e. the same as '+'
+        # Override priority for specific precedence.
+        if span_raw == ",":
+            priority = 1
+        elif span.target.is_type("assignment_operator"):
+            # This one is a little rarer so not covered in tests yet.
+            # Logic is the same as others though.
+            priority = 2  # pragma: no cover
+        elif span_raw == "OR":
+            priority = 3
+        elif span_raw == "AND":
+            priority = 4
+        elif span.target.is_type("comparison_operator"):
+            priority = 5
+        elif span_raw in ("*", "/", "%"):
+            priority = 7
+
+        for rebreak_idx in rebreak_indices:
+            rebreak_priority[rebreak_idx] = priority
+
+    return rebreak_priority
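+# Illustrative mapping (editor's sketch, with hypothetical indices): a
+# span over an "AND" marked trailing, ending at index 10, would yield
+# {11: 4}, while a span over a "," marked leading, starting at index 4,
+# would yield {3: 1}. Lower priorities produce lower fractional keys in
+# _match_indents below, so commas are offered as break points before
+# boolean operators.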
+
+
+MatchedIndentsType = DefaultDict[float, List[int]]
+
+
+def _increment_balance(
+    input_balance: int,
+    indent_stats: IndentStats,
+    elem_idx: int,
+) -> Tuple[int, MatchedIndentsType]:
+    """Logic for stepping through _match_indents.
+
+    This is the part of that logic which is potentially fragile
+    so is separated here into a more isolated function for
+    better testing. It's very easy to get wrong, but necessary
+    so that we don't mistake empty elements; it remains potentially
+    fragile nonetheless.
+
+    Returns:
+        A tuple where the first element is the resulting balance
+            and the second is a :obj:`defaultdict` of the new
+            elements to add to `matched_indents`.
+
+    Positive indent example:
+    >>> _increment_balance(0, IndentStats(1, 0), 7)
+    (1, defaultdict(<class 'list'>, {1.0: [7]}))
+
+    Negative indent example:
+    >>> _increment_balance(3, IndentStats(-1, -1), 11)
+    (2, defaultdict(<class 'list'>, {3.0: [11]}))
+
+    Double negative indent example:
+    >>> _increment_balance(3, IndentStats(-2, -2), 16)
+    (1, defaultdict(<class 'list'>, {3.0: [16], 2.0: [16]}))
+
+    Dip indent example:
+    >>> _increment_balance(3, IndentStats(0, -1), 21)
+    (3, defaultdict(<class 'list'>, {3.0: [21]}))
+    """
+    balance = input_balance
+    matched_indents: MatchedIndentsType = defaultdict(list)
+    if indent_stats.trough < 0:  # NOTE: for negative, *trough* counts.
+        # in case of more than one indent we loop and apply to all.
+        for b in range(0, indent_stats.trough, -1):
+            matched_indents[(balance + b) * 1.0].append(elem_idx)
+        # NOTE: We carry forward the impulse, not the trough.
+        # This is important for dedent+indent pairs.
+        balance += indent_stats.impulse
+    elif indent_stats.impulse > 0:  # NOTE: for positive, *impulse* counts.
+        # in case of more than one indent we loop and apply to all.
+        for b in range(0, indent_stats.impulse):
+            matched_indents[(balance + b + 1) * 1.0].append(elem_idx)
+        balance += indent_stats.impulse
+    return balance, matched_indents
+
+
+def _match_indents(
+    line_elements: ReflowSequenceType,
+    rebreak_priorities: Dict[int, int],
+    newline_idx: int,
+    allow_implicit_indents: bool = False,
+) -> MatchedIndentsType:
+    """Identify indent points, taking into account rebreak_priorities.
+
+    Expect fractional keys, because of the half values for
+    rebreak points.
+    """
+    balance = 0
+    matched_indents: MatchedIndentsType = defaultdict(list)
+    implicit_indents: Dict[int, Tuple[int, ...]] = {}
+    for idx, e in enumerate(line_elements):
+        # We only care about points, because only they contain indents.
+        if not isinstance(e, ReflowPoint):
+            continue
+
+        # As usual, indents are referred to by their "uphill" side
+        # so what number we store the point against depends on whether
+        # it's positive or negative.
+        # NOTE: Here we don't actually pass in the forward types because
+        # we don't need them for the output. It doesn't make a difference.
+        indent_stats = e.get_indent_impulse(allow_implicit_indents, set())
+        e_idx = newline_idx - len(line_elements) + idx + 1
+        # Save any implicit indents.
+        if indent_stats.implicit_indents:
+            implicit_indents[e_idx] = indent_stats.implicit_indents
+        balance, nmi = _increment_balance(balance, indent_stats, e_idx)
+        # Incorporate nmi into matched_indents
+        for b, indices in nmi.items():
+            matched_indents[b].extend(indices)
+
+        # Something can be both an indent point AND a rebreak point.
+        if idx in rebreak_priorities:
+            # For potential rebreak options (i.e. ones without an indent)
+            # we add 0.5 so that they sit *between* the varying indent
+            # options. That means we split them before any of their
+            # content, but don't necessarily split them when their
+            # container is split.
+
+            # Also to spread out the breaks within an indent, we further
+            # add hints to distinguish between them. This is where operator
+            # precedence (as defined above) actually comes into effect.
+            priority = rebreak_priorities[idx]
+            # Assume `priority` is in the range 0 - 50, so divide by
+            # 100 before adding it to the 0.5 offset.
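+            # e.g. with balance 1 and priority 20, the key is 1.70,
+            # sorting between the integer indent options 1.0 and 2.0.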
+            matched_indents[balance + 0.5 + (priority / 100)].append(e_idx)
+        else:
+            continue
+
+    # Before working out the lowest option, we purge any which contain
+    # ONLY the final point. That's because adding indents there won't
+    # actually help the line length. There's *already* a newline there.
+    for indent_level in list(matched_indents.keys()):
+        if matched_indents[indent_level] == [newline_idx]:
+            matched_indents.pop(indent_level)
+            reflow_logger.debug(
+                "    purging balance of %s, it references only the final element.",
+                indent_level,
+            )
+
+    # ADDITIONALLY - if implicit indents are allowed we should
+    # only use them if they match another untaken point (which isn't
+    # implicit, or the end of the line).
+    # NOTE: This logic might be better placed elsewhere when (and if)
+    # we introduce smarter choices on where to add indents.
+    if allow_implicit_indents:
+        for indent_level in list(matched_indents.keys()):
+            major_points = set(matched_indents[indent_level]).difference(
+                [newline_idx], implicit_indents.keys()
+            )
+            if not major_points:
+                matched_indents.pop(indent_level)
+                reflow_logger.debug(
+                    "    purging balance of %s, it references implicit indents "
+                    "or the final indent.",
+                    indent_level,
+                )
+
+    return matched_indents
+
+
+def _fix_long_line_with_comment(
+    line_buffer: ReflowSequenceType,
+    elements: ReflowSequenceType,
+    current_indent: str,
+    line_length_limit: int,
+    last_indent_idx: Optional[int],
+    trailing_comments: str = "before",
+) -> Tuple[ReflowSequenceType, List[LintFix]]:
+    """Fix long line by moving trailing comments if possible.
+
+    This method (unlike the ones for normal lines) just returns
+    a new `elements` sequence rather than mutating it.
+    """
+    # If the comment contains a noqa, don't fix it. It's unsafe.
+    if "noqa" in line_buffer[-1].segments[-1].raw:
+        reflow_logger.debug("    Unfixable because noqa unsafe to move.")
+        return elements, []
+
+    # If the comment is longer than the limit _anyway_, don't move
+    # it. It will still be too long.
+    if len(line_buffer[-1].segments[-1].raw) + len(current_indent) > line_length_limit:
+        reflow_logger.debug("    Unfixable because comment too long anyway.")
+        return elements, []
+
+    comment_seg = line_buffer[-1].segments[-1]
+    first_seg = line_buffer[0].segments[0]
+    last_elem_idx = elements.index(line_buffer[-1])
+
+    assert trailing_comments in (
+        "after",
+        "before",
+    ), f"Unexpected value for `trailing_comments`: {trailing_comments!r}"
+
+    # The simpler case is when we're moving the comment to the line
+    # _after_. In that case we just coerce the point before it to
+    # be an indent.
+    if trailing_comments == "after":
+        anchor_point = cast(ReflowPoint, line_buffer[-2])
+        results, new_point = anchor_point.indent_to(current_indent, before=comment_seg)
+        elements = (
+            elements[: last_elem_idx - 1] + [new_point] + elements[last_elem_idx:]
+        )
+        return elements, fixes_from_results(results)
+
+    # Otherwise we're moving it up and _before_ the line, which is
+    # a little more involved (but also the default).
+    fixes = [
+        # Remove the comment from its current position, and any
+        # whitespace in the previous point.
+        LintFix.delete(comment_seg),
+        *[
+            LintFix.delete(ws)
+            for ws in line_buffer[-2].segments
+            if ws.is_type("whitespace")
+        ],
+    ]
+
+    # Are we at the start of the file? If so, there's no
+    # indent, and also no previous segments to deal with.
+    if last_indent_idx is None:
+        new_point = ReflowPoint((NewlineSegment(),))
+        prev_elems = []
+        anchor = first_seg
+    else:
+        new_point = ReflowPoint(
+            (
+                NewlineSegment(),
+                WhitespaceSegment(current_indent),
+            )
+        )
+        prev_elems = elements[: last_indent_idx + 1]
+        anchor = elements[last_indent_idx + 1].segments[0]
+
+    fixes.append(
+        # NOTE: This looks a little convoluted, but we create
+        # *before* a block here rather than *after* a point,
+        # because the point may have been modified already by
+        # reflow code and may not be a reliable anchor.
+        LintFix.create_before(
+            anchor,
+            [
+                comment_seg,
+                *new_point.segments,
+            ],
+        )
+    )
+
+    elements = (
+        prev_elems
+        + [
+            line_buffer[-1],
+            new_point,
+        ]
+        + line_buffer[:-2]
+        + elements[last_elem_idx + 1 :]
+    )
+
+    return elements, fixes
+
+
+def _fix_long_line_with_fractional_targets(
+    elements: ReflowSequenceType, target_breaks: List[int], desired_indent: str
+) -> List[LintResult]:
+    """Work out fixes for splitting a long line at locations like operators.
+
+    NOTE: This mutates `elements` to avoid copying.
+
+    This is a helper function within .lint_line_length().
+    """
+    line_results = []
+    for e_idx in target_breaks:
+        e = cast(ReflowPoint, elements[e_idx])
+        new_results, new_point = e.indent_to(
+            desired_indent,
+            after=elements[e_idx - 1].segments[-1],
+            before=elements[e_idx + 1].segments[0],
+        )
+        # NOTE: Mutation of elements.
+        elements[e_idx] = new_point
+        line_results += new_results
+    return line_results
+
+
+def _fix_long_line_with_integer_targets(
+    elements: ReflowSequenceType,
+    target_breaks: List[int],
+    line_length_limit: int,
+    inner_indent: str,
+    outer_indent: str,
+    allow_implicit_indents: bool,
+) -> List[LintResult]:
+    """Work out fixes for splitting a long line at locations like indents.
+
+    NOTE: This mutates `elements` to avoid copying.
+
+    This is a helper function within .lint_line_length().
+    """
+    line_results = []
+    # Create a stash of indent_stats. We're going to need them
+    # twice, so we generate them once here for later use.
+    _indent_stats_cache: Dict[int, IndentStats] = {}
+    for e_idx in target_breaks:
+        # Generate indent stats for it.
+        e = cast(ReflowPoint, elements[e_idx])
+        # We need to check for negative sections so they get the right
+    # indent (otherwise they'll be over-indented).
+        # The `desired_indent` above is for the "uphill" side.
+        following_class_types = elements[e_idx + 1].class_types
+        indent_stats = e.get_indent_impulse(
+            allow_implicit_indents, following_class_types
+        )
+        # Cache them for later
+        _indent_stats_cache[e_idx] = indent_stats
+
+    # If we can get to the uphill indent of a later break, and still be within
+    # the line limit, then we can skip everything before it.
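+    # e.g. if candidate breaks sit at positions 10 and 40, and the block
+    # after position 40 still starts within the line limit, then breaking
+    # at position 10 gains nothing and can be purged.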
+    purge_before = 0
+    for e_idx in target_breaks:
+        # Is the following block already past the limit?
+        # NOTE: We use the block because we know it will have segments.
+        if not elements[e_idx + 1].segments[0].pos_marker:
+            # If it doesn't have position - we should just bow out
+            # now. It's too complicated.
+            break  # pragma: no cover
+        if (
+            elements[e_idx + 1].segments[0].pos_marker.working_line_pos
+            > line_length_limit
+        ):
+            # If we're past the line length limit, stop looking.
+            break
+
+        # Fetch cached indent stats
+        indent_stats = _indent_stats_cache[e_idx]
+        if indent_stats.trough < 0:
+            # It's negative. Skip onward.
+            continue
+
+        # If we get this far, then it's positive, but still within
+        # the line limit. We can purge any pairs before this.
+        purge_before = e_idx
+        reflow_logger.debug("    ...breaks before %s unnecessary.", purge_before)
+    # Only keep indices which are after the critical point.
+    target_breaks = [e_idx for e_idx in target_breaks if e_idx >= purge_before]
+    reflow_logger.debug("    Remaining breaks: %s.", target_breaks)
+
+    for e_idx in target_breaks:
+        e = cast(ReflowPoint, elements[e_idx])
+        indent_stats = _indent_stats_cache[e_idx]
+        # NOTE: We check against the _impulse_ here rather than the
+        # _trough_ because if we're about to step back up again then
+        # it should still be indented.
+        if indent_stats.impulse < 0:
+            new_indent = outer_indent
+            # NOTE: If we're about to insert a dedent before a
+            # comma or semicolon ... don't. They are a bit special
+            # in being allowed to trail.
+            if elements[e_idx + 1].class_types.intersection(
+                ("statement_terminator", "comma")
+            ):
+                reflow_logger.debug("    Skipping dedent before comma or semicolon.")
+                # We break rather than continue because this is
+                # necessarily a step back down.
+                break
+        else:
+            new_indent = inner_indent
+
+        new_results, new_point = e.indent_to(
+            new_indent,
+            after=elements[e_idx - 1].segments[-1],
+            before=elements[e_idx + 1].segments[0],
+        )
+        # NOTE: Mutation of elements.
+        elements[e_idx] = new_point
+        line_results += new_results
+
+        # If the balance is *also* negative, then we should also stop.
+        # We've indented a whole section - that's enough for now.
+        # We've already skipped over any unnecessary sections, and they shouldn't
+        # be reassessed on the next pass. If there are later sections which *also*
+        # need to be reindented, then we'll catch them when we come back around.
+        if indent_stats.trough < 0:
+            reflow_logger.debug("    Stopping as we're back down.")
+            break
+
+    return line_results
+
+
+def lint_line_length(
+    elements: ReflowSequenceType,
+    root_segment: BaseSegment,
+    single_indent: str,
+    line_length_limit: int,
+    allow_implicit_indents: bool = False,
+    trailing_comments: str = "before",
+) -> Tuple[ReflowSequenceType, List[LintResult]]:
+    """Lint the sequence to lines over the configured length.
+
+    NOTE: This assumes that `lint_indent_points` has already
+    been run. The method won't necessarily *fail* but it does
+    assume that the current indent is correct and that indents
+    have already been inserted where they're missing.
+    """
+    # First check whether we should even be running this check.
+    if line_length_limit <= 0:
+        reflow_logger.debug("# Line length check disabled.")
+        return elements, []
+
+    reflow_logger.debug("# Evaluate lines for length.")
+    # Make a working copy to mutate.
+    elem_buffer: ReflowSequenceType = elements.copy()
+    line_buffer: ReflowSequenceType = []
+    results: List[LintResult] = []
+
+    last_indent_idx = None
+    for i, elem in enumerate(elem_buffer):
+        # Are there newlines in the element?
+        # If not, add it to the buffer and wait to evaluate the line.
+        # If yes, it's time to evaluate the line.
+
+        if isinstance(elem, ReflowPoint) and (
+            # Is it the end of the file?
+            # NOTE: Here, we're actually looking to see whether we're
+            # currently on the _point before the end of the file_ rather
+            # than actually on the final block. This is important because
+            # the following code assumes we're on a point and not a block.
+            # We're safe from indexing errors if we're on a point, because
+            # we know there's always a trailing block.
+            "end_of_file" in elem_buffer[i + 1].class_types
+            # Or is there a newline?
+            or has_untemplated_newline(cast(ReflowPoint, elem))
+        ):
+            # In either case we want to process this, so carry on.
+            pass
+        else:
+            # Otherwise build up the buffer and loop around again.
+            line_buffer.append(elem)
+            continue
+
+        # If we don't have a buffer yet, also carry on. Nothing to lint.
+        if not line_buffer:
+            continue
+
+        # Evaluate a line
+
+        # Get the current indent.
+        if last_indent_idx is not None:
+            current_indent = _deduce_line_current_indent(elem_buffer, last_indent_idx)
+        else:
+            current_indent = ""
+
+        # Get the length of all the elements on the line (other than the indent).
+        # NOTE: This is the length in the _source_, because that's the line
+        # length that the reader is actually looking at.
+        char_len = _source_char_len(line_buffer)
+
+        # Is the line over the length limit?
+        line_len = len(current_indent) + char_len
+        # NOTE: We should be able to rely on the first elements of the line having
+        # a non-zero number of segments. If this isn't the case we may need to add
+        # a clause to handle that scenario here.
+        assert line_buffer[0].segments
+        first_seg = line_buffer[0].segments[0]
+        line_no = first_seg.pos_marker.working_line_no
+        if line_len <= line_length_limit:
+            reflow_logger.info(
+                "    Line #%s. Length %s <= %s. OK.",
+                line_no,
+                line_len,
+                line_length_limit,
+            )
+        else:
+            reflow_logger.info(
+                "    Line #%s. Length %s > %s. PROBLEM.",
+                line_no,
+                line_len,
+                line_length_limit,
+            )
+
+            # Potential places to shorten the line are either indent locations
+            # or segments with a defined line position (like operators).
+
+            # NOTE: We make a buffer including the closing point, because we're
+            # looking for pairs of indents and dedents. The closing dedent for one
+            # of those pairs might be in the closing point so if we don't have it
+            # then we'll miss any locations which have their closing dedent at
+            # the end of the line.
+            line_elements = line_buffer + [elem]
+
+            # Type hints
+            fixes: List[LintFix]
+
+            # Identify rebreak spans first so we can work out their indentation
+            # in the next section.
+            # NOTE: In identifying spans, we give the method a little more than
+            # the line, so that it can correctly identify the ends of things
+            # accurately. It's safe to go to i+1 because there is always an
+            # end_of_file marker at the end which we could span into.
+            spans = identify_rebreak_spans(
+                line_elements + [elements[i + 1]], root_segment
+            )
+            reflow_logger.debug("    spans: %s", spans)
+            rebreak_priorities = _rebreak_priorities(spans)
+            reflow_logger.debug("    rebreak_priorities: %s", rebreak_priorities)
+
+            # Identify indent points second, taking into
+            # account rebreak_priorities.
+            matched_indents = _match_indents(
+                line_elements,
+                rebreak_priorities,
+                i,
+                allow_implicit_indents=allow_implicit_indents,
+            )
+            reflow_logger.debug("    matched_indents: %s", matched_indents)
+
+            # If we don't have any matched_indents, we don't have any options.
+            # This could be for things like comment lines.
+            desc = f"Line is too long ({line_len} > {line_length_limit})."
+            # The easiest option is lines ending with comments, provided the
+            # line isn't *all* comment and the comment itself is shorter than
+            # the limit. The reason for that last clause is that if the comment
+            # (plus an indent) is already longer than the limit, then there's no
+            # point just putting it on a new line - it will still fail - so it
+            # doesn't actually fix the issue. Deal with such lines first.
+            if (
+                len(line_buffer) > 1
+                # We can only fix _inline_ comments in this way. Others should
+                # just be flagged as issues.
+                and line_buffer[-1].segments[-1].is_type("inline_comment")
+            ):
+                reflow_logger.debug("    Handling as inline comment line.")
+                elem_buffer, fixes = _fix_long_line_with_comment(
+                    line_buffer,
+                    elem_buffer,
+                    current_indent,
+                    line_length_limit,
+                    last_indent_idx,
+                    trailing_comments=trailing_comments,
+                )
+
+            # Then check for cases where we have no other options.
+            elif not matched_indents:
+                # NOTE: In this case we have no options for shortening the line.
+                # We'll still report a linting issue - but no fixes are provided.
+                reflow_logger.debug("    Handling as unfixable line.")
+                fixes = []
+
+            # Lastly deal with the "normal" case.
+            else:
+                # For now, the algorithm we apply isn't particularly elegant
+                # and just finds the "outermost" opportunity to add additional
+                # line breaks and adds them.
+                # TODO: Make this more elegant later. The two obvious directions
+                # would be to potentially add a) line breaks at multiple levels
+                # in a single pass and b) to selectively skip levels if they're
+                # "trivial", or if there would be a more suitable inner indent
+                # to add first (e.g. the case of "(((((((a)))))))").
+                reflow_logger.debug("    Handling as normal line.")
+                # NOTE: Double indents (or more likely dedents) will
+                # potentially be in *multiple* sets - don't double count them
+                # if we start doing something more clever.
+                target_balance = min(matched_indents.keys())
+                desired_indent = current_indent
+                if target_balance >= 1:
+                    desired_indent += single_indent
+                target_breaks = matched_indents[target_balance]
+                reflow_logger.debug(
+                    "    Targeting balance of %s, indent: %r for %s",
+                    target_balance,
+                    desired_indent,
+                    target_breaks,
+                )
+
+                # Is one of the locations the final element? If so remove it.
+                # There's already a line break there.
+                if i in target_breaks:
+                    target_breaks.remove(i)
+
+                # Is it an "integer" indent or a fractional indent?
+                # Integer indents (i.e. 1.0, 2.0, ...) are based on Indent and
+                # Dedent tokens. Fractional indents (i.e. 1.5, 1.52, ...) are
+                # based more on rebreak spans (e.g. around commas and operators).
+                # The latter is simpler in that it doesn't change the indents,
+                # just adds line breaks. The former is more complicated.
+                # NOTE: Both of these methods mutate the `elem_buffer`.
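+                # e.g. a target balance of 1.0 re-indents the contents of
+                # a bracketed section, while a fractional balance such as
+                # 1.5 (or 1.52) just adds line breaks at rebreak points
+                # like commas or operators, leaving indents unchanged.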
+                if target_balance % 1 == 0:
+                    line_results = _fix_long_line_with_integer_targets(
+                        elem_buffer,
+                        target_breaks,
+                        line_length_limit,
+                        desired_indent,
+                        current_indent,
+                        allow_implicit_indents=allow_implicit_indents,
+                    )
+                else:
+                    line_results = _fix_long_line_with_fractional_targets(
+                        elem_buffer, target_breaks, desired_indent
+                    )
+
+                # Consolidate all the results for the line into one.
+                fixes = fixes_from_results(line_results)
+
+            results.append(
+                LintResult(
+                    # First segment on the line is the result anchor.
+                    first_seg,
+                    fixes=fixes,
+                    description=desc,
+                    source="reflow.long_line",
+                )
+            )
+
+        # Regardless of whether the line was good or not, clear
+        # the buffers ready for the next line.
+        line_buffer = []
+        last_indent_idx = i
+
+    return elem_buffer, results
diff --git a/src/sqlfluff/utils/reflow/respace.py b/src/sqlfluff/utils/reflow/respace.py
index 562e17c..6316a9e 100644
--- a/src/sqlfluff/utils/reflow/respace.py
+++ b/src/sqlfluff/utils/reflow/respace.py
@@ -4,9 +4,10 @@
 import logging
 from typing import List, Optional, Tuple, cast, TYPE_CHECKING
 
-from sqlfluff.core.parser import BaseSegment, RawSegment
+from sqlfluff.core.parser import BaseSegment, RawSegment, PositionMarker
 from sqlfluff.core.parser.segments.raw import WhitespaceSegment
 from sqlfluff.core.rules.base import LintFix, LintResult
+from sqlfluff.core.errors import SQLFluffUserError
 
 from sqlfluff.utils.reflow.helpers import pretty_segment_name
 
@@ -21,6 +22,37 @@ if TYPE_CHECKING:  # pragma: no cover
 reflow_logger = logging.getLogger("sqlfluff.rules.reflow")
 
 
+def _unpack_constraint(constraint: str, strip_newlines: bool):
+    """Unpack a spacing constraint.
+
+    Used as a helper function in `determine_constraints`.
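+
+    Illustrative examples (behaviour inferred from the logic below):
+
+    >>> _unpack_constraint("touch:inline", strip_newlines=False)
+    ('touch', True)
+    >>> _unpack_constraint("single", strip_newlines=False)
+    ('single', False)
+    >>> _unpack_constraint("align:alias_expression", strip_newlines=False)
+    ('align:alias_expression', False)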
+    """
+    # Check for deprecated options.
+    if constraint == "inline":  # pragma: no cover
+        reflow_logger.warning(
+            "Found 'inline' specified as a 'spacing_within' constraint. "
+            "This setting is deprecated and has been replaced by the more "
+            "explicit 'touch:inline'. Upgrade your configuration to "
+            "remove this warning."
+        )
+        constraint = "touch:inline"
+
+    # Unless align, split.
+    if constraint.startswith("align"):
+        modifier = ""
+    else:
+        constraint, _, modifier = constraint.partition(":")
+
+    if not modifier:
+        pass
+    elif modifier == "inline":
+        strip_newlines = True
+    else:  # pragma: no cover
+        raise SQLFluffUserError(f"Unexpected constraint modifier: {modifier!r}")
+
+    return constraint, strip_newlines
+
+
 def determine_constraints(
     prev_block: Optional["ReflowBlock"],
     next_block: Optional["ReflowBlock"],
@@ -28,49 +60,46 @@ def determine_constraints(
 ) -> Tuple[str, str, bool]:
     """Given the surrounding blocks, determine appropriate constraints."""
     # Start with the defaults.
-    pre_constraint = prev_block.spacing_after if prev_block else "single"
-    post_constraint = next_block.spacing_before if next_block else "single"
+    pre_constraint, strip_newlines = _unpack_constraint(
+        prev_block.spacing_after if prev_block else "single", strip_newlines
+    )
+    post_constraint, strip_newlines = _unpack_constraint(
+        next_block.spacing_before if next_block else "single", strip_newlines
+    )
 
     # Work out the common parent segment and depth
+    within_spacing = ""
     if prev_block and next_block:
         common = prev_block.depth_info.common_with(next_block.depth_info)
         # Just check the most immediate parent for now for speed.
         # TODO: Review whether just checking the parent is enough.
         # NOTE: spacing configs will be available on both sides if they're common
         # so it doesn't matter whether we get it from prev_block or next_block.
+        idx = prev_block.depth_info.stack_hashes.index(common[-1])
+
         within_constraint = prev_block.stack_spacing_configs.get(common[-1], None)
-        if not within_constraint:
-            pass
-        elif within_constraint in ("touch", "inline"):
-            # NOTE: inline is actually a more extreme version of "touch".
-            # Examples:
-            # - "inline" would be used with an object reference, where the
-            #   parts have to all be together on one line like `a.b.c`.
-            # - "touch" would allow the above layout, _but also_ allow an
-            #   an optional line break between, much like between an opening
-            #   bracket and the following element: `(a)` or:
-            #   ```
-            #   (
-            #       a
-            #   )
-            #   ```
-            if within_constraint == "inline":
-                # If they are then strip newlines.
-                strip_newlines = True
-            # If segments are expected to be touch within. Then modify
-            # constraints accordingly.
-            # NOTE: We don't override if it's already "any"
-            if pre_constraint != "any":
-                pre_constraint = "touch"
-            if post_constraint != "any":
-                post_constraint = "touch"
-        else:  # pragma: no cover
-            idx = prev_block.depth_info.stack_hashes.index(common[-1])
-            raise NotImplementedError(
-                f"Unexpected within constraint: {within_constraint} for "
-                f"{prev_block.depth_info.stack_class_types[idx]}"
+        if within_constraint:
+            within_spacing, strip_newlines = _unpack_constraint(
+                within_constraint, strip_newlines
             )
 
+    # If segments are expected to touch within, then modify
+    # constraints accordingly.
+    if within_spacing == "touch":
+        # NOTE: We don't override if it's already "any"
+        if pre_constraint != "any":
+            pre_constraint = "touch"
+        if post_constraint != "any":
+            post_constraint = "touch"
+    elif within_spacing == "single":
+        pass
+    elif within_spacing:  # pragma: no cover
+        assert prev_block
+        raise SQLFluffUserError(
+            f"Unexpected within constraint: {within_constraint!r} for "
+            f"{prev_block.depth_info.stack_class_types[idx]}"
+        )
+
     return pre_constraint, post_constraint, strip_newlines
 
 
@@ -89,6 +118,7 @@ def process_spacing(
             last_whitespace.append(seg)
 
         # If it's a newline, react accordingly.
+        # NOTE: This should only trigger on literal newlines.
         elif seg.is_type("newline", "end_of_file"):
             # Are we stripping newlines?
             if strip_newlines and seg.is_type("newline"):
@@ -96,7 +126,7 @@ def process_spacing(
                 removal_buffer.append(seg)
                 result_buffer.append(
                     LintResult(
-                        seg, [LintFix.delete(seg)], description="Stripping newlines."
+                        seg, [LintFix.delete(seg)], description="Unexpected line break."
                     )
                 )
                 # Carry on as though it wasn't here.
@@ -151,6 +181,7 @@ def _determine_aligned_inline_spacing(
     root_segment: BaseSegment,
     whitespace_seg: RawSegment,
     next_seg: RawSegment,
+    next_pos: PositionMarker,
     segment_type: str,
     align_within: Optional[str],
     align_scope: Optional[str],
@@ -159,7 +190,12 @@ def _determine_aligned_inline_spacing(
     # Find the level of segment that we're aligning.
     # NOTE: Reverse slice
     parent_segment = None
-    for ps in root_segment.path_to(next_seg)[::-1]:
+
+    # Edge case: if next_seg has no position, we should use the position
+    # of the whitespace for searching.
+    for ps in root_segment.path_to(next_seg if next_seg.pos_marker else whitespace_seg)[
+        ::-1
+    ]:
         if ps.segment.is_type(align_within):
             parent_segment = ps.segment
         if ps.segment.is_type(align_scope):
@@ -184,12 +220,27 @@ def _determine_aligned_inline_spacing(
                 sibling,
             )
 
+    # If there's only one sibling, we have nothing to compare to. Default to a single
+    # space.
+    if len(siblings) <= 1:
+        desired_space = " "
+        reflow_logger.debug(
+            "    desired_space: %r (based on no other siblings)",
+            desired_space,
+        )
+        return desired_space
+
+    # If the segment we're aligning has a position, use that position.
+    # If it doesn't, use the provided one; we can't do sibling analysis without it.
+    if next_seg.pos_marker:
+        next_pos = next_seg.pos_marker
+
     # Is the current indent the only one on the line?
     if any(
         # Same line
-        sibling.pos_marker.working_line_no == next_seg.pos_marker.working_line_no
+        sibling.pos_marker.working_line_no == next_pos.working_line_no
         # And not same position (i.e. not self)
-        and sibling.pos_marker.working_line_pos != next_seg.pos_marker.working_line_pos
+        and sibling.pos_marker.working_line_pos != next_pos.working_line_pos
         for sibling in siblings
     ):
         reflow_logger.debug("    Found sibling on same line. Treat as single")
@@ -229,9 +280,37 @@ def _determine_aligned_inline_spacing(
     return desired_space
 
 
+def _extract_alignment_config(
+    constraint: str,
+) -> Tuple[str, Optional[str], Optional[str]]:
+    """Helper function to break apart an alignment config.
+
+    >>> _extract_alignment_config("align:alias_expression")
+    ('alias_expression', None, None)
+    >>> _extract_alignment_config("align:alias_expression:statement")
+    ('alias_expression', 'statement', None)
+    >>> _extract_alignment_config("align:alias_expression:statement:bracketed")
+    ('alias_expression', 'statement', 'bracketed')
+    """
+    assert ":" in constraint
+    alignment_config = constraint.split(":")
+    assert alignment_config[0] == "align"
+    seg_type = alignment_config[1]
+    align_within = alignment_config[2] if len(alignment_config) > 2 else None
+    align_scope = alignment_config[3] if len(alignment_config) > 3 else None
+    reflow_logger.debug(
+        "    Alignment Config: %s, %s, %s",
+        seg_type,
+        align_within,
+        align_scope,
+    )
+    return seg_type, align_within, align_scope
+
+
 def handle_respace__inline_with_space(
     pre_constraint: str,
     post_constraint: str,
+    prev_block: Optional["ReflowBlock"],
     next_block: Optional["ReflowBlock"],
     root_segment: BaseSegment,
     segment_buffer: List[RawSegment],
@@ -287,33 +366,45 @@ def handle_respace__inline_with_space(
     ) or pre_constraint == post_constraint == "single":
         # Determine the desired spacing, either as alignment or as a single.
         if post_constraint.startswith("align") and next_block:
-            alignment_config = post_constraint.split(":")
-            seg_type = alignment_config[1]
-            align_within = alignment_config[2] if len(alignment_config) > 2 else None
-            align_scope = alignment_config[3] if len(alignment_config) > 3 else None
-            reflow_logger.debug(
-                "    Alignment Config: %s, %s, %s, %s",
-                seg_type,
-                align_within,
-                align_scope,
-                next_block.segments[0].pos_marker.working_line_pos,
+            seg_type, align_within, align_scope = _extract_alignment_config(
+                post_constraint
             )
 
-            desired_space = _determine_aligned_inline_spacing(
-                root_segment,
-                last_whitespace,
-                next_block.segments[0],
-                seg_type,
-                align_within,
-                align_scope,
-            )
+            next_pos: Optional[PositionMarker]
+            if next_block.segments[0].pos_marker:
+                next_pos = next_block.segments[0].pos_marker
+            elif last_whitespace.pos_marker:
+                next_pos = last_whitespace.pos_marker.end_point_marker()
+            # These second clauses are much less likely and so are excluded from
+            # coverage. If we find a way of covering them, that would be great
+            # but for now they exist as backups.
+            elif prev_block and prev_block.segments[-1].pos_marker:  # pragma: no cover
+                next_pos = prev_block.segments[-1].pos_marker.end_point_marker()
+            else:  # pragma: no cover
+                reflow_logger.info("Unable to find position marker for alignment.")
+                next_pos = None
+                desired_space = " "
+                desc = (
+                    "Expected only single space. " "Found " f"{last_whitespace.raw!r}."
+                )
 
-            desc = (
-                f"{seg_type!r} elements are expected to be aligned. Found "
-                "incorrect whitespace before "
-                f"{pretty_segment_name(next_block.segments[0])}: "
-                f"{last_whitespace.raw!r}."
-            )
+            if next_pos:
+                desired_space = _determine_aligned_inline_spacing(
+                    root_segment,
+                    last_whitespace,
+                    next_block.segments[0],
+                    next_pos,
+                    seg_type,
+                    align_within,
+                    align_scope,
+                )
+
+                desc = (
+                    f"{seg_type!r} elements are expected to be aligned. Found "
+                    "incorrect whitespace before "
+                    f"{pretty_segment_name(next_block.segments[0])}: "
+                    f"{last_whitespace.raw!r}."
+                )
         else:
             if next_block:
                 desc = (
@@ -379,6 +470,16 @@ def handle_respace__inline_without_space(
         # Either because there shouldn't be, or because "any"
         # means we shouldn't check.
         return segment_buffer, existing_results, False
+    # Are we supposed to be aligning?
+    elif post_constraint.startswith("align"):
+        reflow_logger.debug("    Inserting Aligned Whitespace.")
+        # TODO: We currently rely on a second pass to align
+        # insertions. This is where we could devise alignment
+        # in advance, but most of the alignment code relies on
+        # having existing position markers for those insertions.
+        # https://github.com/sqlfluff/sqlfluff/issues/4492
+        desired_space = " "
+        added_whitespace = WhitespaceSegment(desired_space)
     # Is it anything other than the default case?
     elif not (pre_constraint == post_constraint == "single"):  # pragma: no cover
         # TODO: This will get test coverage when configuration routines
@@ -386,15 +487,15 @@ def handle_respace__inline_without_space(
         raise NotImplementedError(
             f"Unexpected Constraints: {pre_constraint}, {post_constraint}"
         )
+    else:
+        # Default to a single whitespace
+        reflow_logger.debug("    Inserting Single Whitespace.")
+        added_whitespace = WhitespaceSegment()
 
-    # Handle the default case
-
-    # Insert a single whitespace.
-    reflow_logger.debug("    Inserting Single Whitespace.")
     # Add it to the buffer first (the easy bit). The hard bit
     # is to then determine how to generate the appropriate LintFix
     # objects.
-    segment_buffer.append(WhitespaceSegment())
+    segment_buffer.append(added_whitespace)
 
     # So special handling here. If segments either side
     # already exist then we don't care which we anchor on
@@ -441,9 +542,9 @@ def handle_respace__inline_without_space(
         # Mutate the fix, it's still in the same result, and that result
         # is still in the existing_results.
         if existing_fix == "before":
-            fix.edit = [cast(BaseSegment, WhitespaceSegment())] + fix.edit
+            fix.edit = [cast(BaseSegment, added_whitespace)] + fix.edit
         elif existing_fix == "after":
-            fix.edit = fix.edit + [cast(BaseSegment, WhitespaceSegment())]
+            fix.edit = fix.edit + [cast(BaseSegment, added_whitespace)]
 
         # No need to add new results, because we mutated the existing.
         return segment_buffer, existing_results, True
diff --git a/src/sqlfluff/utils/reflow/sequence.py b/src/sqlfluff/utils/reflow/sequence.py
index db32ed7..58d533a 100644
--- a/src/sqlfluff/utils/reflow/sequence.py
+++ b/src/sqlfluff/utils/reflow/sequence.py
@@ -10,8 +10,18 @@ from sqlfluff.core.rules.base import LintFix, LintResult
 from sqlfluff.utils.reflow.config import ReflowConfig
 from sqlfluff.utils.reflow.depthmap import DepthMap
 
-from sqlfluff.utils.reflow.elements import ReflowBlock, ReflowPoint, ReflowSequenceType
+from sqlfluff.utils.reflow.elements import (
+    ReflowBlock,
+    ReflowPoint,
+    ReflowSequenceType,
+    get_consumed_whitespace,
+)
 from sqlfluff.utils.reflow.rebreak import rebreak_sequence
+from sqlfluff.utils.reflow.reindent import (
+    lint_indent_points,
+    construct_single_indent,
+    lint_line_length,
+)
 from sqlfluff.utils.reflow.helpers import fixes_from_results
 
 # We're in the utils module, but users will expect reflow
@@ -127,7 +137,12 @@ class ReflowSequence:
         for seg in segments:
             # NOTE: end_of_file is block-like rather than point-like.
             # This is to facilitate better evaluation of the ends of files.
-            if seg.is_type("whitespace", "newline", "indent"):
+            # NOTE: This also allows us to include literal placeholders for
+            # whitespace-only strings.
+            if (
+                seg.is_type("whitespace", "newline", "indent")
+                or (get_consumed_whitespace(seg) or "").isspace()
+            ):
                 # Add to the buffer and move on.
                 seg_buff.append(seg)
                 continue
@@ -500,9 +515,6 @@ class ReflowSequence:
                 new_point = point
             # Otherwise apply the new fixes
             else:
-                reflow_logger.debug(
-                    "    Filter %r allows fixes for point: %s", filter, new_lint_results
-                )
                 lint_results = new_lint_results
 
             if pre and (not new_elements or new_elements[-1] != pre):
@@ -546,3 +558,65 @@ class ReflowSequence:
             depth_map=self.depth_map,
             lint_results=lint_results,
         )
+
+    def reindent(self):
+        """Reindent lines within a sequence."""
+        if self.lint_results:
+            raise NotImplementedError(  # pragma: no cover
+                "rebreak cannot currently handle pre-existing embodied fixes."
+            )
+
+        single_indent = construct_single_indent(
+            indent_unit=self.reflow_config.indent_unit,
+            tab_space_size=self.reflow_config.tab_space_size,
+        )
+
+        reflow_logger.info("# Evaluating indents.")
+        elements, indent_results = lint_indent_points(
+            self.elements,
+            single_indent=single_indent,
+            skip_indentation_in=self.reflow_config.skip_indentation_in,
+            allow_implicit_indents=self.reflow_config.allow_implicit_indents,
+        )
+
+        return ReflowSequence(
+            elements=elements,
+            root_segment=self.root_segment,
+            reflow_config=self.reflow_config,
+            depth_map=self.depth_map,
+            lint_results=indent_results,
+        )
+
+    def break_long_lines(self):
+        """Rebreak any remaining long lines in a sequence.
+
+        This assumes that reindent() has already been applied.
+        """
+        if self.lint_results:
+            raise NotImplementedError(  # pragma: no cover
+                "break_long_lines cannot currently handle pre-existing "
+                "embodied fixes."
+            )
+
+        single_indent = construct_single_indent(
+            indent_unit=self.reflow_config.indent_unit,
+            tab_space_size=self.reflow_config.tab_space_size,
+        )
+
+        reflow_logger.info("# Evaluating line lengths.")
+        elements, length_results = lint_line_length(
+            self.elements,
+            self.root_segment,
+            single_indent=single_indent,
+            line_length_limit=self.reflow_config.max_line_length,
+            allow_implicit_indents=self.reflow_config.allow_implicit_indents,
+            trailing_comments=self.reflow_config.trailing_comments,
+        )
+
+        return ReflowSequence(
+            elements=elements,
+            root_segment=self.root_segment,
+            reflow_config=self.reflow_config,
+            depth_map=self.depth_map,
+            lint_results=length_results,
+        )
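+
+
+# A minimal usage sketch (hypothetical; `from_root` and `get_fixes` are
+# assumed from the wider ReflowSequence API rather than shown in this diff):
+#
+#     sequence = ReflowSequence.from_root(parsed_tree, config)
+#     fixes = sequence.reindent().break_long_lines().get_fixes()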
diff --git a/src/sqlfluff/utils/testing/logging.py b/src/sqlfluff/utils/testing/logging.py
new file mode 100644
index 0000000..125327b
--- /dev/null
+++ b/src/sqlfluff/utils/testing/logging.py
@@ -0,0 +1,62 @@
+"""This is a modified log capture mechanism which reliably works.
+
+So that logs are handled appropriately by the CLI, sqlfluff
+modifies the root logger in a way that can conflict with pytest.
+
+See: https://github.com/pytest-dev/pytest/issues/3697
+
+This module provides a context manager to handle them better
+and to enable testing of logs while working around the restrictions
+of setting the `propagate` attribute of the logger in each test.
+
+Code adapted from:
+https://github.com/pytest-dev/pytest/issues/3697#issuecomment-792129636
+
+"""
+
+import logging
+from typing import Iterator
+from contextlib import contextmanager
+
+from _pytest.logging import LogCaptureHandler, _remove_ansi_escape_sequences
+
+
+class FluffLogHandler(LogCaptureHandler):
+    """A modified LogCaptureHandler which also exposes some helper functions.
+
+    The aim is to mimic some of the methods available on caplog.
+
+    See:
+    https://docs.pytest.org/en/7.1.x/_modules/_pytest/logging.html
+    """
+
+    @property
+    def text(self) -> str:
+        """The formatted log text."""
+        return _remove_ansi_escape_sequences(self.stream.getvalue())
+
+
+@contextmanager
+def fluff_log_catcher(level: int, logger_name: str) -> Iterator[FluffLogHandler]:
+    """Context manager that sets the level for capturing of logs.
+
+    After the end of the 'with' statement the level is restored to its
+    original value.
+
+    Args:
+        level (int): The lowest logging level to capture.
+        logger_name (str): The name of the logger to capture.
+    """
+    assert logger_name.startswith(
+        "sqlfluff"
+    ), "This should only be used with a SQLFluff logger."
+    logger = logging.getLogger(logger_name)
+    handler = FluffLogHandler()
+    orig_level = logger.level
+    logger.setLevel(level)
+    logger.addHandler(handler)
+    try:
+        yield handler
+    finally:
+        logger.setLevel(orig_level)
+        logger.removeHandler(handler)
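+
+
+# A minimal usage sketch (hypothetical logger level and assertion):
+#
+#     with fluff_log_catcher(logging.INFO, "sqlfluff.rules.reflow") as caplog:
+#         ...  # run code which emits logs
+#     assert "Evaluating" in caplog.text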
diff --git a/src/sqlfluff/utils/testing/rules.py b/src/sqlfluff/utils/testing/rules.py
index 0c5b52b..2c7e833 100644
--- a/src/sqlfluff/utils/testing/rules.py
+++ b/src/sqlfluff/utils/testing/rules.py
@@ -42,7 +42,7 @@ def load_test_cases(
         global_config = y.pop("configs", None)
         if global_config:
             for i in y:
-                if not ("configs" in y[i].keys()):
+                if "configs" not in y[i].keys():
                     y[i].update({"configs": global_config})
         ids.extend([rule + "_" + t for t in y])
         test_cases.extend([RuleTestCase(rule=rule, **v) for k, v in y.items()])
@@ -52,7 +52,7 @@ def load_test_cases(
 
 def get_rule_from_set(code, config):
     """Fetch a rule from the rule set."""
-    for r in get_ruleset().get_rulelist(config=config):
+    for r in get_ruleset().get_rulepack(config=config).rules:
         if r.code == code:  # pragma: no cover
             return r
     raise ValueError(f"{code!r} not in {get_ruleset()!r}")
diff --git a/test/api/classes_test.py b/test/api/classes_test.py
index 0f5c3c2..7a332f4 100644
--- a/test/api/classes_test.py
+++ b/test/api/classes_test.py
@@ -26,7 +26,7 @@ def test__api__linter_lint():
     tokens, _ = Lexer(dialect="ansi").lex(test_query)
     parsed = Parser(dialect="ansi").parse(tokens)
     violations = Linter(dialect="ansi").lint(parsed)
-    assert [v.rule.code for v in violations] == ["L009", "L010"]
+    assert [v.rule.code for v in violations] == ["CP01", "LT12"]
 
 
 def test__api__linter_fix():
diff --git a/test/api/info_test.py b/test/api/info_test.py
index 3b3d472..f487687 100644
--- a/test/api/info_test.py
+++ b/test/api/info_test.py
@@ -1,6 +1,7 @@
 """Test using sqlfluff to extract elements of queries."""
 
 import sqlfluff
+from sqlfluff.core.linter import RuleTuple
 
 
 def test__api__info_dialects():
@@ -14,4 +15,23 @@ def test__api__info_rules():
     """Basic linting of dialects."""
     rules = sqlfluff.list_rules()
     assert isinstance(rules, list)
-    assert ("L001", "Unnecessary trailing whitespace.") in rules
+    assert (
+        RuleTuple(
+            code="LT01",
+            name="layout.spacing",
+            description="Inappropriate Spacing.",
+            groups=("all", "core", "layout"),
+            aliases=(
+                "L001",
+                "L005",
+                "L006",
+                "L008",
+                "L023",
+                "L024",
+                "L039",
+                "L048",
+                "L071",
+            ),
+        )
+        in rules
+    )
diff --git a/test/api/simple_test.py b/test/api/simple_test.py
index 1186963..e83adfe 100644
--- a/test/api/simple_test.py
+++ b/test/api/simple_test.py
@@ -11,79 +11,91 @@ my_bad_query = "SeLEct  *, 1, blah as  fOO  from myTable"
 
 lint_result = [
     {
-        "code": "L010",
+        "code": "AM04",
+        "description": "Query produces an unknown number of result columns.",
         "line_no": 1,
         "line_pos": 1,
-        "description": "Keywords must be consistently upper case.",
+        "name": "ambiguous.column_count",
     },
     {
-        "code": "L036",
-        "description": "Select targets should be on a new line unless there is only "
-        "one select target.",
+        "code": "CP01",
         "line_no": 1,
         "line_pos": 1,
+        "description": "Keywords must be consistently upper case.",
+        "name": "capitalisation.keywords",
     },
     {
-        "code": "L044",
-        "description": "Query produces an unknown number of result columns.",
+        "code": "LT09",
+        "description": "Select targets should be on a new line unless there is only "
+        "one select target.",
         "line_no": 1,
         "line_pos": 1,
+        "name": "layout.select_targets",
     },
     {
-        "code": "L039",
+        "code": "LT01",
         "description": "Expected only single space before star '*'. Found '  '.",
         "line_no": 1,
         "line_pos": 7,
+        "name": "layout.spacing",
     },
     {
-        "code": "L013",
+        "code": "AL03",
         "line_no": 1,
         "line_pos": 12,
         "description": "Column expression without alias. Use explicit `AS` clause.",
+        "name": "aliasing.expression",
     },
     {
-        "code": "L010",
+        "code": "CP01",
         "line_no": 1,
         "line_pos": 20,
         "description": "Keywords must be consistently upper case.",
+        "name": "capitalisation.keywords",
     },
     {
-        "code": "L039",
+        "code": "LT01",
         "description": (
             "Expected only single space before naked identifier. Found '  '."
         ),
         "line_no": 1,
         "line_pos": 22,
+        "name": "layout.spacing",
     },
     {
-        "code": "L014",
+        "code": "CP02",
         "line_no": 1,
         "line_pos": 24,
         "description": "Unquoted identifiers must be consistently lower case.",
+        "name": "capitalisation.identifiers",
     },
     {
-        "code": "L039",
+        "code": "LT01",
         "description": "Expected only single space before 'from' keyword. Found '  '.",
         "line_no": 1,
         "line_pos": 27,
+        "name": "layout.spacing",
     },
     {
-        "code": "L010",
+        "code": "CP01",
         "line_no": 1,
         "line_pos": 29,
         "description": "Keywords must be consistently upper case.",
+        "name": "capitalisation.keywords",
     },
     {
-        "code": "L014",
+        "code": "CP02",
         "line_no": 1,
         "line_pos": 34,
         "description": "Unquoted identifiers must be consistently lower case.",
+        "name": "capitalisation.identifiers",
     },
     {
-        "code": "L009",
+        "code": "LT12",
         "line_no": 1,
         "line_pos": 41,
         "description": "Files must end with a single trailing newline.",
+        "name": "layout.end-of-file",
     },
 ]
 
@@ -106,7 +118,7 @@ def test__api__lint_string():
 
 def test__api__lint_string_specific():
     """Basic checking of lint functionality."""
-    rules = ["L014", "L009"]
+    rules = ["CP02", "LT12"]
     result = sqlfluff.lint(my_bad_query, rules=rules)
     # Check which rules are found
     assert all(elem["code"] in rules for elem in result)
@@ -114,7 +126,7 @@ def test__api__lint_string_specific():
 
 def test__api__lint_string_specific_single():
     """Basic checking of lint functionality."""
-    rules = ["L014"]
+    rules = ["CP02"]
     result = sqlfluff.lint(my_bad_query, rules=rules)
     # Check which rules are found
     assert all(elem["code"] in rules for elem in result)
@@ -122,27 +134,27 @@ def test__api__lint_string_specific_single():
 
 def test__api__lint_string_specific_exclude():
     """Basic checking of lint functionality."""
-    exclude_rules = ["L009", "L010", "L013", "L014", "L036", "L039"]
+    exclude_rules = ["LT12", "CP01", "AL03", "CP02", "LT09", "LT01"]
     result = sqlfluff.lint(my_bad_query, exclude_rules=exclude_rules)
-    # Check only L044 is found
+    # Check only AM04 is found
     assert len(result) == 1
-    assert "L044" == result[0]["code"]
+    assert "AM04" == result[0]["code"]
 
 
 def test__api__lint_string_specific_exclude_single():
     """Basic checking of lint functionality."""
-    exclude_rules = ["L039"]
+    exclude_rules = ["LT01"]
     result = sqlfluff.lint(my_bad_query, exclude_rules=exclude_rules)
-    # Check only L044 is found
+    # Check the remaining rule codes are found
     assert len(result) == 9
-    set(["L009", "L010", "L013", "L014", "L036", "L044"]) == set(
+    set(["LT12", "CP01", "AL03", "CP02", "LT09", "AM04"]) == set(
         [r["code"] for r in result]
     )
 
 
 def test__api__lint_string_specific_exclude_all_failed_rules():
     """Basic checking of lint functionality."""
-    exclude_rules = ["L009", "L010", "L013", "L014", "L036", "L039", "L044"]
+    exclude_rules = ["LT12", "CP01", "AL03", "CP02", "LT09", "LT01", "AM04"]
     result = sqlfluff.lint(my_bad_query, exclude_rules=exclude_rules)
     # Check it passes
     assert result == []
@@ -167,14 +179,14 @@ FROM mytable
 
 def test__api__fix_string_specific():
     """Basic checking of lint functionality with a specific rule."""
-    result = sqlfluff.fix(my_bad_query, rules=["L010"])
+    result = sqlfluff.fix(my_bad_query, rules=["CP01"])
     # Check actual result
     assert result == "SELECT  *, 1, blah AS  fOO  FROM myTable"
 
 
 def test__api__fix_string_specific_exclude():
     """Basic checking of lint functionality with a specific rule exclusion."""
-    result = sqlfluff.fix(my_bad_query, exclude_rules=["L036"])
+    result = sqlfluff.fix(my_bad_query, exclude_rules=["LT09"])
     # Check actual result
     assert result == "SELECT *, 1, blah AS foo FROM mytable\n"
 
@@ -184,7 +196,7 @@ def test__api__fix_string_unparsable():
     bad_query = """SELECT my_col
 FROM my_schema.my_table
 where processdate ! 3"""
-    result = sqlfluff.fix(bad_query, rules=["L010"])
+    result = sqlfluff.fix(bad_query, rules=["CP01"])
     # Check fix result: should be unchanged because of the parse error.
     assert result == bad_query
 
@@ -194,7 +206,7 @@ def test__api__fix_string_unparsable_fix_even_unparsable():
     bad_query = """SELECT my_col
 FROM my_schema.my_table
 where processdate ! 3"""
-    result = sqlfluff.fix(bad_query, rules=["L010"], fix_even_unparsable=True)
+    result = sqlfluff.fix(bad_query, rules=["CP01"], fix_even_unparsable=True)
     # Check fix result: should be fixed because we overrode fix_even_unparsable.
     assert (
         result
@@ -268,8 +280,8 @@ def test__api__config_path():
         ),
         (
             # API overrides, so it uses that
-            dict(exclude_rules=["L027"]),
-            {"L029"},
+            dict(exclude_rules=["RF02"]),
+            {"RF04"},
         ),
     ],
 )
@@ -278,7 +290,7 @@ def test__api__config_override(kwargs, expected, tmpdir):
     config_path = "test/fixtures/api/config_override/.sqlfluff"
     sql = "SELECT TRIM(name) AS name FROM some_table"
     lint_results = sqlfluff.lint(sql, config_path=config_path, **kwargs)
-    assert expected == {"L027", "L029"}.intersection(
+    assert expected == {"RF02", "RF04"}.intersection(
         {lr["code"] for lr in lint_results}
     )
 
diff --git a/test/cli/autocomplete_test.py b/test/cli/autocomplete_test.py
index 0170162..6480348 100644
--- a/test/cli/autocomplete_test.py
+++ b/test/cli/autocomplete_test.py
@@ -8,7 +8,8 @@ from sqlfluff.cli.autocomplete import dialect_shell_complete
     "incomplete,expected",
     [
         ["an", ["ansi"]],
-        ["d", ["databricks", "db2"]],
+        ["d", ["databricks", "db2", "duckdb"]],
+        ["g", ["greenplum"]],
         ["s", ["snowflake", "soql", "sparksql", "sqlite"]],
         ["post", ["postgres"]],
     ],
diff --git a/test/cli/commands_test.py b/test/cli/commands_test.py
index 2048a0f..d1707f3 100644
--- a/test/cli/commands_test.py
+++ b/test/cli/commands_test.py
@@ -28,6 +28,7 @@ from sqlfluff.cli.commands import (
     version,
     rules,
     fix,
+    cli_format,
     parse,
     dialects,
     get_config,
@@ -73,10 +74,9 @@ def contains_ansi_escape(s: str) -> bool:
 
 
 expected_output = """== [test/fixtures/linter/indentation_error_simple.sql] FAIL
-L:   2 | P:   4 | L003 | Expected 1 indentation, found less than 1 [compared to
-                       | line 01]
-L:   5 | P:  10 | L010 | Keywords must be consistently upper case.
-L:   5 | P:  13 | L031 | Avoid aliases in from clauses and join conditions.
+L:   2 | P:   1 | LT02 | Expected indent of 4 spaces. [layout.indent]
+L:   5 | P:  10 | CP01 | Keywords must be consistently upper case.
+                       | [capitalisation.keywords]
 """
 
 
@@ -93,7 +93,7 @@ def test__cli__command_directed():
         ],
     )
     # We should get a readout of what the error was
-    check_a = "L:   2 | P:   4 | L003"
+    check_a = "L:   2 | P:   1 | LT02"
     # NB: Skip the number at the end because it's configurable
     check_b = "ndentation"
     assert check_a in result.output
@@ -270,7 +270,7 @@ def test__cli__command_render_stdin():
                 "-n",
                 "test/fixtures/cli/passing_b.sql",
                 "--exclude-rules",
-                "L051",
+                "AM05",
             ],
         ),
         # Basic render
@@ -293,7 +293,7 @@ def test__cli__command_render_stdin():
                 "test/fixtures/cli/passing_b.sql",
                 "-vvvvvvvvvvv",
                 "--exclude-rules",
-                "L051",
+                "AM05",
             ],
         ),
         # Test basic linting with specific logger.
@@ -307,7 +307,7 @@ def test__cli__command_render_stdin():
                 "--logger",
                 "parser",
                 "-e",
-                "L051",
+                "AM05",
             ],
         ),
         # Check basic parsing
@@ -317,7 +317,7 @@ def test__cli__command_render_stdin():
                 "-n",
                 "test/fixtures/cli/passing_b.sql",
                 "--exclude-rules",
-                "L051",
+                "AM05",
             ],
         ),
         # Test basic parsing with very high verbosity
@@ -328,7 +328,7 @@ def test__cli__command_render_stdin():
                 "test/fixtures/cli/passing_b.sql",
                 "-vvvvvvvvvvv",
                 "-e",
-                "L051",
+                "AM05",
             ],
         ),
         # Check basic parsing, with the code only option
@@ -336,6 +336,8 @@ def test__cli__command_render_stdin():
         # Check basic parsing, with the yaml output
         (parse, ["-n", "test/fixtures/cli/passing_b.sql", "-c", "-f", "yaml"]),
         (parse, ["-n", "test/fixtures/cli/passing_b.sql", "--format", "yaml"]),
+        # Check parsing with no output (used mostly for testing)
+        (parse, ["-n", "test/fixtures/cli/passing_b.sql", "--format", "none"]),
         # Check the profiler and benching commands
         (parse, ["-n", "test/fixtures/cli/passing_b.sql", "--profiler"]),
         (parse, ["-n", "test/fixtures/cli/passing_b.sql", "--bench"]),
@@ -346,7 +348,7 @@ def test__cli__command_render_stdin():
                 "test/fixtures/cli/passing_b.sql",
                 "--bench",
                 "--exclude-rules",
-                "L051",
+                "AM05",
             ],
         ),
         (
@@ -356,7 +358,7 @@ def test__cli__command_render_stdin():
                 "test/fixtures/cli/passing_b.sql",
                 "--bench",
                 "--exclude-rules",
-                "L051",
+                "AM05",
             ],
         ),
         # Check linting works in specifying rules
@@ -365,7 +367,7 @@ def test__cli__command_render_stdin():
             [
                 "-n",
                 "--rules",
-                "L001",
+                "CP01",
                 "test/fixtures/linter/operator_errors.sql",
             ],
         ),
@@ -375,7 +377,7 @@ def test__cli__command_render_stdin():
             [
                 "-n",
                 "--rules",
-                "L001,L002",
+                "CP01,LT02",
                 "test/fixtures/linter/operator_errors.sql",
             ],
         ),
@@ -385,9 +387,9 @@ def test__cli__command_render_stdin():
             [
                 "-n",
                 "--rules",
-                "L001,L006",
+                "CP01,LT01",
                 "--exclude-rules",
-                "L006,L031",
+                "LT01,AL07",
                 "test/fixtures/linter/operator_errors.sql",
             ],
         ),
@@ -397,7 +399,7 @@ def test__cli__command_render_stdin():
             [
                 "-n",
                 "--exclude-rules",
-                "L006,L007,L031,L039,L071",
+                "LT01,LT03,AL07",
                 "test/fixtures/linter/operator_errors.sql",
             ],
         ),
@@ -407,7 +409,7 @@ def test__cli__command_render_stdin():
             [
                 "-n",
                 "--exclude-rules",
-                "L003,L009,L031",
+                "LT02,LT12,AL07",
                 "--ignore",
                 "parsing,lexing",
                 "test/fixtures/linter/parse_lex_error.sql",
@@ -451,7 +453,7 @@ def test__cli__command_lint_parse(command):
                 fix,
                 [
                     "--rules",
-                    "L001",
+                    "LT01",
                     "test/fixtures/cli/fail_many.sql",
                     "-vvvvvvv",
                 ],
@@ -465,7 +467,7 @@ def test__cli__command_lint_parse(command):
                 fix,
                 [
                     "--rules",
-                    "L001",
+                    "LT01",
                     "--fixed-suffix",
                     "_fix",
                     "test/fixtures/cli/fail_many.sql",
@@ -487,6 +489,46 @@ def test__cli__command_lint_parse(command):
             ),
             1,
         ),
+        # Format
+        (
+            (
+                cli_format,
+                [
+                    "--fixed-suffix",
+                    "_fix",
+                    "test/fixtures/linter/whitespace_errors.sql",
+                ],
+            ),
+            0,
+        ),
+        # Format with --persist-timing
+        (
+            (
+                cli_format,
+                [
+                    "--fixed-suffix",
+                    "_fix",
+                    "test/fixtures/linter/whitespace_errors.sql",
+                    "--persist-timing",
+                    "test.csv",
+                ],
+            ),
+            0,
+        ),
+        # Format (specifying rules)
+        (
+            (
+                cli_format,
+                [
+                    "--rules",
+                    "LT01",
+                    "--fixed-suffix",
+                    "_fix",
+                    "test/fixtures/linter/whitespace_errors.sql",
+                ],
+            ),
+            2,
+        ),
         # Template syntax error in macro file
         (
             (
@@ -503,6 +545,18 @@ def test__cli__command_lint_parse(command):
             ),
             1,
         ),
+        # Test a longer lint fail with --bench
+        # This tests the threshold rules clause
+        (
+            (
+                lint,
+                [
+                    "test/fixtures/linter/autofix/bigquery/004_templating/before.sql",
+                    "--bench",
+                ],
+            ),
+            1,
+        ),
     ],
 )
 def test__cli__command_lint_parse_with_retcode(command, ret_code):
@@ -534,14 +588,14 @@ def test__cli__command_lint_skip_ignore_files():
         ],
     )
     assert result.exit_code == 1
-    assert "L009" in result.output.strip()
+    assert "LT12" in result.output.strip()
 
 
 def test__cli__command_lint_ignore_local_config():
     """Test that --ignore-local_config ignores .sqlfluff file as expected."""
     runner = CliRunner()
     # First we test that not including the --ignore-local-config includes
-    # .sqlfluff file, and therefore the lint doesn't raise L012
+    # .sqlfluff file, and therefore the lint doesn't raise AL02
     result = runner.invoke(
         lint,
         [
@@ -549,9 +603,9 @@ def test__cli__command_lint_ignore_local_config():
         ],
     )
     assert result.exit_code == 0
-    assert "L012" not in result.output.strip()
+    assert "AL02" not in result.output.strip()
     # Then repeat the same lint but this time ignoring the .sqlfluff file.
-    # We should see L012 raised.
+    # We should see AL02 raised.
     result = runner.invoke(
         lint,
         [
@@ -561,7 +615,7 @@ def test__cli__command_lint_ignore_local_config():
         ],
     )
     assert result.exit_code == 1
-    assert "L012" in result.output.strip()
+    assert "AL02" in result.output.strip()
 
 
 def test__cli__command_lint_warning():
@@ -586,7 +640,7 @@ def test__cli__command_lint_warning():
     # But should also contain the warnings.
     # NOTE: Not including the whole description because it's too long.
     assert (
-        "L:   4 | P:   9 | L006 | WARNING: Expected single whitespace"
+        "L:   4 | P:   9 | LT01 | WARNING: Expected single whitespace"
         in result.output.strip()
     )
 
@@ -693,11 +747,11 @@ def generic_roundtrip_test(
 @pytest.mark.parametrize(
     "rule,fname",
     [
-        ("L001", "test/fixtures/linter/indentation_errors.sql"),
-        ("L008", "test/fixtures/linter/whitespace_errors.sql"),
-        ("L008", "test/fixtures/linter/indentation_errors.sql"),
+        ("LT01", "test/fixtures/linter/indentation_errors.sql"),
+        ("LT01", "test/fixtures/linter/whitespace_errors.sql"),
+        ("LT01", "test/fixtures/linter/indentation_errors.sql"),
         # Really stretching the ability of the fixer to re-indent a file
-        ("L003", "test/fixtures/linter/indentation_error_hard.sql"),
+        ("LT02", "test/fixtures/linter/indentation_error_hard.sql"),
     ],
 )
 def test__cli__command__fix(rule, fname):
@@ -717,7 +771,7 @@ def test__cli__command__fix(rule, fname):
             FROM my_schema.my_table
             where processdate ! 3
             """,
-            ["--force", "--fixed-suffix", "FIXED", "--rules", "L010"],
+            ["--force", "--fixed-suffix", "FIXED", "--rules", "CP01"],
             None,
             1,
         ),
@@ -730,7 +784,7 @@ def test__cli__command__fix(rule, fname):
             where processdate {{ condition }}
             """,
             # Test the short versions of the options.
-            ["--force", "-x", "FIXED", "-r", "L010"],
+            ["--force", "-x", "FIXED", "-r", "CP01"],
             None,
             1,
         ),
@@ -744,7 +798,7 @@ def test__cli__command__fix(rule, fname):
             where processdate ! 3  -- noqa: PRS
             """,
             # Test the short versions of the options.
-            ["--force", "-x", "FIXED", "-r", "L010"],
+            ["--force", "-x", "FIXED", "-r", "CP01"],
             None,
             1,
         ),
@@ -756,7 +810,7 @@ def test__cli__command__fix(rule, fname):
             FROM my_schema.my_table
             WHERE processdate ! 3
             """,
-            ["--force", "--fixed-suffix", "FIXED", "--rules", "L010"],
+            ["--force", "--fixed-suffix", "FIXED", "--rules", "CP01"],
             None,
             1,
         ),
@@ -768,7 +822,7 @@ def test__cli__command__fix(rule, fname):
             FROM my_schema.my_table
             WHERE processdate ! 3  --noqa: PRS
             """,
-            ["--force", "--fixed-suffix", "FIXED", "--rules", "L010"],
+            ["--force", "--fixed-suffix", "FIXED", "--rules", "CP01"],
             None,
             0,
         ),
@@ -786,7 +840,7 @@ def test__cli__command__fix(rule, fname):
                 "--fixed-suffix",
                 "FIXED",
                 "--rules",
-                "L010",
+                "CP01",
                 "--FIX-EVEN-UNPARSABLE",
             ],
             """
@@ -816,7 +870,7 @@ def test__cli__command__fix(rule, fname):
                 FROM my_schema.my_table
                 where processdate != 3""",
             ],
-            ["--force", "--fixed-suffix", "FIXED", "--rules", "L010"],
+            ["--force", "--fixed-suffix", "FIXED", "--rules", "CP01"],
             [
                 None,
                 """SELECT my_col
@@ -964,10 +1018,10 @@ def _mock_eval(rule, context):
         ("-- noqa: disable=all\n-- Comment A\nSELECT 1 FROM foo", 0),
     ],
 )
-@patch("sqlfluff.rules.L001.Rule_L001._eval", _mock_eval)
+@patch("sqlfluff.rules.layout.LT01.Rule_LT01._eval", _mock_eval)
 def test__cli__fix_loop_limit_behavior(sql, exit_code, tmpdir):
     """Tests how "fix" behaves when the loop limit is exceeded."""
-    fix_args = ["--force", "--fixed-suffix", "FIXED", "--rules", "L001"]
+    fix_args = ["--force", "--fixed-suffix", "FIXED", "--rules", "LT01"]
     tmp_path = pathlib.Path(str(tmpdir))
     filepath = tmp_path / "testing.sql"
     filepath.write_text(textwrap.dedent(sql))
@@ -989,39 +1043,15 @@ def test__cli__fix_loop_limit_behavior(sql, exit_code, tmpdir):
     assert not fixed_path.is_file()
 
 
-# Test case disabled because there isn't a good example of where to test this.
-# This *should* test the case where a rule DOES have a proposed fix, but for
-# some reason when we try to apply it, there's a failure.
-# @pytest.mark.parametrize('rule,fname', [
-#     # NB: L004 currently has no fix routine.
-#     ('L004', 'test/fixtures/linter/indentation_errors.sql')
-# ])
-# def test__cli__command__fix_fail(rule, fname):
-#     """Test the round trip of detecting, fixing and then still detecting the rule."""
-#     with open(fname, mode='r') as test_file:
-#         generic_roundtrip_test(test_file, rule, fix_exit_code=1, final_exit_code=65)
-
-
 @pytest.mark.parametrize(
     "stdin,rules,stdout",
     [
-        ("select * from t", "L003", "select * from t"),  # no change
+        ("select * from t", "LT02", "select * from t"),  # no change
         (
             " select * from t",
-            "L003",
+            "LT02",
             "select * from t",
         ),  # fix preceding whitespace
-        # L031 fix aliases in joins
-        (
-            "SELECT u.id, c.first_name, c.last_name, COUNT(o.user_id) "
-            "FROM users as u JOIN customers as c on u.id = c.user_id JOIN orders as o "
-            "on u.id = o.user_id;",
-            "L031",
-            "SELECT users.id, customers.first_name, customers.last_name, "
-            "COUNT(orders.user_id) "
-            "FROM users JOIN customers on users.id = customers.user_id JOIN orders on "
-            "users.id = orders.user_id;",
-        ),
     ],
 )
 def test__cli__command_fix_stdin(stdin, rules, stdout):
@@ -1036,6 +1066,28 @@ def test__cli__command_fix_stdin(stdin, rules, stdout):
     assert result.output == stdout
 
 
+@pytest.mark.parametrize(
+    "stdin,stdout",
+    [
+        ("select * from t\n", "select * from t\n"),  # no change
+        (
+            "   select    *    FRoM     t    ",
+            "select * from t\n",
+        ),
+    ],
+)
+def test__cli__command_format_stdin(stdin, stdout):
+    """Check stdin input for fix works."""
+    result = invoke_assert_code(
+        args=[
+            cli_format,
+            ("-", "--disable-progress-bar", "--dialect=ansi"),
+        ],
+        cli_input=stdin,
+    )
+    assert result.output == stdout
+
+
 def test__cli__command_fix_stdin_logging_to_stderr(monkeypatch):
     """Check that logging goes to stderr when stdin is passed to fix."""
     perfect_sql = "select col from table"
@@ -1048,7 +1100,7 @@ def test__cli__command_fix_stdin_logging_to_stderr(monkeypatch):
 
     monkeypatch.setattr(sqlfluff.cli.commands, "Linter", MockLinter)
     result = invoke_assert_code(
-        args=[fix, ("-", "--rules=L003", "--dialect=ansi")],
+        args=[fix, ("-", "--rules=LT02", "--dialect=ansi")],
         cli_input=perfect_sql,
         mix_stderr=False,
     )
@@ -1110,8 +1162,8 @@ def test__cli__command_fix_stdin_error_exit_code(
 @pytest.mark.parametrize(
     "rule,fname,prompt,exit_code,fix_exit_code",
     [
-        ("L001", "test/fixtures/linter/indentation_errors.sql", "y", 0, 0),
-        ("L001", "test/fixtures/linter/indentation_errors.sql", "n", 1, 1),
+        ("LT01", "test/fixtures/linter/indentation_errors.sql", "y", 0, 0),
+        ("LT01", "test/fixtures/linter/indentation_errors.sql", "n", 1, 1),
     ],
 )
 def test__cli__command__fix_no_force(rule, fname, prompt, exit_code, fix_exit_code):
@@ -1163,7 +1215,7 @@ def test__cli__command_parse_serialize_from_stdin(serialize, write_file, tmp_pat
     assert result["filepath"] == "stdin"
 
 
-@pytest.mark.parametrize("serialize", ["yaml", "json"])
+@pytest.mark.parametrize("serialize", ["yaml", "json", "none"])
 @pytest.mark.parametrize(
     "sql,expected,exit_code",
     [
@@ -1175,16 +1227,18 @@ def test__cli__command_parse_serialize_from_stdin(serialize, write_file, tmp_pat
                     "filepath": "stdin",
                     "violations": [
                         {
-                            "code": "L010",
+                            "code": "CP01",
                             "line_no": 1,
                             "line_pos": 1,
                             "description": "Keywords must be consistently upper case.",
+                            "name": "capitalisation.keywords",
                         },
                         {
-                            "code": "L010",
+                            "code": "CP01",
                             "line_no": 1,
                             "line_pos": 10,
                             "description": "Keywords must be consistently upper case.",
+                            "name": "capitalisation.keywords",
                         },
                     ],
                 }
@@ -1201,7 +1255,7 @@ def test__cli__command_lint_serialize_from_stdin(serialize, sql, expected, exit_
             (
                 "-",
                 "--rules",
-                "L010",
+                "CP01",
                 "--format",
                 serialize,
                 "--disable-progress-bar",
@@ -1216,6 +1270,8 @@ def test__cli__command_lint_serialize_from_stdin(serialize, sql, expected, exit_
         assert json.loads(result.output) == expected
     elif serialize == "yaml":
         assert yaml.safe_load(result.output) == expected
+    elif serialize == "none":
+        assert result.output == ""
     else:
         raise Exception
 
@@ -1268,7 +1324,7 @@ def test__cli__command_lint_nocolor(isatty, should_strip_ansi, capsys, tmpdir):
 
 @pytest.mark.parametrize(
     "serialize",
-    ["human", "yaml", "json", "github-annotation", "github-annotation-native"],
+    ["human", "yaml", "json", "github-annotation", "github-annotation-native", "none"],
 )
 @pytest.mark.parametrize("write_file", [None, "outfile"])
 def test__cli__command_lint_serialize_multiple_files(serialize, write_file, tmp_path):
@@ -1301,7 +1357,8 @@ def test__cli__command_lint_serialize_multiple_files(serialize, write_file, tmp_
         ret_code=1,
     )
 
-    if write_file:
+    # NOTE: The "none" serializer doesn't write a file even if specified.
+    if write_file and serialize != "none":
         with open(target_file, "r") as payload_file:
             result_payload = payload_file.read()
     else:
@@ -1309,12 +1366,15 @@ def test__cli__command_lint_serialize_multiple_files(serialize, write_file, tmp_
 
     # Print for debugging.
     payload_length = len(result_payload.split("\n"))
-    print(f"## Payload (length {payload_length}):")
+    print("=== BEGIN RESULT OUTPUT")
     print(result_payload)
-    print("## End Payload")
+    print("=== END RESULT OUTPUT")
+    print("Result length:", payload_length)
 
     if serialize == "human":
-        assert payload_length == 26 if write_file else 32
+        assert payload_length == (23 if write_file else 32)
+    elif serialize == "none":
+        assert payload_length == 1  # There will be a single newline.
     elif serialize == "json":
         result = json.loads(result_payload)
         assert len(result) == 2
@@ -1330,7 +1390,7 @@ def test__cli__command_lint_serialize_multiple_files(serialize, write_file, tmp_
         # SQLFluff produces trailing newline
         if result[-1] == "":
             del result[-1]
-        assert len(result) == 17
+        assert len(result) == 11
     else:
         raise Exception
 
@@ -1360,11 +1420,11 @@ def test__cli__command_lint_serialize_github_annotation():
             "file": os.path.normpath(
                 "test/fixtures/linter/identifier_capitalisation.sql"
             ),
-            "line": 1,
-            "message": "L036: Select targets should be on a new line unless there is "
-            "only one select target.",
-            "start_column": 1,
-            "end_column": 1,
+            "line": 2,
+            "message": "RF02: Unqualified reference 'foo' found in select with more "
+            "than one referenced table/view.",
+            "start_column": 5,
+            "end_column": 5,
             "title": "SQLFluff",
         },
         {
@@ -1373,11 +1433,10 @@ def test__cli__command_lint_serialize_github_annotation():
             "file": os.path.normpath(
                 "test/fixtures/linter/identifier_capitalisation.sql"
             ),
-            "line": 2,
-            "message": "L027: Unqualified reference 'foo' found in select with more "
-            "than one referenced table/view.",
-            "start_column": 5,
-            "end_column": 5,
+            "line": 3,
+            "message": "LT02: Expected indent of 8 spaces.",
+            "start_column": 1,
+            "end_column": 1,
             "title": "SQLFluff",
         },
         {
@@ -1387,7 +1446,7 @@ def test__cli__command_lint_serialize_github_annotation():
                 "test/fixtures/linter/identifier_capitalisation.sql"
             ),
             "line": 3,
-            "message": "L012: Implicit/explicit aliasing of columns.",
+            "message": "AL02: Implicit/explicit aliasing of columns.",
             "start_column": 5,
             "end_column": 5,
             "title": "SQLFluff",
@@ -1399,7 +1458,7 @@ def test__cli__command_lint_serialize_github_annotation():
                 "test/fixtures/linter/identifier_capitalisation.sql"
             ),
             "line": 3,
-            "message": "L014: Unquoted identifiers must be consistently lower case.",
+            "message": "CP02: Unquoted identifiers must be consistently lower case.",
             "start_column": 5,
             "end_column": 5,
             "title": "SQLFluff",
@@ -1411,7 +1470,7 @@ def test__cli__command_lint_serialize_github_annotation():
                 "test/fixtures/linter/identifier_capitalisation.sql"
             ),
             "line": 4,
-            "message": "L010: Keywords must be consistently lower case.",
+            "message": "CP01: Keywords must be consistently lower case.",
             "start_column": 1,
             "end_column": 1,
             "title": "SQLFluff",
@@ -1423,7 +1482,7 @@ def test__cli__command_lint_serialize_github_annotation():
                 "test/fixtures/linter/identifier_capitalisation.sql"
             ),
             "line": 4,
-            "message": "L014: Unquoted identifiers must be consistently lower case.",
+            "message": "CP02: Unquoted identifiers must be consistently lower case.",
             "start_column": 12,
             "end_column": 12,
             "title": "SQLFluff",
@@ -1435,7 +1494,7 @@ def test__cli__command_lint_serialize_github_annotation():
                 "test/fixtures/linter/identifier_capitalisation.sql"
             ),
             "line": 4,
-            "message": "L014: Unquoted identifiers must be consistently lower case.",
+            "message": "CP02: Unquoted identifiers must be consistently lower case.",
             "start_column": 18,
             "end_column": 18,
             "title": "SQLFluff",
@@ -1466,22 +1525,25 @@ def test__cli__command_lint_serialize_github_annotation_native():
 
     assert result.output == "\n".join(
         [
-            f"::error title=SQLFluff,file={fpath_normalised},line=1,col=1::"
-            "L036: Select targets should be on a new line unless there is only one "
-            "select target.",
             f"::error title=SQLFluff,file={fpath_normalised},line=2,col=5::"
-            "L027: Unqualified reference 'foo' found in select with more than one "
-            "referenced table/view.",
+            "RF02: Unqualified reference 'foo' found in select with more than one "
+            "referenced table/view. [references.qualification]",
+            f"::error title=SQLFluff,file={fpath_normalised},line=3,col=1::"
+            "LT02: Expected indent of 8 spaces. [layout.indent]",
             f"::error title=SQLFluff,file={fpath_normalised},line=3,col=5::"
-            "L012: Implicit/explicit aliasing of columns.",
+            "AL02: Implicit/explicit aliasing of columns. [aliasing.column]",
             f"::error title=SQLFluff,file={fpath_normalised},line=3,col=5::"
-            "L014: Unquoted identifiers must be consistently lower case.",
+            "CP02: Unquoted identifiers must be consistently lower case. "
+            "[capitalisation.identifiers]",
             f"::error title=SQLFluff,file={fpath_normalised},line=4,col=1::"
-            "L010: Keywords must be consistently lower case.",
+            "CP01: Keywords must be consistently lower case. "
+            "[capitalisation.keywords]",
             f"::error title=SQLFluff,file={fpath_normalised},line=4,col=12::"
-            "L014: Unquoted identifiers must be consistently lower case.",
+            "CP02: Unquoted identifiers must be consistently lower case. "
+            "[capitalisation.identifiers]",
             f"::error title=SQLFluff,file={fpath_normalised},line=4,col=18::"
-            "L014: Unquoted identifiers must be consistently lower case.",
+            "CP02: Unquoted identifiers must be consistently lower case. "
+            "[capitalisation.identifiers]",
             "",  # SQLFluff produces trailing newline
         ]
     )
@@ -1547,7 +1609,7 @@ def test_encoding(encoding_in, encoding_out):
     with open("test/fixtures/linter/indentation_errors.sql", "r") as testfile:
         generic_roundtrip_test(
             testfile,
-            "L001",
+            "LT01",
             input_file_encoding=encoding_in,
             output_file_encoding=encoding_out,
         )
@@ -1615,7 +1677,7 @@ def test_cli_disable_noqa_flag():
     raw_output = repr(result.output)
 
     # Linting error is raised even though it is inline ignored.
-    assert r"L:   5 | P:  11 | L010 |" in raw_output
+    assert r"L:   5 | P:  11 | CP01 |" in raw_output
 
 
 def test_cli_get_default_config():
@@ -1703,8 +1765,8 @@ class TestProgressBars:
         raw_output = repr(result.output)
 
         assert r"\rlint by rules:" in raw_output
-        assert r"\rrule L001:" in raw_output
-        assert r"\rrule L049:" in raw_output
+        assert r"\rrule LT01:" in raw_output
+        assert r"\rrule CV05:" in raw_output
 
     def test_cli_lint_enabled_progress_bar_multiple_paths(
         self, mock_disable_progress_bar: MagicMock
@@ -1733,8 +1795,8 @@ class TestProgressBars:
             in raw_output
         )
         assert r"\rlint by rules:" in raw_output
-        assert r"\rrule L001:" in raw_output
-        assert r"\rrule L049:" in raw_output
+        assert r"\rrule LT01:" in raw_output
+        assert r"\rrule CV05:" in raw_output
 
     def test_cli_lint_enabled_progress_bar_multiple_files(
         self, mock_disable_progress_bar: MagicMock
@@ -1772,8 +1834,8 @@ class TestProgressBars:
             in raw_output
         )
         assert r"\rlint by rules:" in raw_output
-        assert r"\rrule L001:" in raw_output
-        assert r"\rrule L049:" in raw_output
+        assert r"\rrule LT01:" in raw_output
+        assert r"\rrule CV05:" in raw_output
 
     def test_cli_fix_disabled_progress_bar(
         self, mock_disable_progress_bar: MagicMock
@@ -1818,7 +1880,7 @@ class TestProgressBars:
 
 multiple_expected_output = """==== finding fixable violations ====
 == [test/fixtures/linter/multiple_sql_errors.sql] FAIL
-L:  12 | P:   1 | L003 | Expected 1 indentation, found 0 [compared to line 10]
+L:  12 | P:   1 | LT02 | Expected indent of 4 spaces. [layout.indent]
 ==== fixing violations ====
 1 fixable linting violations found
 Are you sure you wish to attempt to fix these? [Y/n] ...
@@ -1866,24 +1928,21 @@ def test__cli__fix_multiple_errors_show_errors():
     assert check_a in result.output
     # Finally check the WHOLE output to make sure that unexpected newlines are not
     # added. The replace command just accounts for cross platform testing.
+    assert "L:  12 | P:   1 | LT02 | Expected indent of 4 spaces." in result.output
     assert (
-        "L:  12 | P:   1 | L003 | Expected 1 indentation, found 0 [compared to line 10]"
-        in result.output
-    )
-    assert (
-        "L:  36 | P:   9 | L027 | Unqualified reference 'package_id' found in "
+        "L:  36 | P:   9 | RF02 | Unqualified reference 'package_id' found in "
         "select with more than" in result.output
     )
     assert (
-        "L:  45 | P:  17 | L027 | Unqualified reference 'owner_type' found in "
+        "L:  45 | P:  17 | RF02 | Unqualified reference 'owner_type' found in "
         "select with more than" in result.output
     )
     assert (
-        "L:  45 | P:  50 | L027 | Unqualified reference 'app_key' found in "
+        "L:  45 | P:  50 | RF02 | Unqualified reference 'app_key' found in "
         "select with more than one" in result.output
     )
     assert (
-        "L:  42 | P:  45 | L027 | Unqualified reference 'owner_id' found in "
+        "L:  42 | P:  45 | RF02 | Unqualified reference 'owner_id' found in "
         "select with more than" in result.output
     )
 
diff --git a/test/cli/formatters_test.py b/test/cli/formatters_test.py
index f398ce8..c996ecb 100644
--- a/test/cli/formatters_test.py
+++ b/test/cli/formatters_test.py
@@ -45,7 +45,7 @@ def test__cli__formatters__violation(tmpdir):
             TemplatedFile.from_string("      \n\n  foobarbar"),
         ),
     )
-    r = RuleGhost("A", "DESC")
+    r = RuleGhost("A", "some-name", "DESC")
     v = SQLLintError(segment=s, rule=r)
     formatter = OutputStreamFormatter(
         FileOutput(FluffConfig(require_dialect=False), str(tmpdir / "out.txt")), False
@@ -56,7 +56,7 @@ def test__cli__formatters__violation(tmpdir):
     # it's at the third position in that line (i.e. there
     # are two characters between it and the preceding
     # newline).
-    assert escape_ansi(f) == "L:   3 | P:   3 |    A | DESC"
+    assert escape_ansi(f) == "L:   3 | P:   3 |    A | DESC [some-name]"
 
 
 def test__cli__helpers__colorize(tmpdir):
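
The formatters change shows that RuleGhost (a lightweight stand-in for a real rule) now carries a name alongside its code and description, and that rendered violations gain a "[name]" suffix. A two-line sketch; the import path is an assumption, since the hunk does not show it:

    from sqlfluff.core.rules.base import RuleGhost  # assumed import path

    # Rendered as: L:   3 | P:   3 |    A | DESC [some-name]
    ghost = RuleGhost("A", "some-name", "DESC")
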
diff --git a/test/conftest.py b/test/conftest.py
index e3a0e8f..7991c62 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -90,6 +90,9 @@ def process_struct(obj):
     if isinstance(obj, dict):
         return tuple((k, process_struct(obj[k])) for k in obj)
     elif isinstance(obj, list):
+        # If empty list, return empty tuple
+        if not len(obj):
+            return tuple()
         # We'll assume that it's a list of dicts
         if isinstance(obj[0], dict):
             buff = [process_struct(elem) for elem in obj]
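
The conftest change is a small guard: process_struct previously indexed obj[0] for any list, so an empty list in a parse fixture would raise IndexError. A sketch of the resulting behaviour, assuming process_struct is imported from the conftest above:

    # An empty list now maps to an empty tuple rather than raising.
    assert process_struct([]) == ()
    # Conversion is recursive, so empty lists nested in dicts work too.
    assert process_struct({"a": []}) == (("a", ()),)
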
diff --git a/test/core/config_test.py b/test/core/config_test.py
index 59a84ce..b80e528 100644
--- a/test/core/config_test.py
+++ b/test/core/config_test.py
@@ -2,6 +2,7 @@
 
 import os
 import sys
+import logging
 
 from sqlfluff.core import config, Linter, FluffConfig
 from sqlfluff.core.config import (
@@ -17,6 +18,7 @@ from sqlfluff.core.templaters import (
     JinjaTemplater,
     PlaceholderTemplater,
 )
+from sqlfluff.utils.testing.logging import fluff_log_catcher
 
 from pathlib import Path
 from unittest.mock import patch, call
@@ -30,12 +32,25 @@ config_a = {
 }
 
 config_b = {
-    "core": {"rules": "L007", "dialect": "ansi"},
+    "core": {"rules": "LT03", "dialect": "ansi"},
     "layout": {
         "type": {"comma": {"line_position": "trailing", "spacing_before": "touch"}}
     },
 }
 
+config_c = {
+    "core": {"rules": "LT03", "dialect": "ansi"},
+    # NOTE:
+    # - NOT_A_RULE doesn't match anything.
+    # - L001 is an alias, but no longer a rule.
+    # - layout is a group, but doesn't match any individual rule.
+    "rules": {
+        "NOT_A_RULE": {"foo": "bar"},
+        "L001": {"foo": "bar"},
+        "layout": {"foo": "bar"},
+    },
+}
+
 
 @pytest.fixture
 def mock_xdg_home(monkeypatch):
@@ -130,10 +145,12 @@ def test__config__load_toml():
             "testing_bar": 7.698,
             "testing_bool": False,
             "testing_arr": ["a", "b", "c"],
+            "rules": ["LT03", "LT09"],
             "testing_inline_table": {"x": 1},
         },
         "bar": {"foo": "foobar"},
         "fnarr": {"fnarr": {"foo": "foobar"}},
+        "rules": {"capitalisation.keywords": {"capitalisation_policy": "upper"}},
     }
 
 
@@ -198,19 +215,33 @@ def test__config__nested_config_tests():
     test.
     """
     lntr = Linter(
-        config=FluffConfig(overrides=dict(exclude_rules="L002", dialect="ansi"))
+        # Exclude CP02 in overrides (similar to cli --exclude-rules)
+        config=FluffConfig(overrides=dict(exclude_rules="CP02", dialect="ansi"))
     )
     lnt = lntr.lint_path("test/fixtures/config/inheritance_b")
     violations = lnt.check_tuples(by_path=True)
     for k in violations:
         if k.endswith("nested\\example.sql"):
-            assert ("L003", 1, 4) in violations[k]
-            assert ("L009", 1, 12) in violations[k]
-            assert "L002" not in [c[0] for c in violations[k]]
+            # CP01 is enabled in the .sqlfluff file and not excluded.
+            assert ("CP01", 1, 4) in violations[k]
+            # LT02 is enabled in the .sqlfluff file and not excluded.
+            assert ("LT02", 1, 1) in violations[k]
+            # CP02 is enabled in the .sqlfluff file but excluded by the
+            # override above.
+            assert "CP02" not in [c[0] for c in violations[k]]
         elif k.endswith("inheritance_b\\example.sql"):
-            assert ("L003", 1, 4) in violations[k]
-            assert "L002" not in [c[0] for c in violations[k]]
-            assert "L009" not in [c[0] for c in violations[k]]
+            # CP01 is enabled here even though tox.ini excludes it: the
+            # exclude_rules override above replaces that exclusion with
+            # CP02, which in effect re-enables CP01.
+            # This may seem counter-intuitive but is in line with current
+            # documentation on how to use `rules` and `exclude-rules`.
+            # https://docs.sqlfluff.com/en/latest/configuration.html#enabling-and-disabling-rules
+            assert ("CP01", 1, 4) in violations[k]
+            # CP02 is disabled because of the override above.
+            assert "CP02" not in [c[0] for c in violations[k]]
+            # LT02 is disabled because it is not in the `rules` of tox.ini
+            assert "LT02" not in [c[0] for c in violations[k]]
 
 
 @patch("os.path.exists")
@@ -250,13 +281,21 @@ def test__config__load_user_appdir_config(
 @pytest.mark.parametrize(
     "raw_str, expected",
     [
-        ("L011,L022,L031", ["L011", "L022", "L031"]),
-        ("\nL011,\nL022,\nL031,", ["L011", "L022", "L031"]),
+        ("AL01,LT08,AL07", ["AL01", "LT08", "AL07"]),
+        ("\nAL01,\nLT08,\nAL07,", ["AL01", "LT08", "AL07"]),
+        (["AL01", "LT08", "AL07"], ["AL01", "LT08", "AL07"]),
     ],
 )
 def test__config__split_comma_separated_string(raw_str, expected):
-    """Tests that comma separated string config is handled correctly."""
-    assert config._split_comma_separated_string(raw_str) == expected
+    """Tests that string and lists are output correctly."""
+    assert config.split_comma_separated_string(raw_str) == expected
+
+
+def test__config__split_comma_separated_string_correct_type():
+    """Tests that invalid data types throw the correct error."""
+    # Loop so that both invalid values are exercised: a single
+    # pytest.raises block would stop at the first raising call.
+    for value in (1, True):
+        with pytest.raises(SQLFluffUserError):
+            config.split_comma_separated_string(value)
 
 
 def test__config__templater_selection():
@@ -282,11 +321,11 @@ def test__config__glob_exclude_config_tests():
     lnt = lntr.lint_path("test/fixtures/config/glob_exclude/test.sql")
     violations = lnt.check_tuples(by_path=True)
     for k in violations:
-        assert ("L044", 10, 1) in violations[k]
-        assert "L027" not in [c[0] for c in violations[k]]
-        assert "L050" not in [c[0] for c in violations[k]]
-        assert "L051" not in [c[0] for c in violations[k]]
-        assert "L052" not in [c[0] for c in violations[k]]
+        assert ("AM04", 12, 1) in violations[k]
+        assert "RF02" not in [c[0] for c in violations[k]]
+        assert "LT13" not in [c[0] for c in violations[k]]
+        assert "AM05" not in [c[0] for c in violations[k]]
+        assert "CV06" not in [c[0] for c in violations[k]]
 
 
 def test__config__glob_include_config_tests():
@@ -299,11 +338,11 @@ def test__config__glob_include_config_tests():
     lnt = lntr.lint_path("test/fixtures/config/glob_include/test.sql")
     violations = lnt.check_tuples(by_path=True)
     for k in violations:
-        assert ("L050", 1, 1) in violations[k]
-        assert ("L051", 12, 1) in violations[k]
-        assert ("L052", 12, 9) in violations[k]
-        assert ("L027", 10, 8) in violations[k]
-        assert "L044" not in [c[0] for c in violations[k]]
+        assert ("LT13", 1, 1) in violations[k]
+        assert ("AM05", 14, 1) in violations[k]
+        assert ("CV06", 14, 9) in violations[k]
+        assert ("RF02", 12, 8) in violations[k]
+        assert "AM04" not in [c[0] for c in violations[k]]
 
 
 def test__config__rules_set_to_none():
@@ -317,9 +356,9 @@ def test__config__rules_set_to_none():
     lnt = lntr.lint_path("test/fixtures/config/rules_set_to_none/test.sql")
     violations = lnt.check_tuples(by_path=True)
     for k in violations:
-        assert ("L050", 1, 1) in violations[k]
-        assert ("L044", 12, 1) in violations[k]
-        assert ("L010", 12, 10) in violations[k]
+        assert ("LT13", 1, 1) in violations[k]
+        assert ("AM04", 12, 1) in violations[k]
+        assert ("CP01", 12, 10) in violations[k]
 
 
 def test__config__rules_group_with_exclude():
@@ -330,15 +369,15 @@ def test__config__rules_group_with_exclude():
     lnt = lntr.lint_path("test/fixtures/config/rules_group_with_exclude/test.sql")
     violations = lnt.check_tuples(by_path=True)
     for k in violations:
-        assert ("L010", 15, 1) in violations[k]
-        assert "L019" not in [c[0] for c in violations[k]]
+        assert ("CP01", 15, 1) in violations[k]
+        assert "LT04" not in [c[0] for c in violations[k]]
 
 
 def test__config__get_section():
     """Test FluffConfig.get_section method."""
     cfg = FluffConfig(config_b)
 
-    assert cfg.get_section("core").get("rules", None) == "L007"
+    assert cfg.get_section("core").get("rules", None) == "LT03"
     assert cfg.get_section(["layout", "type", "comma"]) == {
         "line_position": "trailing",
         "spacing_before": "touch",
@@ -350,7 +389,7 @@ def test__config__get():
     """Test FluffConfig.get method."""
     cfg = FluffConfig(config_b)
 
-    assert cfg.get("rules") == "L007"
+    assert cfg.get("rules") == "LT03"
     assert cfg.get("rulez") is None
     assert cfg.get("rulez", section="core", default=123) == 123
     assert (
@@ -368,14 +407,14 @@ def test__config__from_kwargs():
     # Instantiate config object.
     cfg = FluffConfig.from_kwargs(
         dialect="snowflake",
-        rules=["L001", "L002"],
-        exclude_rules=["L010", "L011"],
+        rules=["LT01", "LT02"],
+        exclude_rules=["CP01", "AL01"],
     )
 
     # Verify we can later retrieve the config values.
     assert cfg.get("dialect") == "snowflake"
-    assert cfg.get("rules") == "L001,L002"
-    assert cfg.get("exclude_rules") == "L010,L011"
+    assert cfg.get("rules") == "LT01,LT02"
+    assert cfg.get("exclude_rules") == "CP01,AL01"
 
 
 def test__config_missing_dialect():
@@ -423,7 +462,7 @@ def test__config__validate_configs_indirect():
 def test__config__validate_configs_precedence_same_file():
     """Test _validate_configs method of FluffConfig where there's a conflict."""
     # Check with a known conflicted value
-    old_key = ("rules", "L007", "operator_new_lines")
+    old_key = ("rules", "LT03", "operator_new_lines")
     new_key = ("layout", "type", "binary_operator", "line_position")
     # Check it's still conflicted.
     assert any(
@@ -439,3 +478,69 @@ def test__config__validate_configs_precedence_same_file():
     assert len(res) == 1
     # Check that the old key isn't there.
     assert not any(k == old_key for k, _ in res)
+
+
+def test__config__toml_list_config():
+    """Test Parsing TOML list of values."""
+    c = ConfigLoader()
+    loaded_config = c.load_config_file(
+        os.path.join("test", "fixtures", "config", "toml"),
+        "pyproject.toml",
+    )
+    loaded_config["core"]["dialect"] = "ansi"
+    cfg = FluffConfig(loaded_config)
+
+    # Verify we can later retrieve the config values.
+    assert cfg.get("dialect") == "ansi"
+    assert cfg.get("rules") == ["LT03", "LT09"]
+
+
+def test__config__warn_unknown_rule():
+    """Test warnings when rules are unknown."""
+    lntr = Linter(config=FluffConfig(config_c))
+
+    with fluff_log_catcher(logging.WARNING, "sqlfluff.rules") as caplog:
+        lntr.get_rulepack()
+
+    # Check we get a warning on the unrecognised rule.
+    assert (
+        "Rule configuration contain a section for unexpected rule 'NOT_A_RULE'."
+    ) in caplog.text
+    # Check we get a warning for the deprecated rule.
+    assert (
+        "Rule configuration contain a section for unexpected rule 'L001'."
+    ) in caplog.text
+    # Check we get a hint for the matched rule.
+    assert "match for rule LT01 with name 'layout.spacing'" in caplog.text
+    # Check we get a warning for the group name.
+    assert (
+        "Rule configuration contain a section for unexpected rule 'layout'."
+    ) in caplog.text
+    # Check we get a hint for the matched rule group.
+    # NOTE: We don't check the set explicitly because we can't assume ordering.
+    assert ("The reference was found as a match for multiple rules: {") in caplog.text
+    assert ("LT01") in caplog.text
+    assert ("LT02") in caplog.text
+
+
+def test__process_inline_config():
+    """Test the processing of inline in-file configuration directives."""
+    cfg = FluffConfig(config_b)
+    assert cfg.get("rules") == "LT03"
+
+    cfg.process_inline_config("-- sqlfluff:rules:LT02")
+    assert cfg.get("rules") == "LT02"
+
+    assert cfg.get("tab_space_size", section="indentation") == 4
+    cfg.process_inline_config("-- sqlfluff:indentation:tab_space_size:20")
+    assert cfg.get("tab_space_size", section="indentation") == 20
+
+    assert cfg.get("dialect") == "ansi"
+    assert cfg.get("dialect_obj").name == "ansi"
+    cfg.process_inline_config("-- sqlfluff:dialect:postgres")
+    assert cfg.get("dialect") == "postgres"
+    assert cfg.get("dialect_obj").name == "postgres"
+
+    assert cfg.get("rulez") is None
+    cfg.process_inline_config("-- sqlfluff:rulez:LT06")
+    assert cfg.get("rulez") == "LT06"
diff --git a/test/core/linted_file_test.py b/test/core/linted_file_test.py
index b11459a..c0d68df 100644
--- a/test/core/linted_file_test.py
+++ b/test/core/linted_file_test.py
@@ -146,7 +146,7 @@ def test__linted_file__build_up_fixed_source_string(
             "a {# b #} c",
             [slice(0, 2), slice(2, 9), slice(9, 11)],
         ),
-        # Illustrate potential templating bug (case from L046).
+        # Illustrate potential templating bug (case from JJ01).
         # In this case we have fixes for all our templated sections
         # and they are all close to each other and so may be either
         # skipped or duplicated if the logic is not precise.
diff --git a/test/core/linter_test.py b/test/core/linter_test.py
index e0d176a..7103134 100644
--- a/test/core/linter_test.py
+++ b/test/core/linter_test.py
@@ -25,12 +25,13 @@ from sqlfluff.core.linter.runner import get_runner
 import sqlfluff.core.linter as linter
 from sqlfluff.core.parser import GreedyUntil, Ref
 from sqlfluff.core.templaters import TemplatedFile
+from sqlfluff.utils.testing.logging import fluff_log_catcher
 
 
 class DummyLintError(SQLBaseError):
     """Fake lint error used by tests, similar to SQLLintError."""
 
-    def __init__(self, line_no: int, code: str = "L001"):
+    def __init__(self, line_no: int, code: str = "LT01"):
         self._code = code
         super().__init__(line_no=line_no)
 
@@ -211,7 +212,7 @@ def test__linter__lint_string_vs_file(path):
 
 
 @pytest.mark.parametrize(
-    "rules,num_violations", [(None, 7), ("L010", 2), (("L001", "L009", "L031"), 2)]
+    "rules,num_violations", [(None, 6), ("CP01", 2), (("LT01", "LT12"), 1)]
 )
 def test__linter__get_violations_filter_rules(rules, num_violations):
     """Test filtering violations by which rules were violated."""
@@ -402,17 +403,17 @@ def test__linter__empty_file():
 @pytest.mark.parametrize(
     "ignore_templated_areas,check_tuples",
     [
-        (True, [("L006", 3, 39), ("L006", 3, 40)]),
+        (True, [("LT01", 3, 39), ("LT01", 3, 40)]),
         (
             False,
             [
-                # there are still two of each because L006 checks
+                # there are still two of each because LT01 checks
                 # for both *before* and *after* the operator.
                 # The deduplication filter makes sure there aren't 4.
-                ("L006", 3, 16),
-                ("L006", 3, 16),
-                ("L006", 3, 39),
-                ("L006", 3, 40),
+                ("LT01", 3, 16),
+                ("LT01", 3, 16),
+                ("LT01", 3, 39),
+                ("LT01", 3, 40),
             ],
         ),
     ],
@@ -476,7 +477,7 @@ def test__linter__encoding(fname, config_encoding, lexerror):
     lntr = Linter(
         config=FluffConfig(
             overrides={
-                "rules": "L001",
+                "rules": "LT01",
                 "encoding": config_encoding,
                 "dialect": "ansi",
             }
@@ -487,7 +488,7 @@ def test__linter__encoding(fname, config_encoding, lexerror):
 
 
 # noqa tests require a rule_set, therefore we construct a dummy rule set for glob matching.
-dummy_rule_codes = [r.code for r in Linter().get_ruleset()]
+dummy_rule_map = Linter().get_rulepack().reference_map
 
 
 @pytest.mark.parametrize(
@@ -497,39 +498,64 @@ dummy_rule_codes = [r.code for r in Linter().get_ruleset()]
         ("noqa", NoQaDirective(0, None, None)),
         ("noqa?", SQLParseError),
         ("noqa:", NoQaDirective(0, None, None)),
-        ("noqa:L001,L002", NoQaDirective(0, ("L001", "L002"), None)),
-        ("noqa: enable=L005", NoQaDirective(0, ("L005",), "enable")),
-        ("noqa: disable=L010", NoQaDirective(0, ("L010",), "disable")),
+        ("noqa:LT01,LT02", NoQaDirective(0, ("LT01", "LT02"), None)),
+        ("noqa: enable=LT01", NoQaDirective(0, ("LT01",), "enable")),
+        ("noqa: disable=CP01", NoQaDirective(0, ("CP01",), "disable")),
         ("noqa: disable=all", NoQaDirective(0, None, "disable")),
         ("noqa: disable", SQLParseError),
         (
-            "Inline comment before inline ignore -- noqa:L001,L002",
-            NoQaDirective(0, ("L001", "L002"), None),
+            "Inline comment before inline ignore -- noqa:LT01,LT02",
+            NoQaDirective(0, ("LT01", "LT02"), None),
         ),
+        # Test selection with rule globs
         (
-            "Inline comment before inline ignore -- noqa:L04*",
+            "noqa:L04*",
             NoQaDirective(
                 0,
                 (
-                    "L040",
-                    "L041",
-                    "L042",
-                    "L043",
-                    "L044",
-                    "L045",
-                    "L046",
-                    "L047",
-                    "L048",
-                    "L049",
+                    "AM04",  # L044 is an alias of AM04
+                    "CP04",  # L040 is an alias of CP04
+                    "CV04",  # L047 is an alias of CV04
+                    "CV05",  # L049 is an alias of CV05
+                    "JJ01",  # L046 is an alias of JJ01
+                    "LT01",  # L048 is an alias of LT01
+                    "LT10",  # L041 is an alias of LT10
+                    "ST02",  # L043 is an alias of ST02
+                    "ST03",  # L045 is an alias of ST03
+                    "ST05",  # L042 is an alias of ST05
                 ),
                 None,
             ),
         ),
+        # Test selection with aliases.
+        (
+            "noqa:L002",
+            NoQaDirective(0, ("LT02",), None),
+        ),
+        # Test selection with alias globs.
+        (
+            "noqa:L00*",
+            NoQaDirective(
+                0,
+                ("LT01", "LT02", "LT03", "LT12"),
+                None,
+            ),
+        ),
+        # Test selection with names.
+        (
+            "noqa:capitalisation.keywords",
+            NoQaDirective(0, ("CP01",), None),
+        ),
+        # Test selection with groups.
+        (
+            "noqa:capitalisation",
+            NoQaDirective(0, ("CP01", "CP02", "CP03", "CP04", "CP05"), None),
+        ),
     ],
 )
 def test_parse_noqa(input, expected):
     """Test correct of "noqa" comments."""
-    result = Linter.parse_noqa(input, 0, rule_codes=dummy_rule_codes)
+    result = Linter.parse_noqa(input, 0, reference_map=dummy_rule_map)
     if not isinstance(expected, type):
         assert result == expected
     else:
@@ -540,7 +566,7 @@ def test_parse_noqa(input, expected):
 def test_parse_noqa_no_dups():
     """Test overlapping glob expansions don't return duplicate rules in noqa."""
     result = Linter.parse_noqa(
-        comment="noqa:L0*5,L01*", line_no=0, rule_codes=dummy_rule_codes
+        comment="noqa:L0*5,L01*", line_no=0, reference_map=dummy_rule_map
     )
     assert len(result.rules) == len(set(result.rules))
 
@@ -556,58 +582,58 @@ def test_parse_noqa_no_dups():
             ],
         ],
         [
-            [dict(comment="noqa: L001", line_no=1)],
+            [dict(comment="noqa: LT01", line_no=1)],
             [DummyLintError(1)],
             [],
         ],
         [
-            [dict(comment="noqa: L001", line_no=2)],
+            [dict(comment="noqa: LT01", line_no=2)],
             [DummyLintError(1)],
             [0],
         ],
         [
-            [dict(comment="noqa: L002", line_no=1)],
+            [dict(comment="noqa: LT02", line_no=1)],
             [DummyLintError(1)],
             [0],
         ],
         [
-            [dict(comment="noqa: enable=L001", line_no=1)],
+            [dict(comment="noqa: enable=LT01", line_no=1)],
             [DummyLintError(1)],
             [0],
         ],
         [
-            [dict(comment="noqa: disable=L001", line_no=1)],
+            [dict(comment="noqa: disable=LT01", line_no=1)],
             [DummyLintError(1)],
             [],
         ],
         [
             [
-                dict(comment="noqa: disable=L001", line_no=2),
-                dict(comment="noqa: enable=L001", line_no=4),
+                dict(comment="noqa: disable=LT01", line_no=2),
+                dict(comment="noqa: enable=LT01", line_no=4),
             ],
             [DummyLintError(1)],
             [0],
         ],
         [
             [
-                dict(comment="noqa: disable=L001", line_no=2),
-                dict(comment="noqa: enable=L001", line_no=4),
+                dict(comment="noqa: disable=LT01", line_no=2),
+                dict(comment="noqa: enable=LT01", line_no=4),
             ],
             [DummyLintError(2)],
             [],
         ],
         [
             [
-                dict(comment="noqa: disable=L001", line_no=2),
-                dict(comment="noqa: enable=L001", line_no=4),
+                dict(comment="noqa: disable=LT01", line_no=2),
+                dict(comment="noqa: enable=LT01", line_no=4),
             ],
             [DummyLintError(3)],
             [],
         ],
         [
             [
-                dict(comment="noqa: disable=L001", line_no=2),
-                dict(comment="noqa: enable=L001", line_no=4),
+                dict(comment="noqa: disable=LT01", line_no=2),
+                dict(comment="noqa: enable=LT01", line_no=4),
             ],
             [DummyLintError(4)],
             [0],
@@ -646,34 +672,34 @@ def test_parse_noqa_no_dups():
         ],
         [
             [
-                dict(comment="noqa: disable=L001", line_no=2),
+                dict(comment="noqa: disable=LT01", line_no=2),
                 dict(comment="noqa: enable=all", line_no=4),
             ],
             [
-                DummyLintError(2, code="L001"),
-                DummyLintError(2, code="L002"),
-                DummyLintError(4, code="L001"),
-                DummyLintError(4, code="L002"),
+                DummyLintError(2, code="LT01"),
+                DummyLintError(2, code="LT02"),
+                DummyLintError(4, code="LT01"),
+                DummyLintError(4, code="LT02"),
             ],
             [1, 2, 3],
         ],
         [
             [
                 dict(comment="noqa: disable=all", line_no=2),
-                dict(comment="noqa: enable=L001", line_no=4),
+                dict(comment="noqa: enable=LT01", line_no=4),
             ],
             [
-                DummyLintError(2, code="L001"),
-                DummyLintError(2, code="L002"),
-                DummyLintError(4, code="L001"),
-                DummyLintError(4, code="L002"),
+                DummyLintError(2, code="LT01"),
+                DummyLintError(2, code="LT02"),
+                DummyLintError(4, code="LT01"),
+                DummyLintError(4, code="LT02"),
             ],
             [2],
         ],
         [
             [
                 dict(
-                    comment="Inline comment before inline ignore -- noqa: L002",
+                    comment="Inline comment before inline ignore -- noqa: LT02",
                     line_no=1,
                 )
             ],
@@ -683,11 +709,11 @@ def test_parse_noqa_no_dups():
         [
             [
                 dict(
-                    comment="Inline comment before inline ignore -- noqa: L002",
+                    comment="Inline comment before inline ignore -- noqa: LT02",
                     line_no=1,
                 ),
                 dict(
-                    comment="Inline comment before inline ignore -- noqa: L002",
+                    comment="Inline comment before inline ignore -- noqa: LT02",
                     line_no=2,
                 ),
             ],
@@ -736,11 +762,11 @@ def test_linted_file_ignore_masked_violations(
     noqa: dict, violations: List[SQLBaseError], expected
 ):
     """Test that _ignore_masked_violations() correctly filters violations."""
-    ignore_mask = [Linter.parse_noqa(rule_codes=dummy_rule_codes, **c) for c in noqa]
+    ignore_mask = [Linter.parse_noqa(reference_map=dummy_rule_map, **c) for c in noqa]
     lf = linter.LintedFile(
         path="",
         violations=violations,
-        time_dict={},
+        timings=None,
         tree=None,
         ignore_mask=ignore_mask,
         templated_file=TemplatedFile.from_string(""),
@@ -757,37 +783,37 @@ def test_linter_noqa():
         config=FluffConfig(
             overrides={
                 "dialect": "bigquery",  # Use bigquery to allow hash comments.
-                "rules": "L012, L019",
+                "rules": "AL02, LT04",
             }
         )
     )
     sql = """
     SELECT
         col_a a,
-        col_b b, --noqa: disable=L012
+        col_b b, --noqa: disable=AL02
         col_c c,
-        col_d d, --noqa: enable=L012
+        col_d d, --noqa: enable=AL02
         col_e e,
         col_f f,
         col_g g,  --noqa
         col_h h,
-        col_i i, --noqa:L012
+        col_i i, --noqa:AL02
         col_j j,
-        col_k k, --noqa:L013
+        col_k k, --noqa:AL03
         col_l l,
         col_m m,
         col_n n, --noqa: disable=all
         col_o o,
         col_p p, --noqa: enable=all
-        col_q q, --Inline comment --noqa: L012
-        col_r r, /* Block comment */ --noqa: L012
-        col_s s # hash comment --noqa: L012
-        -- We trigger both L012 (implicit aliasing)
-        -- and L019 (leading commas) here to
+        col_q q, --Inline comment --noqa: AL02
+        col_r r, /* Block comment */ --noqa: AL02
+        col_s s # hash comment --noqa: AL02
+        -- We trigger both AL02 (implicit aliasing)
+        -- and LT04 (leading commas) here to
         -- test glob ignoring of multiple rules.
         , col_t t --noqa: L01*
         , col_u u -- Some comment --noqa: L01*
-        , col_v v -- We can ignore both L012 and L019 -- noqa: L01[29]
+        , col_v v -- We can ignore both AL02 and LT04 -- noqa: L01[29]
     FROM foo
         """
     result = lntr.lint_string(sql)
@@ -802,7 +828,7 @@ def test_linter_noqa_with_templating():
             overrides={
                 "dialect": "bigquery",  # Use bigquery to allow hash comments.
                 "templater": "jinja",
-                "rules": "L016",
+                "rules": "LT05",
             }
         )
     )
@@ -810,10 +836,10 @@ def test_linter_noqa_with_templating():
     '"{%- set a_var = ["1", "2"] -%}\n'
     "SELECT\n"
     "  this_is_just_a_very_long_line_for_demonstration_purposes_of_a_bug_involving_"
-    "templated_sql_files, --noqa: L016\n"
-    "  this_is_not_so_big a, --Inline comment --noqa: L012\n"
-    "  this_is_not_so_big b, /* Block comment */ --noqa: L012\n"
-    "  this_is_not_so_big c, # hash comment --noqa: L012\n"
+    "templated_sql_files, --noqa: LT05\n"
+    "  this_is_not_so_big a, --Inline comment --noqa: AL02\n"
+    "  this_is_not_so_big b, /* Block comment */ --noqa: AL02\n"
+    "  this_is_not_so_big c, # hash comment --noqa: AL02\n"
     "  this_is_just_a_very_long_line_for_demonstration_purposes_of_a_bug_involving_"
     "templated_sql_files, --noqa: L01*\n"
     "FROM\n"
@@ -856,7 +882,7 @@ def test_linter_noqa_tmp():
     lntr = Linter(
         config=FluffConfig(
             overrides={
-                "exclude_rules": "L050",
+                "exclude_rules": "LT13",
                 "dialect": "ansi",
             }
         )
@@ -876,7 +902,7 @@ def test_linter_noqa_disable():
     lntr_noqa_enabled = Linter(
         config=FluffConfig(
             overrides={
-                "rules": "L012",
+                "rules": "AL02",
                 "dialect": "ansi",
             }
         )
@@ -885,16 +911,16 @@ def test_linter_noqa_disable():
         config=FluffConfig(
             overrides={
                 "disable_noqa": True,
-                "rules": "L012",
+                "rules": "AL02",
                 "dialect": "ansi",
             }
         )
     )
-    # This query raises L012, but it is being suppressed by the inline noqa comment.
+    # This query raises AL02, but it is being suppressed by the inline noqa comment.
     # We can ignore this comment by setting disable_noqa = True in the config
     # or by using the --disable-noqa flag in the CLI.
     sql = """
-    SELECT col_a a --noqa: L012
+    SELECT col_a a --noqa: AL02
     FROM foo
     """
 
@@ -907,7 +933,7 @@ def test_linter_noqa_disable():
     result_noqa_disabled = lntr_noqa_disabled.lint_string(sql)
     violations_noqa_disabled = result_noqa_disabled.get_violations()
     assert len(violations_noqa_disabled) == 1
-    assert violations_noqa_disabled[0].rule.code == "L012"
+    assert violations_noqa_disabled[0].rule.code == "AL02"
 
 
 def test_delayed_exception():
@@ -918,7 +944,7 @@ def test_delayed_exception():
         de.reraise()
 
 
-def test__attempt_to_change_templater_warning(caplog):
+def test__attempt_to_change_templater_warning():
     """Test warning when changing templater in .sqlfluff file in subdirectory."""
     initial_config = FluffConfig(
         configs={"core": {"templater": "jinja", "dialect": "ansi"}}
@@ -927,20 +953,14 @@ def test__attempt_to_change_templater_warning(caplog):
     updated_config = FluffConfig(
         configs={"core": {"templater": "python", "dialect": "ansi"}}
     )
-    logger = logging.getLogger("sqlfluff")
-    original_propagate_value = logger.propagate
-    try:
-        logger.propagate = True
-        with caplog.at_level(logging.WARNING, logger="sqlfluff.linter"):
-            lntr.render_string(
-                in_str="select * from table",
-                fname="test.sql",
-                config=updated_config,
-                encoding="utf-8",
-            )
-        assert "Attempt to set templater to " in caplog.text
-    finally:
-        logger.propagate = original_propagate_value
+    with fluff_log_catcher(logging.WARNING, "sqlfluff.linter") as caplog:
+        lntr.render_string(
+            in_str="select * from table",
+            fname="test.sql",
+            config=updated_config,
+            encoding="utf-8",
+        )
+    assert "Attempt to set templater to " in caplog.text
 
 
 @pytest.mark.parametrize(
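
As an aside on what these noqa tests exercise: the same suppression can be seen end to end through the public API. A minimal sketch, assuming the 2.x simple API and the AL02 code used throughout this diff:

    import sqlfluff

    # "col_a a" would normally raise AL02 (implicit aliasing), but the
    # inline noqa comment suppresses it.
    sql = "SELECT col_a a --noqa: AL02\nFROM foo\n"
    violations = sqlfluff.lint(sql, dialect="ansi", rules=["AL02"])
    assert violations == []
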
diff --git a/test/core/parser/lexer_test.py b/test/core/parser/lexer_test.py
index 2b06d78..587f73e 100644
--- a/test/core/parser/lexer_test.py
+++ b/test/core/parser/lexer_test.py
@@ -2,8 +2,12 @@
 
 import pytest
 import logging
+from typing import Any, Dict, Tuple, NamedTuple, List, Union
 
 from sqlfluff.core.parser import Lexer, CodeSegment, NewlineSegment
+from sqlfluff.core.parser.segments.meta import TemplateSegment
+from sqlfluff.core.templaters import JinjaTemplater, TemplatedFile, RawFileSlice
+from sqlfluff.core.templaters.base import TemplatedFileSlice
 from sqlfluff.core.parser.lexer import (
     StringLexer,
     LexMatch,
@@ -169,3 +173,245 @@ def test__parser__lexer_trim_post_subdivide(caplog):
         assert res.elements[1].raw == "\n"
         assert res.elements[2].raw == "/"
         assert len(res.elements) == 3
+
+
+class _LexerSlicingCase(NamedTuple):
+    name: str
+    in_str: str
+    context: Dict[str, Any]
+    # (
+    #     raw,
+    #     source_str (if TemplateSegment),
+    #     block_type (if TemplateSegment),
+    #     segment_type
+    # )
+    expected_segments: List[Tuple[str, Union[str, None], Union[str, None], str]]
+
+
+def _statement(*args, **kwargs):
+    return ""
+
+
+def _load_result(*args, **kwargs):
+    return ["foo", "bar"]
+
+
+@pytest.mark.parametrize(
+    "case",
+    [
+        _LexerSlicingCase(
+            name="call macro and function overrides",
+            in_str="{% call statement('unique_keys', fetch_result=true) %}\n"
+            "    select 1 as test\n"
+            "{% endcall %}\n"
+            "{% set unique_keys = load_result('unique_keys') %}\n"
+            "select 2\n",
+            context={"statement": _statement, "load_result": _load_result},
+            expected_segments=[
+                (
+                    "",
+                    "{% call statement('unique_keys', fetch_result=true) %}",
+                    "block_start",
+                    "placeholder",
+                ),
+                ("", None, None, "indent"),
+                ("", "\n    select 1 as test\n", "literal", "placeholder"),
+                ("", None, None, "dedent"),
+                ("", "{% endcall %}", "block_end", "placeholder"),
+                ("\n", None, None, "newline"),
+                (
+                    "",
+                    "{% set unique_keys = load_result('unique_keys') %}",
+                    "templated",
+                    "placeholder",
+                ),
+                ("\n", None, None, "newline"),
+                ("select", None, None, "raw"),
+                (" ", None, None, "whitespace"),
+                ("2", None, None, "literal"),
+                ("\n", None, None, "newline"),
+                ("", None, None, "end_of_file"),
+            ],
+        ),
+        _LexerSlicingCase(
+            name="call an existing macro",
+            in_str="{% macro render_name(title) %}\n"
+            "  '{{ title }}. foo' as {{ caller() }}\n"
+            "{% endmacro %}\n"
+            "SELECT\n"
+            "    {% call render_name('Sir') %}\n"
+            "        bar\n"
+            "    {% endcall %}\n"
+            "FROM baz\n",
+            context={},
+            expected_segments=[
+                ("", "{% macro render_name(title) %}", "block_start", "placeholder"),
+                ("", None, None, "indent"),
+                ("", "\n  '", "literal", "placeholder"),
+                ("", "{{ title }}", "templated", "placeholder"),
+                ("", ". foo' as ", "literal", "placeholder"),
+                ("", "{{ caller() }}", "templated", "placeholder"),
+                ("", "\n", "literal", "placeholder"),
+                ("", None, None, "dedent"),
+                ("", "{% endmacro %}", "block_end", "placeholder"),
+                ("\n", None, None, "newline"),
+                ("SELECT", None, None, "raw"),
+                ("\n", None, None, "newline"),
+                ("    ", None, None, "whitespace"),
+                ("\n", None, None, "newline"),
+                ("  ", None, None, "whitespace"),
+                ("'Sir. foo'", None, None, "raw"),
+                (" ", None, None, "whitespace"),
+                ("as", None, None, "raw"),
+                (" ", None, None, "whitespace"),
+                ("\n", None, None, "newline"),
+                ("        ", None, None, "whitespace"),
+                ("bar", None, None, "raw"),
+                ("\n", None, None, "newline"),
+                ("    ", None, None, "whitespace"),
+                ("\n", None, None, "newline"),
+                ("", "\n        bar\n    ", "literal", "placeholder"),
+                ("", None, None, "dedent"),
+                ("", "{% endcall %}", "block_end", "placeholder"),
+                ("\n", None, None, "newline"),
+                ("FROM", None, None, "raw"),
+                (" ", None, None, "whitespace"),
+                ("baz", None, None, "raw"),
+                ("\n", None, None, "newline"),
+                ("", None, None, "end_of_file"),
+            ],
+        ),
+    ],
+    ids=lambda case: case.name,
+)
+def test__parser__lexer_slicing_calls(case: _LexerSlicingCase):
+    """Test slicing of call blocks.
+
+    https://github.com/sqlfluff/sqlfluff/issues/4013
+    """
+    config = FluffConfig(overrides={"dialect": "ansi"})
+
+    templater = JinjaTemplater(override_context=case.context)
+
+    templated_file, templater_violations = templater.process(
+        in_str=case.in_str, fname="test.sql", config=config, formatter=None
+    )
+
+    assert (
+        not templater_violations
+    ), f"Found templater violations: {templater_violations}"
+
+    lexer = Lexer(config=config)
+    lexing_segments, lexing_violations = lexer.lex(templated_file)
+
+    assert not lexing_violations, f"Found templater violations: {lexing_violations}"
+    assert case.expected_segments == [
+        (
+            seg.raw,
+            seg.source_str if isinstance(seg, TemplateSegment) else None,
+            seg.block_type if isinstance(seg, TemplateSegment) else None,
+            seg.type,
+        )
+        for seg in lexing_segments
+    ]
+
+
+class _LexerSlicingTemplateFileCase(NamedTuple):
+    name: str
+    # An easy way to build inputs here is to call templater.process in
+    # test__parser__lexer_slicing_calls and adjust the output as you like:
+    file: TemplatedFile
+    # (
+    #     raw,
+    #     source_str (if TemplateSegment),
+    #     block_type (if TemplateSegment),
+    #     segment_type
+    # )
+    expected_segments: List[Tuple[str, Union[str, None], Union[str, None], str]]
+
+
+@pytest.mark.parametrize(
+    "case",
+    [
+        _LexerSlicingTemplateFileCase(
+            name="very simple test case",
+            file=TemplatedFile(
+                source_str="SELECT {# comment #}1;",
+                templated_str="SELECT 1;",
+                fname="test.sql",
+                sliced_file=[
+                    TemplatedFileSlice("literal", slice(0, 7, None), slice(0, 7, None)),
+                    TemplatedFileSlice(
+                        "comment", slice(7, 20, None), slice(7, 7, None)
+                    ),
+                    TemplatedFileSlice(
+                        "literal", slice(20, 22, None), slice(7, 9, None)
+                    ),
+                ],
+                raw_sliced=[
+                    RawFileSlice("SELECT ", "literal", 0, None, 0),
+                    RawFileSlice("{# comment #}", "comment", 7, None, 0),
+                    RawFileSlice("1;", "literal", 20, None, 0),
+                ],
+            ),
+            expected_segments=[
+                ("SELECT", None, None, "raw"),
+                (" ", None, None, "whitespace"),
+                ("", "{# comment #}", "comment", "placeholder"),
+                ("1", None, None, "literal"),
+                (";", None, None, "raw"),
+                ("", None, None, "end_of_file"),
+            ],
+        ),
+        _LexerSlicingTemplateFileCase(
+            name="special zero length slice type is kept",
+            file=TemplatedFile(
+                source_str="SELECT 1;",
+                templated_str="SELECT 1;",
+                fname="test.sql",
+                sliced_file=[
+                    TemplatedFileSlice("literal", slice(0, 7, None), slice(0, 7, None)),
+                    # this is a special marker that the templater wants
+                    # surfaced as a meta segment:
+                    TemplatedFileSlice(
+                        "special_type", slice(7, 7, None), slice(7, 7, None)
+                    ),
+                    TemplatedFileSlice("literal", slice(7, 9, None), slice(7, 9, None)),
+                ],
+                raw_sliced=[
+                    RawFileSlice("SELECT 1;", "literal", 0, None, 0),
+                ],
+            ),
+            expected_segments=[
+                ("SELECT", None, None, "raw"),
+                (" ", None, None, "whitespace"),
+                ("", "", "special_type", "placeholder"),
+                ("1", None, None, "literal"),
+                (";", None, None, "raw"),
+                ("", None, None, "end_of_file"),
+            ],
+        ),
+    ],
+    ids=lambda case: case.name,
+)
+def test__parser__lexer_slicing_from_template_file(case: _LexerSlicingTemplateFileCase):
+    """Test slicing using a provided TemplateFile.
+
+    Useful for testing special inputs without having to find a templater to trick
+    and yield the input you want to test.
+    """
+    config = FluffConfig(overrides={"dialect": "ansi"})
+
+    lexer = Lexer(config=config)
+    lexing_segments, lexing_violations = lexer.lex(case.file)
+
+    assert not lexing_violations, f"Found templater violations: {lexing_violations}"
+    assert case.expected_segments == [
+        (
+            seg.raw,
+            seg.source_str if isinstance(seg, TemplateSegment) else None,
+            seg.block_type if isinstance(seg, TemplateSegment) else None,
+            seg.type,
+        )
+        for seg in lexing_segments
+    ]
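
For orientation, the lexer under test here can also be driven directly from a raw string rather than a TemplatedFile. A minimal sketch using only imports already present in this diff:

    from sqlfluff.core import FluffConfig
    from sqlfluff.core.parser import Lexer

    config = FluffConfig(overrides={"dialect": "ansi"})
    lexer = Lexer(config=config)
    # .lex() accepts a raw string or a TemplatedFile and returns a tuple
    # of (segments, violations).
    segments, violations = lexer.lex("SELECT 1;\n")
    assert not violations
    print([(seg.raw, seg.type) for seg in segments])
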
diff --git a/test/core/parser/markers_test.py b/test/core/parser/markers_test.py
index 840c7a1..80dbb7b 100644
--- a/test/core/parser/markers_test.py
+++ b/test/core/parser/markers_test.py
@@ -33,11 +33,14 @@ def test_markers__setting_position_raw():
     pos = PositionMarker(slice(2, 5), slice(2, 5), templ)
     # Can we infer positions correctly?
     assert pos.working_loc == (1, 3)
+    # Check other marker properties work too (i.e. source properties)
+    assert pos.line_no == 1
+    assert pos.line_pos == 3  # i.e. 2 + 1 (for 1-indexed)
 
 
 def test_markers__setting_position_working():
     """Test that we can correctly set positions manually."""
     templ = TemplatedFile.from_string("foobar")
     pos = PositionMarker(slice(2, 5), slice(2, 5), templ, 4, 4)
-    # Can we NOT infer when we're told.
+    # Check we don't infer when we're explicitly told.
     assert pos.working_loc == (4, 4)
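
The marker behaviour asserted above is easy to reproduce standalone. A sketch built from the same constructor calls as the tests:

    from sqlfluff.core.parser.markers import PositionMarker
    from sqlfluff.core.templaters import TemplatedFile

    templ = TemplatedFile.from_string("foobar")
    # Without explicit working positions, they are inferred from the slices.
    pos = PositionMarker(slice(2, 5), slice(2, 5), templ)
    assert pos.working_loc == (1, 3)
    assert (pos.line_no, pos.line_pos) == (1, 3)  # 1-indexed source position
    # With explicit working positions (the trailing 4, 4), nothing is inferred.
    pos = PositionMarker(slice(2, 5), slice(2, 5), templ, 4, 4)
    assert pos.working_loc == (4, 4)
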
diff --git a/test/core/parser/segments_base_test.py b/test/core/parser/segments_base_test.py
index 03e1e96..2367451 100644
--- a/test/core/parser/segments_base_test.py
+++ b/test/core/parser/segments_base_test.py
@@ -11,6 +11,7 @@ from sqlfluff.core.parser import (
 from sqlfluff.core.parser.segments.base import PathStep
 from sqlfluff.core.templaters import TemplatedFile
 from sqlfluff.core.parser.context import RootParseContext
+from sqlfluff.core.rules.base import LintFix
 
 
 @pytest.fixture(scope="module")
@@ -71,21 +72,73 @@ def test__parser__base_segments_count_segments(raw_seg_list):
     assert test_seg.count_segments(raw_only=True) == 2
 
 
+@pytest.mark.parametrize(
+    "list_in, result",
+    [
+        (["foo"], None),
+        (["foo", " "], -1),
+        ([" ", "foo", " "], 0),
+        ([" ", "foo"], 0),
+        ([" "], 0),
+        ([], None),
+    ],
+)
+def test__parser_base_segments_find_start_or_end_non_code(
+    generate_test_segments, list_in, result
+):
+    """Test BaseSegment._find_start_or_end_non_code()."""
+    assert (
+        BaseSegment._find_start_or_end_non_code(generate_test_segments(list_in))
+        == result
+    )
+
+
+def test__parser_base_segments_compute_anchor_edit_info(raw_seg_list):
+    """Test BaseSegment.compute_anchor_edit_info()."""
+    # Construct a fix buffer, intentionally with:
+    # - one duplicate.
+    # - two different incompatible fixes on the same segment.
+    fixes = [
+        LintFix.replace(raw_seg_list[0], [raw_seg_list[0].edit(raw="a")]),
+        LintFix.replace(raw_seg_list[0], [raw_seg_list[0].edit(raw="a")]),
+        LintFix.replace(raw_seg_list[0], [raw_seg_list[0].edit(raw="b")]),
+    ]
+    anchor_info_dict = BaseSegment.compute_anchor_edit_info(fixes)
+    # Check the target segment is the only key we have.
+    assert list(anchor_info_dict.keys()) == [raw_seg_list[0].uuid]
+    anchor_info = anchor_info_dict[raw_seg_list[0].uuid]
+    # Check that the duplicate has been deduplicated.
+    # i.e. this isn't 3.
+    assert anchor_info.replace == 2
+    # Check the fixes themselves.
+    # NOTE: There's no duplicated first fix.
+    assert anchor_info.fixes == [
+        LintFix.replace(raw_seg_list[0], [raw_seg_list[0].edit(raw="a")]),
+        LintFix.replace(raw_seg_list[0], [raw_seg_list[0].edit(raw="b")]),
+    ]
+    # Check the first replace
+    assert anchor_info._first_replace == LintFix.replace(
+        raw_seg_list[0], [raw_seg_list[0].edit(raw="a")]
+    )
+
+
 def test__parser__base_segments_path_to(raw_seg_list):
     """Test the .path_to() method."""
     test_seg_a = DummyAuxSegment(raw_seg_list)
     test_seg_b = DummySegment([test_seg_a])
     # With a direct parent/child relationship we only get
     # one element of path.
-    assert test_seg_b.path_to(test_seg_a) == [PathStep(test_seg_b, 0, 1)]
+    # NOTE: All the dummy segments return True for .is_code()
+    # so that means they do appear in code_idxs.
+    assert test_seg_b.path_to(test_seg_a) == [PathStep(test_seg_b, 0, 1, (0,))]
     # With a three segment chain - we get two path elements.
     assert test_seg_b.path_to(raw_seg_list[0]) == [
-        PathStep(test_seg_b, 0, 1),
-        PathStep(test_seg_a, 0, 2),
+        PathStep(test_seg_b, 0, 1, (0,)),
+        PathStep(test_seg_a, 0, 2, (0, 1)),
     ]
     assert test_seg_b.path_to(raw_seg_list[1]) == [
-        PathStep(test_seg_b, 0, 1),
-        PathStep(test_seg_a, 1, 2),
+        PathStep(test_seg_b, 0, 1, (0,)),
+        PathStep(test_seg_a, 1, 2, (0, 1)),
     ]
 
 
@@ -199,7 +252,10 @@ def test__parser__raw_segments_with_ancestors(raw_seg_list):
     assert test_seg.raw_segments_with_ancestors == [
         (
             raw_seg_list[0],
-            [PathStep(test_seg, 0, 2), PathStep(test_seg.segments[0], 0, 1)],
+            [
+                PathStep(test_seg, 0, 2, (0, 1)),
+                PathStep(test_seg.segments[0], 0, 1, (0,)),
+            ],
         ),
-        (raw_seg_list[1], [PathStep(test_seg, 1, 2)]),
+        (raw_seg_list[1], [PathStep(test_seg, 1, 2, (0, 1))]),
     ]
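
The deduplication asserted in compute_anchor_edit_info above hinges on LintFix defining value equality. The same order-preserving pattern in isolation (a generic sketch, not sqlfluff's implementation):

    from dataclasses import dataclass
    from typing import List

    @dataclass(frozen=True)
    class Fix:
        anchor: str
        edit: str

    def dedupe(fixes: List[Fix]) -> List[Fix]:
        seen, out = set(), []
        for fix in fixes:
            if fix not in seen:  # equality-based, keeps first occurrence
                seen.add(fix)
                out.append(fix)
        return out

    fixes = [Fix("seg0", "a"), Fix("seg0", "a"), Fix("seg0", "b")]
    assert dedupe(fixes) == [Fix("seg0", "a"), Fix("seg0", "b")]
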
diff --git a/test/core/plugin_test.py b/test/core/plugin_test.py
index 8414aa2..c7c2ac0 100644
--- a/test/core/plugin_test.py
+++ b/test/core/plugin_test.py
@@ -1,4 +1,6 @@
 """Plugin related tests."""
+import pytest
+
 from sqlfluff.core.plugin.host import get_plugin_manager
 from sqlfluff.core.config import FluffConfig
 
@@ -9,27 +11,45 @@ def test__plugin_manager_registers_example_plugin():
     # The plugin import order is non-deterministic.
     # Use sets in case the dbt plugin (or other plugins) are
     # already installed too.
-    assert set(
+    installed_plugins = set(
         plugin_module.__name__ for plugin_module in plugin_manager.get_plugins()
-    ).issuperset(
+    )
+    print(f"Installed plugins: {installed_plugins}")
+    assert installed_plugins.issuperset(
         {
+            # Check that both the v1 and v2 example plugins are correctly
+            # installed.
             "example.rules",
             "sqlfluff.core.plugin.lib",
         }
     )
 
 
-def test__plugin_example_rules_returned():
+@pytest.mark.parametrize(
+    "rule_ref",
+    # Check the V1 plugin.
+    ["Rule_Example_L001"],
+)
+def test__plugin_example_rules_returned(rule_ref):
     """Test that the example rules from the plugin are returned."""
     plugin_manager = get_plugin_manager()
     # The plugin import order is non-deterministic
-    assert "Rule_Example_L001" in [
+    rule_names = [
         rule.__name__ for rules in plugin_manager.hook.get_rules() for rule in rules
     ]
+    print(f"Rule names: {rule_names}")
+    assert rule_ref in rule_names
 
 
-def test__plugin_default_config_read():
+@pytest.mark.parametrize(
+    "rule_ref,config_option",
+    # Check the V1 rule plugin.
+    [("Example_L001", "forbidden_columns")],
+)
+def test__plugin_default_config_read(rule_ref, config_option):
     """Test that the example plugin default config is merged into FluffConfig."""
     fluff_config = FluffConfig(overrides={"dialect": "ansi"})
     # The plugin import order is non-deterministic
-    assert "forbidden_columns" in fluff_config._configs["rules"]["Example_L001"]
+    print(f"Detected config sections: {fluff_config._configs['rules'].keys()}")
+    # Check V1
+    assert config_option in fluff_config._configs["rules"][rule_ref]
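
The enumeration these tests print can be run directly to see what's registered in a given environment. A sketch assembled from the calls used above:

    from sqlfluff.core.plugin.host import get_plugin_manager

    plugin_manager = get_plugin_manager()
    # Installed plugin modules:
    print({m.__name__ for m in plugin_manager.get_plugins()})
    # Rule classes contributed via the get_rules hook:
    rule_names = [
        rule.__name__
        for rules in plugin_manager.hook.get_rules()
        for rule in rules
    ]
    print(rule_names)
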
diff --git a/test/core/rules/docstring_test.py b/test/core/rules/docstring_test.py
index 25f551f..29ffb88 100644
--- a/test/core/rules/docstring_test.py
+++ b/test/core/rules/docstring_test.py
@@ -1,13 +1,13 @@
 """Test rules docstring."""
 import pytest
+import re
 
 from sqlfluff import lint
 from sqlfluff.core.plugin.host import get_plugin_manager
-from sqlfluff.core.rules.doc_decorators import is_configurable, is_documenting_groups
 
-KEYWORD_ANTI = "    **Anti-pattern**"
-KEYWORD_BEST = "    **Best practice**"
-KEYWORD_CODE_BLOCK = "\n    .. code-block:: sql\n"
+KEYWORD_ANTI = re.compile(r"    \*\*Anti-pattern\*\*")
+KEYWORD_BEST = re.compile(r"    \*\*Best practice\*\*")
+KEYWORD_CODE_BLOCK = re.compile(r"\n    \.\. code-block:: (sql|jinja)\n")
 
 
 @pytest.mark.parametrize(
@@ -23,7 +23,7 @@ def test_content_count(content, min_count):
     for plugin_rules in get_plugin_manager().hook.get_rules():
         for rule in plugin_rules:
             if rule._check_docstring is True:
-                assert rule.__doc__.count(content) >= min_count, (
+                assert len(content.findall(rule.__doc__)) >= min_count, (
                     f"{rule.__name__} content {content} does not occur at least "
                     f"{min_count} times"
                 )
@@ -34,35 +34,14 @@ def test_keyword_anti_before_best():
     for plugin_rules in get_plugin_manager().hook.get_rules():
         for rule in plugin_rules:
             if rule._check_docstring is True:
-                assert rule.__doc__.index(KEYWORD_ANTI) < rule.__doc__.index(
-                    KEYWORD_BEST
-                ), (
+                best_pos = KEYWORD_BEST.search(rule.__doc__).start()
+                anti_pos = KEYWORD_ANTI.search(rule.__doc__).start()
+                assert anti_pos < best_pos, (
                     f"{rule.__name__} keyword {KEYWORD_BEST} appears before "
                     f"{KEYWORD_ANTI}"
                 )
 
 
-def test_config_decorator():
-    """Test rules with config_keywords have the @document_configuration decorator."""
-    for plugin_rules in get_plugin_manager().hook.get_rules():
-        for rule in plugin_rules:
-            if hasattr(rule, "config_keywords"):
-                assert is_configurable(rule), (
-                    f"Rule {rule.__name__} has config but is not decorated with "
-                    "@document_configuration to display that config."
-                )
-
-
-def test_groups_decorator():
-    """Test rules with groups have the @document_groups decorator."""
-    for plugin_rules in get_plugin_manager().hook.get_rules():
-        for rule in plugin_rules:
-            if hasattr(rule, "groups"):
-                assert is_documenting_groups(
-                    rule
-                ), f'Rule {rule.__name__} does not specify "@document_groups".'
-
-
 def test_backtick_replace():
     """Test replacing docstring double backticks for lint results."""
     sql = """
@@ -71,8 +50,8 @@ def test_backtick_replace():
         b
     FROM foo
     """
-    result = lint(sql, rules=["L015"])
-    # L015 docstring looks like:
+    result = lint(sql, rules=["ST08"])
+    # ST08 docstring looks like:
     # ``DISTINCT`` used with parentheses.
     # Check the double backticks (``) get replaced by a single quote (').
     assert result[0]["description"] == "'DISTINCT' used with parentheses."
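
The move from plain strings to compiled patterns is what lets the code-block check accept either sql or jinja blocks. A minimal sketch of the difference:

    import re

    KEYWORD_CODE_BLOCK = re.compile(r"\n    \.\. code-block:: (sql|jinja)\n")
    doc = "Docstring.\n\n    .. code-block:: jinja\n\n    {% if x %}1{% endif %}\n"
    # str.count() can only match one literal form; the pattern matches both
    # the sql and jinja variants.
    assert len(KEYWORD_CODE_BLOCK.findall(doc)) == 1
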
diff --git a/test/core/rules/rules_test.py b/test/core/rules/rules_test.py
index 8029065..ec1d756 100644
--- a/test/core/rules/rules_test.py
+++ b/test/core/rules/rules_test.py
@@ -1,16 +1,19 @@
 """Tests for the standard set of rules."""
 import pytest
+import logging
 
 from sqlfluff.core import Linter
+from sqlfluff.core.linter import RuleTuple
 from sqlfluff.core.parser.markers import PositionMarker
+from sqlfluff.core.errors import SQLFluffUserError
 from sqlfluff.core.rules import BaseRule, LintResult, LintFix
 from sqlfluff.core.rules import get_ruleset
-from sqlfluff.core.rules.crawlers import RootOnlyCrawler, SegmentSeekerCrawler
 from sqlfluff.core.rules.doc_decorators import (
-    document_configuration,
     document_fix_compatible,
     document_groups,
+    document_configuration,
 )
+from sqlfluff.core.rules.crawlers import RootOnlyCrawler, SegmentSeekerCrawler
 from sqlfluff.core.config import FluffConfig
 from sqlfluff.core.parser import WhitespaceSegment
 from sqlfluff.core.templaters.base import TemplatedFile
@@ -19,6 +22,7 @@ from sqlfluff.utils.testing.rules import get_rule_from_set
 from test.fixtures.rules.custom.L000 import Rule_L000
 from test.fixtures.rules.custom.S000 import Rule_S000
 from sqlfluff.core.rules.loader import get_rules_from_path
+from sqlfluff.utils.testing.logging import fluff_log_catcher
 
 
 class Rule_T042(BaseRule):
@@ -30,8 +34,6 @@ class Rule_T042(BaseRule):
         pass
 
 
-@document_groups
-@document_fix_compatible
 class Rule_T001(BaseRule):
     """A deliberately malicious rule.
 
@@ -42,6 +44,7 @@ class Rule_T001(BaseRule):
 
     groups = ("all",)
     crawl_behaviour = SegmentSeekerCrawler({"whitespace"})
+    is_fix_compatible = True
 
     def _eval(self, context):
         """Stars make newlines."""
@@ -80,13 +83,89 @@ def test__rules__user_rules():
     # Set up a linter with the user rule
     linter = Linter(user_rules=[Rule_T042], dialect="ansi")
     # Make sure the new one is in there.
-    assert ("T042", "A dummy rule.") in linter.rule_tuples()
+    assert RuleTuple("T042", "", "A dummy rule.", ("all",), ()) in linter.rule_tuples()
     # Instantiate a second linter and check it's NOT in there.
     # This tests that copying and isolation works.
     linter = Linter(dialect="ansi")
     assert not any(rule[0] == "T042" for rule in linter.rule_tuples())
 
 
+@pytest.mark.parametrize(
+    "rules, exclude_rules, resulting_codes",
+    [
+        # NB: We don't check the "select nothing" case, because not setting
+        # the rules setting just means "select everything".
+        # ("", "", set()),
+        # 1: Select by code.
+        # NOTE: T012 uses T011 as its name but that should be ignored
+        # because of the conflict.
+        ("T010", "", {"T010"}),
+        ("T010,T011", "", {"T010", "T011"}),
+        ("T010,T011", "T011", {"T010"}),
+        # 2: Select by name
+        # NOTE: T012 uses "fake_other" as it's group but that should be ignored
+        # because of the conflict.
+        ("fake_basic", "", {"T010"}),
+        ("fake_other", "", {"T011"}),
+        ("fake_basic,fake_other", "", {"T010", "T011"}),
+        # 3: Select by group
+        # NOTE: T010 uses "foo" as it's alias but that should be ignored
+        # because of the conflict.
+        ("test", "", {"T010", "T011"}),
+        ("foo", "", {"T011", "T012"}),
+        ("test,foo", "", {"T010", "T011", "T012"}),
+        ("test", "foo", {"T010"}),
+        # 4: Select by alias
+        ("fb1", "", {"T010"}),
+        ("fb2", "", {"T011"}),
+    ],
+)
+def test__rules__rule_selection(rules, exclude_rules, resulting_codes):
+    """Test that rule selection works by various means."""
+
+    class Rule_T010(BaseRule):
+        """Fake Basic Rule."""
+
+        groups = ("all", "test")
+        name = "fake_basic"
+        aliases = ("fb1", "foo")  # NB: Foo is a group on another rule.
+        crawl_behaviour = RootOnlyCrawler()
+
+        def _eval(self, **kwargs):
+            pass
+
+    class Rule_T011(Rule_T010):
+        """Fake Basic Rule.
+
+        NOTE: We inherit crawl behaviour and _eval from above.
+        """
+
+        groups = ("all", "test", "foo")
+        name = "fake_other"
+        aliases = ("fb2",)
+
+    class Rule_T012(Rule_T010):
+        """Fake Basic Rule.
+
+        NOTE: We inherit crawl behaviour and _eval from above.
+        """
+
+        # NB: "fake_other" is the name of another rule.
+        groups = ("all", "foo", "fake_other")
+        # No aliases; name collides with the alias of another rule.
+        name = "fake_again"
+        aliases = ()
+
+    cfg = FluffConfig(
+        overrides={"rules": rules, "exclude_rules": exclude_rules, "dialect": "ansi"}
+    )
+    linter = Linter(config=cfg, user_rules=[Rule_T010, Rule_T011, Rule_T012])
+    # Get the set of selected codes:
+    selected_codes = set(tpl[0] for tpl in linter.rule_tuples())
+    # Check selected rules
+    assert selected_codes == resulting_codes
+
+
 def test__rules__filter_uparsable():
     """Test that rules that handle their own crawling respect unparsable."""
     # Set up a linter with the user rule
@@ -121,35 +200,73 @@ def test__rules__runaway_fail_catch():
 def test_rules_cannot_be_instantiated_without_declared_configs():
     """Ensure that new rules must be instantiated with config values."""
 
-    class NewRule(BaseRule):
-        config_keywords = ["comma_style"]
+    class Rule_NewRule_ZZ99(BaseRule):
+        """Testing Rule."""
 
-    new_rule = NewRule(code="L000", description="", comma_style="trailing")
-    assert new_rule.comma_style == "trailing"
-    # Error is thrown since "comma_style" is defined in class,
+        config_keywords = ["tab_space_size"]
+
+    new_rule = Rule_NewRule_ZZ99(code="L000", description="", tab_space_size=6)
+    assert new_rule.tab_space_size == 6
+    # Error is thrown since "tab_space_size" is defined in class,
     # but not upon instantiation
     with pytest.raises(ValueError):
-        new_rule = NewRule(code="L000", description="")
+        new_rule = Rule_NewRule_ZZ99(code="L000", description="")
+
+
+def test_rules_legacy_doc_decorators():
+    """Ensure that the deprecated decorators can still be imported but do nothing."""
+    with fluff_log_catcher(logging.WARNING, "sqlfluff") as caplog:
+
+        @document_fix_compatible
+        @document_groups
+        @document_configuration
+        class Rule_NewRule_ZZ99(BaseRule):
+            """Untouched Text."""
+
+            pass
+
+    # Check they didn't do anything to the docstring.
+    assert Rule_NewRule_ZZ99.__doc__ == """Untouched Text."""
+    # Check there are warnings.
+    print("Records:")
+    for record in caplog.records:
+        print(record)
+    assert "uses the @document_fix_compatible decorator" in caplog.text
+    assert "uses the @document_groups decorator" in caplog.text
+    assert "uses the @document_configuration decorator" in caplog.text
 
 
 def test_rules_configs_are_dynamically_documented():
     """Ensure that rule configurations are added to the class docstring."""
 
-    @document_configuration
-    class RuleWithConfig(BaseRule):
+    class RuleWithConfig_ZZ99(BaseRule):
         """A new rule with configuration."""
 
-        config_keywords = ["max_line_length"]
+        config_keywords = ["unquoted_identifiers_policy"]
 
-    assert "max_line_length" in RuleWithConfig.__doc__
+    print(f"RuleWithConfig_ZZ99.__doc__: {RuleWithConfig_ZZ99.__doc__!r}")
+    assert "unquoted_identifiers_policy" in RuleWithConfig_ZZ99.__doc__
 
-    @document_configuration
-    class RuleWithoutConfig(BaseRule):
+    class RuleWithoutConfig_ZZ99(BaseRule):
         """A new rule without configuration."""
 
         pass
 
-    assert "Configuration" not in RuleWithoutConfig.__doc__
+    print(f"RuleWithoutConfig_ZZ99.__doc__: {RuleWithoutConfig_ZZ99.__doc__!r}")
+    assert "Configuration" not in RuleWithoutConfig_ZZ99.__doc__
+
+
+def test_rules_name_validation():
+    """Ensure that rule names are validated."""
+    with pytest.raises(SQLFluffUserError) as exc_info:
+
+        class RuleWithBadName_ZZ99(BaseRule):
+            """A new rule with a bad name."""
+
+            name = "MY-KEBAB-CASE-NAME"
+
+    assert "Tried to define rule with unexpected name" in exc_info.value.args[0]
+    assert "MY-KEBAB-CASE-NAME" in exc_info.value.args[0]
 
 
 def test_rule_exception_is_caught_to_validation():
@@ -178,13 +295,13 @@ def test_rule_must_belong_to_all_group():
     """Assert correct 'groups' config for rule."""
     std_rule_set = get_ruleset()
 
-    with pytest.raises(AttributeError):
+    with pytest.raises(AssertionError):
 
         @std_rule_set.register
         class Rule_T000(BaseRule):
             """Badly configured rule, no groups attribute."""
 
-            def _eval(self, segment, parent_stack, **kwargs):
+            def _eval(self, **kwargs):
                 pass
 
     with pytest.raises(AssertionError):
@@ -195,7 +312,7 @@ def test_rule_must_belong_to_all_group():
 
             groups = ()
 
-            def _eval(self, segment, parent_stack, **kwargs):
+            def _eval(self, **kwargs):
                 pass
 
 
diff --git a/test/core/templaters/jinja_test.py b/test/core/templaters/jinja_test.py
index ddc3862..62d6844 100644
--- a/test/core/templaters/jinja_test.py
+++ b/test/core/templaters/jinja_test.py
@@ -25,6 +25,17 @@ JINJA_STRING = (
     "{% endif %}{% endfor %} WHERE {{condition}}\n\n"
 )
 
+JINJA_MACRO_CALL_SQL = (
+    "{% macro render_name(title) %}\n"
+    "  '{{ title }}. foo' as {{ caller() }}\n"
+    "{% endmacro %}\n"
+    "SELECT\n"
+    "    {% call render_name('Sir') %}\n"
+    "        bar\n"
+    "    {% endcall %}\n"
+    "FROM baz\n"
+)
+
 
 @pytest.mark.parametrize(
     "instr, expected_outstr",
@@ -143,6 +154,82 @@ class RawTemplatedTestCase(NamedTuple):
                 "SELECT 1, 2\n",
             ],
         ),
+        RawTemplatedTestCase(
+            name="strip_and_templated_whitespace",
+            instr="SELECT {{- '  ' -}} 1{{ ' , 2' -}}\n",
+            templated_str="SELECT  1 , 2",
+            expected_templated_sliced__source_list=[
+                "SELECT",
+                " ",
+                "{{- '  ' -}}",
+                " ",
+                "1",
+                "{{ ' , 2' -}}",
+                "\n",
+            ],
+            expected_templated_sliced__templated_list=[
+                "SELECT",
+                "",  # Placeholder for consumed whitespace
+                "  ",  # Placeholder for templated whitespace
+                "",  # Placeholder for consumed whitespace
+                "1",
+                " , 2",
+                "",  # Placeholder for consumed newline
+            ],
+            expected_raw_sliced__source_list=[
+                "SELECT",
+                " ",
+                "{{- '  ' -}}",
+                " ",
+                "1",
+                "{{ ' , 2' -}}",
+                "\n",
+            ],
+        ),
+        RawTemplatedTestCase(
+            name="strip_both_block_hard",
+            instr="SELECT {%- set x = 42 %} 1 {%- if true -%} , 2{% endif -%}\n",
+            templated_str="SELECT 1, 2",
+            expected_templated_sliced__source_list=[
+                "SELECT",
+                # NB: Even though the jinja tag consumes whitespace, we still
+                # get it here as a placeholder.
+                " ",
+                "{%- set x = 42 %}",
+                " 1",
+                # This whitespace is separate from the 1 because it's consumed.
+                " ",
+                "{%- if true -%}",
+                " ",
+                ", 2",
+                "{% endif -%}",
+                "\n",
+            ],
+            expected_templated_sliced__templated_list=[
+                "SELECT",
+                "",  # Consumed whitespace placeholder
+                "",  # Jinja block placeholder
+                " 1",
+                "",  # Consumed whitespace
+                "",  # Jinja block placeholder
+                "",  # More consumed whitespace
+                ", 2",
+                "",  # Jinja block
+                "",  # Consumed final newline.
+            ],
+            expected_raw_sliced__source_list=[
+                "SELECT",
+                " ",
+                "{%- set x = 42 %}",
+                " 1",
+                " ",
+                "{%- if true -%}",
+                " ",
+                ", 2",
+                "{% endif -%}",
+                "\n",
+            ],
+        ),
         RawTemplatedTestCase(
             name="basic_data",
             instr="""select
@@ -579,6 +666,7 @@ def assert_structure(yaml_loader, path, code_only=True, include_meta=False):
         ("jinja_c_dbt/dbt_builtins_source", True, False),
         ("jinja_c_dbt/dbt_builtins_this", True, False),
         ("jinja_c_dbt/dbt_builtins_var_default", True, False),
+        ("jinja_c_dbt/dbt_builtins_test", True, False),
         # do directive
         ("jinja_e/jinja", True, False),
         # case sensitivity and python literals
@@ -600,6 +688,9 @@ def assert_structure(yaml_loader, path, code_only=True, include_meta=False):
         ("jinja_l_metas/004", False, True),
         ("jinja_l_metas/005", False, True),
         ("jinja_l_metas/006", False, True),
+        ("jinja_l_metas/007", False, True),
+        ("jinja_l_metas/008", False, True),
+        ("jinja_l_metas/009", False, True),
         # Library Loading from a folder when library is module
         ("jinja_m_libraries_module/jinja", True, False),
         ("jinja_n_nested_macros/jinja", True, False),
@@ -636,7 +727,11 @@ def test__templater_jinja_block_matching(caplog):
     template_segments = [
         seg
         for seg in parsed.raw_segments
-        if seg.is_type("template_loop", "placeholder")
+        if seg.is_type("template_loop")
+        or (
+            seg.is_type("placeholder")
+            and seg.block_type in ("block_start", "block_end", "block_mid")
+        )
     ]
 
     # Group them together by block UUID
@@ -654,9 +749,18 @@ def test__templater_jinja_block_matching(caplog):
     groups = {
         "for actions clause 1": [(6, 5), (9, 5), (12, 5), (15, 5)],
         "for actions clause 2": [(17, 5), (21, 5), (29, 5), (37, 5)],
-        "if loop.first 1": [(18, 9), (20, 9)],
-        "if loop.first 2": [(22, 9), (28, 9)],
-        "if loop.first 3": [(30, 9), (36, 9)],
+        # NOTE: all the if loop clauses are grouped together.
+        "if loop.first": [
+            (18, 9),
+            (20, 9),
+            (20, 9),
+            (22, 9),
+            (22, 9),
+            (28, 9),
+            (30, 9),
+            (30, 9),
+            (36, 9),
+        ],
     }
 
     # Check all are accounted for:
@@ -730,6 +834,51 @@ select 1 from foobarfoobarfoobarfoobar_{{ "dev" }}
                 ("\n", "literal", 97),
             ],
         ),
+        # Tests for jinja blocks that consume whitespace.
+        (
+            """SELECT 1 FROM {%+if true-%} {{ref('foo')}} {%-endif%}""",
+            [
+                ("SELECT 1 FROM ", "literal", 0),
+                ("{%+if true-%}", "block_start", 14),
+                (" ", "literal", 27),
+                ("{{ref('foo')}}", "templated", 28),
+                (" ", "literal", 42),
+                ("{%-endif%}", "block_end", 43),
+            ],
+        ),
+        (
+            """{% for item in some_list -%}
+    SELECT *
+    FROM some_table
+{{ "UNION ALL\n" if not loop.last }}
+{%- endfor %}""",
+            [
+                ("{% for item in some_list -%}", "block_start", 0),
+                # This gets consumed in the templated file, but it's still here.
+                ("\n    ", "literal", 28),
+                ("SELECT *\n    FROM some_table\n", "literal", 33),
+                ('{{ "UNION ALL\n" if not loop.last }}', "templated", 62),
+                ("\n", "literal", 97),
+                ("{%- endfor %}", "block_end", 98),
+            ],
+        ),
+        (
+            JINJA_MACRO_CALL_SQL,
+            [
+                ("{% macro render_name(title) %}", "block_start", 0),
+                ("\n" "  '", "literal", 30),
+                ("{{ title }}", "templated", 34),
+                (". foo' as ", "literal", 45),
+                ("{{ caller() }}", "templated", 55),
+                ("\n", "literal", 69),
+                ("{% endmacro %}", "block_end", 70),
+                ("\n" "SELECT\n" "    ", "literal", 84),
+                ("{% call render_name('Sir') %}", "block_start", 96),
+                ("\n" "        bar\n" "    ", "literal", 125),
+                ("{% endcall %}", "block_end", 142),
+                ("\n" "FROM baz\n", "literal", 155),
+            ],
+        ),
     ],
 )
 def test__templater_jinja_slice_template(test, result):
@@ -755,7 +904,8 @@ def test__templater_jinja_slice_template(test, result):
 
 
 def _statement(*args, **kwargs):
-    return "_statement"
+    # NOTE: The standard dbt statement() call returns nothing.
+    return ""
 
 
 def _load_result(*args, **kwargs):
@@ -908,6 +1058,18 @@ SELECT 1
                 ("literal", slice(42, 53, None), slice(18, 29, None)),
             ],
         ),
+        (
+            # Tests Jinja "import" directive.
+            """{% import 'echo.sql' as echo %}
+
+SELECT 1
+""",
+            None,
+            [
+                ("templated", slice(0, 31, None), slice(0, 0, None)),
+                ("literal", slice(31, 42, None), slice(0, 11, None)),
+            ],
+        ),
         (
             # Tests Jinja "from import" directive..
             """{% from 'echo.sql' import echo %}
@@ -919,9 +1081,9 @@ SELECT
 """,
             None,
             [
-                ("block_start", slice(0, 33, None), slice(0, 0, None)),
+                ("templated", slice(0, 33, None), slice(0, 0, None)),
                 ("literal", slice(33, 34, None), slice(0, 1, None)),
-                ("block_start", slice(34, 75, None), slice(1, 1, None)),
+                ("templated", slice(34, 75, None), slice(1, 1, None)),
                 ("literal", slice(75, 88, None), slice(1, 14, None)),
                 ("templated", slice(88, 105, None), slice(14, 19, None)),
                 ("literal", slice(105, 111, None), slice(19, 25, None)),
@@ -1188,30 +1350,52 @@ FROM {{ j }}{{ self.table_name() }}
             ],
         ),
         (
-            """{{ statement('variables', fetch_result=true) }}
-""",
+            "{{ statement('variables', fetch_result=true) }}\n",
             dict(
                 statement=_statement,
                 load_result=_load_result,
             ),
             [
-                ("templated", slice(0, 47, None), slice(0, 10, None)),
-                ("literal", slice(47, 48, None), slice(10, 11, None)),
+                ("templated", slice(0, 47, None), slice(0, 0, None)),
+                ("literal", slice(47, 48, None), slice(0, 1, None)),
             ],
         ),
         (
-            "{% call statement('variables', fetch_result=true) %}"
-            "select 1 as test"
-            "{% endcall %}\n",
+            "{% call statement('variables', fetch_result=true) %}\n"
+            "select 1 as test\n"
+            "{% endcall %}\n"
+            "select 2 as foo\n",
             dict(
                 statement=_statement,
                 load_result=_load_result,
             ),
             [
-                ("templated", slice(0, 52, None), slice(0, 10, None)),
-                ("literal", slice(52, 68, None), slice(10, 10, None)),
-                ("block_end", slice(68, 81, None), slice(10, 10, None)),
-                ("literal", slice(81, 82, None), slice(10, 11, None)),
+                ("block_start", slice(0, 52, None), slice(0, 0, None)),
+                ("literal", slice(52, 70, None), slice(0, 0, None)),
+                ("block_end", slice(70, 83, None), slice(0, 0, None)),
+                ("literal", slice(83, 100, None), slice(0, 17, None)),
+            ],
+        ),
+        (
+            JINJA_MACRO_CALL_SQL,
+            None,
+            [
+                # First, all of this is the call block.
+                ("block_start", slice(0, 30, None), slice(0, 0, None)),
+                ("literal", slice(30, 34, None), slice(0, 0, None)),
+                ("templated", slice(34, 45, None), slice(0, 0, None)),
+                ("literal", slice(45, 55, None), slice(0, 0, None)),
+                ("templated", slice(55, 69, None), slice(0, 0, None)),
+                ("literal", slice(69, 70, None), slice(0, 0, None)),
+                ("block_end", slice(70, 84, None), slice(0, 0, None)),
+                # Then the actual query.
+                ("literal", slice(84, 96, None), slice(0, 12, None)),
+                # The block_start (call) contains the actual content.
+                ("block_start", slice(96, 125, None), slice(12, 47, None)),
+                # The middle and end of the call have zero length in the template
+                ("literal", slice(125, 142, None), slice(47, 47, None)),
+                ("block_end", slice(142, 155, None), slice(47, 47, None)),
+                ("literal", slice(155, 165, None), slice(47, 57, None)),
             ],
         ),
     ],
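
All of the slicing expectations above fall out of JinjaTemplater.process. A minimal sketch of inspecting the slices for a trivial template, using the same call signature as the lexer tests earlier in this diff:

    from sqlfluff.core import FluffConfig
    from sqlfluff.core.templaters import JinjaTemplater

    config = FluffConfig(overrides={"dialect": "ansi"})
    templater = JinjaTemplater()
    templated_file, violations = templater.process(
        in_str="SELECT {{ 1 + 1 }} AS two\n",
        fname="<string>",
        config=config,
        formatter=None,
    )
    assert not violations
    print(templated_file.templated_str)  # SELECT 2 AS two
    for tfs in templated_file.sliced_file:
        print(tfs.slice_type, tfs.source_slice, tfs.templated_slice)
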
diff --git a/test/dialects/ansi_test.py b/test/dialects/ansi_test.py
index 01009d3..a748574 100644
--- a/test/dialects/ansi_test.py
+++ b/test/dialects/ansi_test.py
@@ -190,19 +190,23 @@ def test__dialect__ansi_is_whitespace():
 
 
 @pytest.mark.parametrize(
-    "sql_string, indented_joins,meta_loc",
+    "sql_string, indented_joins, meta_loc",
     [
-        ("select field_1 from my_table as alias_1", True, (1, 5, 8, 14, 15)),
-        ("select field_1 from my_table as alias_1", False, (1, 5, 8, 14, 15)),
+        (
+            "select field_1 from my_table as alias_1",
+            True,
+            (1, 5, 8, 11, 15, 16, 17, 18, 19),
+        ),
+        ("select field_1 from my_table as alias_1", False, (1, 5, 8, 11, 15, 16, 17)),
         (
             "select field_1 from my_table as alias_1 join foo using (field_1)",
             True,
-            (1, 5, 8, 16, 21, 24, 26, 28, 29, 30, 31),
+            (1, 5, 8, 11, 15, 17, 18, 20, 24, 25, 27, 30, 32, 34, 35, 36, 37),
         ),
         (
             "select field_1 from my_table as alias_1 join foo using (field_1)",
             False,
-            (1, 5, 8, 15, 17, 22, 25, 27, 29, 30, 31),
+            (1, 5, 8, 11, 15, 17, 19, 23, 24, 26, 29, 31, 33, 34, 35),
         ),
     ],
 )
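
The meta_loc tuples above index the meta segments (indents, dedents, end-of-file and similar) among the parsed raw segments. A sketch of recovering them for one of the cases, assuming meta_loc counts is_meta segments, which matches how the fixtures read:

    from sqlfluff.core import Linter

    parsed = Linter(dialect="ansi").parse_string(
        "select field_1 from my_table as alias_1"
    )
    meta_loc = tuple(
        idx for idx, seg in enumerate(parsed.tree.raw_segments) if seg.is_meta
    )
    print(meta_loc)
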
diff --git a/test/dialects/dialects_test.py b/test/dialects/dialects_test.py
index dc42d15..17ec8a6 100644
--- a/test/dialects/dialects_test.py
+++ b/test/dialects/dialects_test.py
@@ -7,7 +7,8 @@ import logging
 from typing import Any, Dict, Optional
 import pytest
 
-from sqlfluff.core.parser import Parser, Lexer
+from sqlfluff.core.templaters import TemplatedFile
+from sqlfluff.core.linter import RenderedFile, ParsedString
 from sqlfluff.core import FluffConfig, Linter
 from sqlfluff.core.parser.segments.base import BaseSegment
 
@@ -24,64 +25,98 @@ parse_success_examples, parse_structure_examples = get_parse_fixtures(
 )
 
 
-def lex_and_parse(config_overrides: Dict[str, Any], raw: str) -> Optional[BaseSegment]:
+def lex_and_parse(config_overrides: Dict[str, Any], raw: str) -> Optional[ParsedString]:
     """Performs a Lex and Parse, with cacheable inputs within fixture."""
     # Load the right dialect
     config = FluffConfig(overrides=config_overrides)
-    tokens, lex_vs = Lexer(config=config).lex(raw)
-    # From just the initial parse, check we're all there
-    assert "".join(token.raw for token in tokens) == raw
-    # Check we don't have lexing issues
-    assert not lex_vs
-    # TODO: Handle extremely verbose logging
-    # temp - use negative grep: | grep -v "INFO\|DEBUG\|\[L\|#\|Initial\|^$"
-    # better maybe - https://docs.pytest.org/en/6.2.x/logging.html#caplog-fixture
-
-    if not raw:
+    # Construct rendered file (to skip the templater)
+    templated_file = TemplatedFile.from_string(raw)
+    rendered_file = RenderedFile(
+        templated_file,
+        [],
+        config,
+        {},
+        templated_file.fname,
+        "utf8",
+        raw,
+    )
+    # Parse (which includes lexing)
+    linter = Linter(config=config)
+    parsed_file = linter.parse_rendered(rendered_file)
+    if not raw:  # Empty file case
+        # We're just checking there aren't exceptions in this case.
         return None
-
-    return Parser(config=config).parse(tokens)
+    # Check we managed to parse
+    assert parsed_file.tree
+    # From just the initial parse, check we're all there
+    assert "".join(token.raw for token in parsed_file.tree.raw_segments) == raw
+    # Check we don't have lexing or parsing issues
+    assert not parsed_file.violations
+    return parsed_file
 
 
+@pytest.mark.integration
+@pytest.mark.parse_suite
 @pytest.mark.parametrize("dialect,file", parse_success_examples)
 def test__dialect__base_file_parse(dialect, file):
     """For given test examples, check successful parsing."""
     raw = load_file(dialect, file)
     config_overrides = dict(dialect=dialect)
     # Use the helper function to avoid parsing twice
-    parsed: Optional[BaseSegment] = lex_and_parse(config_overrides, raw)
-    if not parsed:
+    parsed: Optional[ParsedString] = lex_and_parse(config_overrides, raw)
+    if not parsed:  # Empty file case
         return
-
-    print(f"Post-parse structure: {parsed.to_tuple(show_raw=True)}")
-    print(f"Post-parse structure: {parsed.stringify()}")
+    print(f"Post-parse structure: {parsed.tree.to_tuple(show_raw=True)}")
+    print(f"Post-parse structure: {parsed.tree.stringify()}")
     # Check we're all there.
-    assert parsed.raw == raw
+    assert parsed.tree.raw == raw
     # Check that there's nothing unparsable
-    typs = parsed.type_set()
+    typs = parsed.tree.type_set()
     assert "unparsable" not in typs
 
 
-@pytest.mark.integration_test
+@pytest.mark.integration
+@pytest.mark.fix_suite
 @pytest.mark.parametrize("dialect,file", parse_success_examples)
 def test__dialect__base_broad_fix(
     dialect, file, raise_critical_errors_after_fix, caplog
 ):
-    """Run a full fix with all rules, in search of critical errors."""
+    """Run a full fix with all rules, in search of critical errors.
+
+    NOTE: This suite does all of the same things as the above test
+    suite (the `parse_suite`), but also runs fix. In CI, we run
+    the above tests _with_ coverage tracking, but these we run
+    _without_.
+
+    The purpose of this test is as a more stretching run through
+    a wide range of test sql examples, and the full range of rules
+    to find any potential critical errors raised by any interactions
+    between different dialects and rules.
+    """
     raw = load_file(dialect, file)
     config_overrides = dict(dialect=dialect)
-    # Lean on the cached result of the above test if possible
-    parsed: Optional[BaseSegment] = lex_and_parse(config_overrides, raw)
-    if not parsed:
+
+    parsed: Optional[ParsedString] = lex_and_parse(config_overrides, raw)
+    if not parsed:  # Empty file case
         return
+    else:
+        print(parsed.tree.stringify())
 
     config = FluffConfig(overrides=config_overrides)
-    # Due to "raise_critical_errors_after_fix" fixure "fix",
+    linter = Linter(config=config)
+    rule_pack = linter.get_rulepack()
+    # Due to "raise_critical_errors_after_fix" fixture "fix",
     # will now throw.
     with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules"):
-        Linter(config=config).lint_string(raw, fix=True)
+        linter.lint_parsed(
+            parsed,
+            rule_pack,
+            fix=True,
+        )
 
 
+@pytest.mark.integration
+@pytest.mark.parse_suite
 @pytest.mark.parametrize("dialect,sqlfile,code_only,yamlfile", parse_structure_examples)
 def test__dialect__base_parse_struct(
     dialect,
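
The new parse_suite and fix_suite markers let CI run the parse checks with coverage and the broader fix sweep without it, as the docstring above describes. Assuming the markers are registered in the project's pytest configuration, the split is driven with:

    pytest test/dialects/dialects_test.py -m "parse_suite"
    pytest test/dialects/dialects_test.py -m "fix_suite"
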
diff --git a/test/dialects/postgres_test.py b/test/dialects/postgres_test.py
index 116141a..0e24ac7 100644
--- a/test/dialects/postgres_test.py
+++ b/test/dialects/postgres_test.py
@@ -64,7 +64,7 @@ def test_epoch_datetime_unit(raw: str) -> None:
     """Test the EPOCH keyword for postgres dialect."""
     # Don't test for new lines or capitalisation
     cfg = FluffConfig(
-        configs={"core": {"exclude_rules": "L009,L016,L036", "dialect": "postgres"}}
+        configs={"core": {"exclude_rules": "LT12,LT05,LT09", "dialect": "postgres"}}
     )
     lnt = Linter(config=cfg)
     result = lnt.lint_string(raw)
@@ -81,7 +81,7 @@ def test_epoch_datetime_unit(raw: str) -> None:
 def test_space_is_not_reserved(raw: str) -> None:
     """Ensure that SPACE is not treated as reserved."""
     cfg = FluffConfig(
-        configs={"core": {"exclude_rules": "L009,L016,L031", "dialect": "postgres"}}
+        configs={"core": {"exclude_rules": "LT12,LT05,AL07", "dialect": "postgres"}}
     )
     lnt = Linter(config=cfg)
     result = lnt.lint_string(raw)
diff --git a/test/diff_quality_plugin_test.py b/test/diff_quality_plugin_test.py
index 11dc7f6..3167be7 100644
--- a/test/diff_quality_plugin_test.py
+++ b/test/diff_quality_plugin_test.py
@@ -10,16 +10,17 @@ from sqlfluff.utils.testing.cli import invoke_assert_code
 
 
 @pytest.mark.parametrize(
-    "sql_path,expected_violations_lines",
+    "sql_paths,expected_violations_lines",
     [
-        ("linter/indentation_errors.sql", list(range(2, 7))),
-        ("linter/parse_error.sql", {1}),
+        (("linter/indentation_errors.sql",), list(range(2, 7))),
+        (("linter/parse_error.sql",), {1}),
         # NB: This version of the file is in a directory configured
         # to ignore parsing errors.
-        ("linter/diffquality/parse_error.sql", []),
+        (("linter/diffquality/parse_error.sql",), []),
+        (tuple(), []),
     ],
 )
-def test_diff_quality_plugin(sql_path, expected_violations_lines, monkeypatch):
+def test_diff_quality_plugin(sql_paths, expected_violations_lines, monkeypatch):
     """Test the plugin at least finds errors on the expected lines."""
 
     def execute(command, exit_codes):
@@ -45,14 +46,19 @@ def test_diff_quality_plugin(sql_path, expected_violations_lines, monkeypatch):
     violation_reporter = diff_quality_plugin.diff_cover_report_quality(
         options="--processes=1"
     )
-    sql_path = str(Path(sql_path))
+    assert len(sql_paths) in (0, 1)
+    sql_paths = [str(Path(sql_path)) for sql_path in sql_paths]
 
-    violations_dict = violation_reporter.violations_batch([sql_path])
+    violations_dict = violation_reporter.violations_batch(sql_paths)
     assert isinstance(violations_dict, dict)
     if expected_violations_lines:
-        assert len(violations_dict[sql_path]) > 0
-        violations_lines = {v.line for v in violations_dict[sql_path]}
+        assert len(violations_dict[sql_paths[0]]) > 0
+        violations_lines = {v.line for v in violations_dict[sql_paths[0]]}
         for expected_line in expected_violations_lines:
             assert expected_line in violations_lines
     else:
-        assert len(violations_dict[sql_path]) == 0
+        assert (
+            len(violations_dict[sql_paths[0]]) == 0
+            if sql_paths
+            else len(violations_dict) == 0
+        )
diff --git a/test/fixtures/api/config_override/.sqlfluff b/test/fixtures/api/config_override/.sqlfluff
index 7eefb2a..f0ea039 100644
--- a/test/fixtures/api/config_override/.sqlfluff
+++ b/test/fixtures/api/config_override/.sqlfluff
@@ -1,2 +1,2 @@
 [sqlfluff]
-exclude_rules = L027,L029
+exclude_rules = RF02,RF04
diff --git a/test/fixtures/cli/disable_noqa_test.sql b/test/fixtures/cli/disable_noqa_test.sql
index 6fcade0..c873d58 100644
--- a/test/fixtures/cli/disable_noqa_test.sql
+++ b/test/fixtures/cli/disable_noqa_test.sql
@@ -2,5 +2,5 @@
 -- allows for inline noqa comments to be ignored.
 SELECT
     col_a AS a,
-    col_b as b  --noqa: L010
+    col_b as b  --noqa: CP01
 FROM t;
diff --git a/test/fixtures/cli/ignore_local_config/.sqlfluff b/test/fixtures/cli/ignore_local_config/.sqlfluff
index 05108a0..ccd640d 100644
--- a/test/fixtures/cli/ignore_local_config/.sqlfluff
+++ b/test/fixtures/cli/ignore_local_config/.sqlfluff
@@ -1,2 +1,2 @@
 [sqlfluff]
-exclude_rules = L012
+exclude_rules = AL02
diff --git a/test/fixtures/cli/ignore_local_config/ignore_local_config_test.sql b/test/fixtures/cli/ignore_local_config/ignore_local_config_test.sql
index 18cbc97..aa44ce9 100644
--- a/test/fixtures/cli/ignore_local_config/ignore_local_config_test.sql
+++ b/test/fixtures/cli/ignore_local_config/ignore_local_config_test.sql
@@ -1,4 +1,4 @@
--- This query raises L012.
+-- This query raises AL02.
 -- We exclude this rule in the .sqlfluff file and then test
 -- ignoring this config file via the --ignore-local-config CLI flag.
 SELECT col_a a
diff --git a/test/fixtures/cli/unknown_jinja_tag/.sqlfluff b/test/fixtures/cli/unknown_jinja_tag/.sqlfluff
index 34b8e2a..ce7e62c 100644
--- a/test/fixtures/cli/unknown_jinja_tag/.sqlfluff
+++ b/test/fixtures/cli/unknown_jinja_tag/.sqlfluff
@@ -3,3 +3,4 @@ dialect = ansi
 
 [sqlfluff:templater:jinja]
 load_macros_from_path = my_macros
+apply_dbt_builtins = False
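The added apply_dbt_builtins = False stops the jinja templater from stubbing in dbt-style builtins (ref, source, etc.), so the fixture's unknown tag fails as genuinely unknown. A hedged sketch that materialises an equivalent config and lints through it, assuming the simple API's config_path parameter; the file path and query are illustrative:

import pathlib
import sqlfluff

pathlib.Path(".sqlfluff").write_text(
    "[sqlfluff]\n"
    "dialect = ansi\n"
    "templater = jinja\n"
    "\n"
    "[sqlfluff:templater:jinja]\n"
    "apply_dbt_builtins = False\n"
)
# With the builtins disabled, {{ ref('my_table') }} is no longer stubbed,
# so the templating failure surfaces in the lint result.
print(sqlfluff.lint("SELECT 1 FROM {{ ref('my_table') }}\n", config_path=".sqlfluff"))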
diff --git a/test/fixtures/cli/warning_a.sql b/test/fixtures/cli/warning_a.sql
index 4540400..43fb6dc 100644
--- a/test/fixtures/cli/warning_a.sql
+++ b/test/fixtures/cli/warning_a.sql
@@ -1,4 +1,4 @@
 -- This file should fail _only_ for spacing around +
 -- We explicitly configure that rule to only warn.
--- sqlfluff:warnings:L006
+-- sqlfluff:warnings:LT01
 SELECT 1+2
diff --git a/test/fixtures/config/glob_exclude/.sqlfluff b/test/fixtures/config/glob_exclude/.sqlfluff
index 95bda7d..485f21a 100644
--- a/test/fixtures/config/glob_exclude/.sqlfluff
+++ b/test/fixtures/config/glob_exclude/.sqlfluff
@@ -1,2 +1,2 @@
 [sqlfluff]
-exclude_rules = L05*,L027
+exclude_rules = L05*,RF02
diff --git a/test/fixtures/config/glob_exclude/test.sql b/test/fixtures/config/glob_exclude/test.sql
index fff43b9..cc6de1c 100644
--- a/test/fixtures/config/glob_exclude/test.sql
+++ b/test/fixtures/config/glob_exclude/test.sql
@@ -2,9 +2,11 @@
 /*
 Denylist glob test
 
-This query violates L027, L044, L050, L051, and L052.
-When we exclude L05*,L027 in the config we expect L027, L050, L051,
-and L052 to be ignored by the linter.
+This query violates RF02, AM04, LT13, AM05, and CV06.
+When we exclude L05*,RF02 in the config we expect RF02, LT13, AM05,
+and CV06 to be ignored by the linter.
+- AM05 because its alias is L051
+- CV06 because its alias is L052
 */
 
 SELECT *
diff --git a/test/fixtures/config/glob_include/.sqlfluff b/test/fixtures/config/glob_include/.sqlfluff
index 09bdcc1..b84d311 100644
--- a/test/fixtures/config/glob_include/.sqlfluff
+++ b/test/fixtures/config/glob_include/.sqlfluff
@@ -1,5 +1,5 @@
 [sqlfluff]
-rules = L05*,L027
+rules = L05*,RF02
 
-[sqlfluff:rules:L052]  # Semi-colon formatting approach.
+[sqlfluff:rules:convention.terminator]  # Semi-colon formatting approach.
 require_final_semicolon = True
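Per-rule configuration sections are now addressed by rule name rather than code, hence [sqlfluff:rules:convention.terminator] replacing [sqlfluff:rules:L052]. For the allowlist half, a hedged sketch assuming glob patterns and legacy aliases are still accepted when selecting rules via the simple API:

import sqlfluff

# "L05*" matches rules through their legacy aliases (including CV06,
# formerly L052); RF02 is selected by its new code.
print(sqlfluff.lint("SELECT 1 FROM tbl", dialect="ansi", rules=["L05*", "RF02"]))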
diff --git a/test/fixtures/config/glob_include/test.sql b/test/fixtures/config/glob_include/test.sql
index 6159ae4..ab9170a 100644
--- a/test/fixtures/config/glob_include/test.sql
+++ b/test/fixtures/config/glob_include/test.sql
@@ -2,9 +2,11 @@
 /*
 Allowlist glob test
 
-This query violates L027, L044, L050, L051, and L052.
-When we include L05*,L027 in the config we expect L027, L050, L051,
-and L052 only to be raised by the linter.
+This query violates RF02, AM04, LT13, AM05, and CV06.
+When we include L05*,RF02 in the config we expect RF02, LT13, AM05,
+and CV06 only to be raised by the linter.
+- AM05 because its alias is L051
+- CV06 because its alias is L052
 */
 
 SELECT *
diff --git a/test/fixtures/config/inheritance_b/example.sql b/test/fixtures/config/inheritance_b/example.sql
index 014ea29..3b695b3 100644
--- a/test/fixtures/config/inheritance_b/example.sql
+++ b/test/fixtures/config/inheritance_b/example.sql
@@ -1 +1 @@
-  	SELECT 1    
\ No newline at end of file
+   SELeCT  fOo
\ No newline at end of file
diff --git a/test/fixtures/config/inheritance_b/nested/.sqlfluff b/test/fixtures/config/inheritance_b/nested/.sqlfluff
index 029e344..df2bc62 100644
--- a/test/fixtures/config/inheritance_b/nested/.sqlfluff
+++ b/test/fixtures/config/inheritance_b/nested/.sqlfluff
@@ -1,3 +1,3 @@
 [sqlfluff]
-rules=L002,L003,L009
-exclude_rules=L004
+rules=LT01,LT02,CP01,CP02
+exclude_rules=CP03
diff --git a/test/fixtures/config/inheritance_b/nested/example.sql b/test/fixtures/config/inheritance_b/nested/example.sql
index 014ea29..3b695b3 100644
--- a/test/fixtures/config/inheritance_b/nested/example.sql
+++ b/test/fixtures/config/inheritance_b/nested/example.sql
@@ -1 +1 @@
-  	SELECT 1    
\ No newline at end of file
+   SELeCT  fOo
\ No newline at end of file
diff --git a/test/fixtures/config/inheritance_b/tox.ini b/test/fixtures/config/inheritance_b/tox.ini
index e254de1..0449dab 100644
--- a/test/fixtures/config/inheritance_b/tox.ini
+++ b/test/fixtures/config/inheritance_b/tox.ini
@@ -1,3 +1,3 @@
 [sqlfluff]
-rules=L002,L003
-exclude_rules=L003
+rules=LT01,CP01,CP02
+exclude_rules=CP01
diff --git a/test/fixtures/config/rules_group_with_exclude/.sqlfluff b/test/fixtures/config/rules_group_with_exclude/.sqlfluff
index f522b59..038a59b 100644
--- a/test/fixtures/config/rules_group_with_exclude/.sqlfluff
+++ b/test/fixtures/config/rules_group_with_exclude/.sqlfluff
@@ -1,3 +1,3 @@
 [sqlfluff]
 rules = core
-exclude_rules = L019
+exclude_rules = LT04
diff --git a/test/fixtures/config/rules_group_with_exclude/test.sql b/test/fixtures/config/rules_group_with_exclude/test.sql
index cc5c56f..fc96296 100644
--- a/test/fixtures/config/rules_group_with_exclude/test.sql
+++ b/test/fixtures/config/rules_group_with_exclude/test.sql
@@ -5,7 +5,7 @@ Rules group with exclude rules test
 If some monster wants to run the core rules, but at the same
 time allow trailing and leading commas, then they can do that now
 
-This query should only trigger L010
+This query should only trigger CP01
 */
 
 SELECT
diff --git a/test/fixtures/config/rules_set_to_none/test.sql b/test/fixtures/config/rules_set_to_none/test.sql
index e9a2edb..b47a544 100644
--- a/test/fixtures/config/rules_set_to_none/test.sql
+++ b/test/fixtures/config/rules_set_to_none/test.sql
@@ -6,7 +6,7 @@ The previous default setting for rules was
 'None' which meant all rules would be run. The
 new default is 'all', but having rules = None should
 still run all rules, meaning this query will trigger
-L050,L044, and L010
+LT13,AM04, and CP01
 */
 
 SELECT * from bar
diff --git a/test/fixtures/config/toml/pyproject.toml b/test/fixtures/config/toml/pyproject.toml
index cda1fc2..ab6081e 100644
--- a/test/fixtures/config/toml/pyproject.toml
+++ b/test/fixtures/config/toml/pyproject.toml
@@ -6,9 +6,13 @@ testing_bar = 7.698
 testing_bool = false
 testing_arr = [ "a", "b", "c" ]
 testing_inline_table = { x = 1 }
+rules = ["LT03", "LT09"]
 
 [tool.sqlfluff.bar]
 foo = "foobar"
 
 [tool.sqlfluff.fnarr.fnarr]
 foo = "foobar"
+
+[tool.sqlfluff.rules.capitalisation.keywords]
+capitalisation_policy = "upper"
diff --git a/test/fixtures/dialects/ansi/ansi_cast_with_whitespaces.yml b/test/fixtures/dialects/ansi/ansi_cast_with_whitespaces.yml
index 5d9ce35..f1b2af7 100644
--- a/test/fixtures/dialects/ansi/ansi_cast_with_whitespaces.yml
+++ b/test/fixtures/dialects/ansi/ansi_cast_with_whitespaces.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 16bffa5df2ab1ccf240d9e0f886e0892fb7653fec079027c2e0957b8dda7bc09
+_hash: 98901f41139ba11863745752a671f49b443c94a77a7fd6ece89c6f40ffb33aff
 file:
 - statement:
     select_statement:
@@ -137,11 +137,11 @@ file:
               casting_operator: '::'
               data_type:
                 data_type_identifier: VARCHAR
-                bracketed:
-                  start_bracket: (
-                  expression:
+                bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
                     numeric_literal: '512'
-                  end_bracket: )
+                    end_bracket: )
       from_clause:
         keyword: FROM
         from_expression:
@@ -231,11 +231,11 @@ file:
                   casting_operator: '::'
                   data_type:
                     data_type_identifier: VARCHAR
-                    bracketed:
-                      start_bracket: (
-                      expression:
+                    bracketed_arguments:
+                      bracketed:
+                        start_bracket: (
                         numeric_literal: '512'
-                      end_bracket: )
+                        end_bracket: )
               - comparison_operator:
                   raw_comparison_operator: '='
               - cast_expression:
@@ -246,11 +246,11 @@ file:
                   casting_operator: '::'
                   data_type:
                     data_type_identifier: VARCHAR
-                    bracketed:
-                      start_bracket: (
-                      expression:
+                    bracketed_arguments:
+                      bracketed:
+                        start_bracket: (
                         numeric_literal: '512'
-                      end_bracket: )
+                        end_bracket: )
       where_clause:
         keyword: WHERE
         expression:
diff --git a/test/fixtures/dialects/ansi/arithmetic_a.sql b/test/fixtures/dialects/ansi/arithmetic_a.sql
index 27c56bb..2b3a5f7 100644
--- a/test/fixtures/dialects/ansi/arithmetic_a.sql
+++ b/test/fixtures/dialects/ansi/arithmetic_a.sql
@@ -1 +1,49 @@
-SELECT 1 + (2 * 3) >= 4 + 6+13 as val
+SELECT 1 + (2 * 3) >= 4 + 6+13 as val;
+
+SELECT 1 + ~(~2 * 3) >= 4 + ~6+13 as val;
+
+SELECT -1;
+
+SELECT -1 + 5;
+
+SELECT ~1;
+
+SELECT -1 + ~5;
+
+SELECT 4 & ~8 | 16;
+
+SELECT 8 + ~(3);
+
+SELECT 8 | ~ ~ ~4;
+
+SELECT 1 * -(5);
+
+SELECT 1 * -5;
+
+SELECT 1 * - - - 5;
+
+SELECT 1 * - - - (5);
+
+SELECT 1 * + + (5);
+
+SELECT 1 * - - - func(5);
+
+SELECT 1 * ~ ~ ~ func(5);
+
+SELECT 1 * +(5);
+
+SELECT 1 * +5;
+
+SELECT 1 * + + 5;
+
+SELECT FALSE AND NOT (TRUE);
+
+SELECT FALSE AND NOT NOT NOT (TRUE); -- parses middle NOT as column ref
+
+SELECT FALSE AND NOT (TRUE);
+
+SELECT FALSE AND NOT func(5);
+
+SELECT 'abc' LIKE - - 5; -- PG can parse this ok, and then fail due to data type mismatch
+
+SELECT 'abc' LIKE ~ ~ 5; -- PG can parse this ok, and then fail due to data type mismatch
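The expanded arithmetic_a.sql fixture covers unary +, - and ~ (including stacked operators), NOT, and sign/tilde applied to brackets and function calls. A hedged smoke test of a few of these through the simple API, which raises an APIParsingError on unparsable input:

import sqlfluff

for sql in ("SELECT ~1;", "SELECT 1 * - - - 5;", "SELECT 8 | ~ ~ ~4;"):
    sqlfluff.parse(sql, dialect="ansi")  # raises if it fails to parse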
diff --git a/test/fixtures/dialects/ansi/arithmetic_a.yml b/test/fixtures/dialects/ansi/arithmetic_a.yml
index 412ab24..2b71978 100644
--- a/test/fixtures/dialects/ansi/arithmetic_a.yml
+++ b/test/fixtures/dialects/ansi/arithmetic_a.yml
@@ -3,9 +3,9 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: b23bf006872ce2d785654df4060aa4534863987d9fc2e15d59bee0d96523d7d0
+_hash: 68162fcf003cf96cc1361038a52f8a6da37d5bee87037883ce33a4f2c011cf6f
 file:
-  statement:
+- statement:
     select_statement:
       select_clause:
         keyword: SELECT
@@ -31,3 +31,365 @@ file:
           alias_expression:
             keyword: as
             naked_identifier: val
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+          - numeric_literal: '1'
+          - binary_operator: +
+          - tilde: '~'
+          - bracketed:
+              start_bracket: (
+              expression:
+              - tilde: '~'
+              - numeric_literal: '2'
+              - binary_operator: '*'
+              - numeric_literal: '3'
+              end_bracket: )
+          - comparison_operator:
+            - raw_comparison_operator: '>'
+            - raw_comparison_operator: '='
+          - numeric_literal: '4'
+          - binary_operator: +
+          - tilde: '~'
+          - numeric_literal: '6'
+          - binary_operator: +
+          - numeric_literal: '13'
+          alias_expression:
+            keyword: as
+            naked_identifier: val
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          numeric_literal:
+            sign_indicator: '-'
+            numeric_literal: '1'
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+          - numeric_literal:
+              sign_indicator: '-'
+              numeric_literal: '1'
+          - binary_operator: +
+          - numeric_literal: '5'
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+            tilde: '~'
+            numeric_literal: '1'
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+          - numeric_literal:
+              sign_indicator: '-'
+              numeric_literal: '1'
+          - binary_operator: +
+          - tilde: '~'
+          - numeric_literal: '5'
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+          - numeric_literal: '4'
+          - binary_operator:
+              ampersand: '&'
+          - tilde: '~'
+          - numeric_literal: '8'
+          - binary_operator:
+              pipe: '|'
+          - numeric_literal: '16'
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+            numeric_literal: '8'
+            binary_operator: +
+            tilde: '~'
+            bracketed:
+              start_bracket: (
+              expression:
+                numeric_literal: '3'
+              end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+          - numeric_literal: '8'
+          - binary_operator:
+              pipe: '|'
+          - tilde: '~'
+          - tilde: '~'
+          - tilde: '~'
+          - numeric_literal: '4'
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+            numeric_literal: '1'
+            binary_operator: '*'
+            sign_indicator: '-'
+            bracketed:
+              start_bracket: (
+              expression:
+                numeric_literal: '5'
+              end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+          - numeric_literal: '1'
+          - binary_operator: '*'
+          - numeric_literal:
+              sign_indicator: '-'
+              numeric_literal: '5'
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+          - numeric_literal: '1'
+          - binary_operator: '*'
+          - sign_indicator: '-'
+          - sign_indicator: '-'
+          - numeric_literal:
+              sign_indicator: '-'
+              numeric_literal: '5'
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+          - numeric_literal: '1'
+          - binary_operator: '*'
+          - sign_indicator: '-'
+          - sign_indicator: '-'
+          - sign_indicator: '-'
+          - bracketed:
+              start_bracket: (
+              expression:
+                numeric_literal: '5'
+              end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+          - numeric_literal: '1'
+          - binary_operator: '*'
+          - sign_indicator: +
+          - sign_indicator: +
+          - bracketed:
+              start_bracket: (
+              expression:
+                numeric_literal: '5'
+              end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+          - numeric_literal: '1'
+          - binary_operator: '*'
+          - sign_indicator: '-'
+          - sign_indicator: '-'
+          - sign_indicator: '-'
+          - function:
+              function_name:
+                function_name_identifier: func
+              bracketed:
+                start_bracket: (
+                expression:
+                  numeric_literal: '5'
+                end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+          - numeric_literal: '1'
+          - binary_operator: '*'
+          - tilde: '~'
+          - tilde: '~'
+          - tilde: '~'
+          - function:
+              function_name:
+                function_name_identifier: func
+              bracketed:
+                start_bracket: (
+                expression:
+                  numeric_literal: '5'
+                end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+            numeric_literal: '1'
+            binary_operator: '*'
+            sign_indicator: +
+            bracketed:
+              start_bracket: (
+              expression:
+                numeric_literal: '5'
+              end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+          - numeric_literal: '1'
+          - binary_operator: '*'
+          - numeric_literal:
+              sign_indicator: +
+              numeric_literal: '5'
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+          - numeric_literal: '1'
+          - binary_operator: '*'
+          - sign_indicator: +
+          - numeric_literal:
+              sign_indicator: +
+              numeric_literal: '5'
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+            boolean_literal: 'FALSE'
+            binary_operator: AND
+            keyword: NOT
+            bracketed:
+              start_bracket: (
+              expression:
+                boolean_literal: 'TRUE'
+              end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+          - boolean_literal: 'FALSE'
+          - binary_operator: AND
+          - keyword: NOT
+          - keyword: NOT
+          - keyword: NOT
+          - bracketed:
+              start_bracket: (
+              expression:
+                boolean_literal: 'TRUE'
+              end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+            boolean_literal: 'FALSE'
+            binary_operator: AND
+            keyword: NOT
+            bracketed:
+              start_bracket: (
+              expression:
+                boolean_literal: 'TRUE'
+              end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+            boolean_literal: 'FALSE'
+            binary_operator: AND
+            keyword: NOT
+            function:
+              function_name:
+                function_name_identifier: func
+              bracketed:
+                start_bracket: (
+                expression:
+                  numeric_literal: '5'
+                end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+            quoted_literal: "'abc'"
+            keyword: LIKE
+            sign_indicator: '-'
+            numeric_literal:
+              sign_indicator: '-'
+              numeric_literal: '5'
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+          - quoted_literal: "'abc'"
+          - keyword: LIKE
+          - tilde: '~'
+          - tilde: '~'
+          - numeric_literal: '5'
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/ansi/bracketed_statement.yml b/test/fixtures/dialects/ansi/bracketed_statement.yml
index 2d08da2..5f79481 100644
--- a/test/fixtures/dialects/ansi/bracketed_statement.yml
+++ b/test/fixtures/dialects/ansi/bracketed_statement.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 1bd3bf29ea32b3755ae9e53698307eed6b5869d3d41528a2e38e3dfc64d14c6d
+_hash: a7028eaa159caaa519f3691bc92ad59783a0e192e83b0435f5633b589c6f7847
 file:
 - statement:
     bracketed:
@@ -18,32 +18,29 @@ file:
 - statement:
     bracketed:
       start_bracket: (
-      statement:
-        bracketed:
-          start_bracket: (
-          select_statement:
-            select_clause:
-              keyword: SELECT
-              select_clause_element:
-                numeric_literal: '1'
-          end_bracket: )
+      bracketed:
+        start_bracket: (
+        select_statement:
+          select_clause:
+            keyword: SELECT
+            select_clause_element:
+              numeric_literal: '1'
+        end_bracket: )
       end_bracket: )
 - statement_terminator: ;
 - statement:
     bracketed:
       start_bracket: (
-      statement:
+      bracketed:
+        start_bracket: (
         bracketed:
           start_bracket: (
-          statement:
-            bracketed:
-              start_bracket: (
-              select_statement:
-                select_clause:
-                  keyword: SELECT
-                  select_clause_element:
-                    numeric_literal: '1'
-              end_bracket: )
+          select_statement:
+            select_clause:
+              keyword: SELECT
+              select_clause_element:
+                numeric_literal: '1'
           end_bracket: )
+        end_bracket: )
       end_bracket: )
 - statement_terminator: ;
diff --git a/test/fixtures/dialects/ansi/create_cast.sql b/test/fixtures/dialects/ansi/create_cast.sql
new file mode 100644
index 0000000..88749ed
--- /dev/null
+++ b/test/fixtures/dialects/ansi/create_cast.sql
@@ -0,0 +1,25 @@
+CREATE CAST (int AS bool) WITH FUNCTION fname;
+
+CREATE CAST (int AS bool) WITH FUNCTION fname AS ASSIGNMENT;
+
+CREATE CAST (int AS bool) WITH FUNCTION fname();
+
+CREATE CAST (int AS bool) WITH FUNCTION fname(bool);
+
+CREATE CAST (int AS bool) WITH FUNCTION sch.fname(int, bool) AS ASSIGNMENT;
+
+CREATE CAST (udt_1 AS udt_2) WITH FUNCTION fname(udt_1, udt_2) FOR udt_3;
+
+CREATE CAST (sch.udt_1 AS sch.udt_2) WITH FUNCTION sch.fname(sch.udt_1, sch.udt_2) FOR sch.udt_3;
+
+CREATE CAST (int AS bool) WITH ROUTINE fname();
+
+CREATE CAST (int AS bool) WITH PROCEDURE fname();
+
+CREATE CAST (int AS bool) WITH METHOD fname();
+
+CREATE CAST (int AS bool) WITH INSTANCE METHOD fname();
+
+CREATE CAST (int AS bool) WITH STATIC METHOD fname();
+
+CREATE CAST (int AS bool) WITH CONSTRUCTOR METHOD fname();
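CREATE CAST is new to the ansi dialect here, covering WITH FUNCTION/ROUTINE/PROCEDURE/METHOD (plus INSTANCE, STATIC and CONSTRUCTOR METHOD), optional parameter lists, AS ASSIGNMENT, and FOR. A hedged parse check over a representative subset:

import sqlfluff

for sql in (
    "CREATE CAST (int AS bool) WITH FUNCTION fname;",
    "CREATE CAST (int AS bool) WITH FUNCTION fname AS ASSIGNMENT;",
    "CREATE CAST (udt_1 AS udt_2) WITH FUNCTION fname(udt_1, udt_2) FOR udt_3;",
    "CREATE CAST (int AS bool) WITH STATIC METHOD fname();",
):
    sqlfluff.parse(sql, dialect="ansi")  # raises on a parse failure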
diff --git a/test/fixtures/dialects/ansi/create_cast.yml b/test/fixtures/dialects/ansi/create_cast.yml
new file mode 100644
index 0000000..f238293
--- /dev/null
+++ b/test/fixtures/dialects/ansi/create_cast.yml
@@ -0,0 +1,316 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 54848c7eae3e3c3c50ab04feca39d8798be72e8e24009c02366c2b4083750362
+file:
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          data_type_identifier: int
+      - keyword: AS
+      - data_type:
+          data_type_identifier: bool
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: FUNCTION
+    - function_name:
+        function_name_identifier: fname
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          data_type_identifier: int
+      - keyword: AS
+      - data_type:
+          data_type_identifier: bool
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: FUNCTION
+    - function_name:
+        function_name_identifier: fname
+    - keyword: AS
+    - keyword: ASSIGNMENT
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          data_type_identifier: int
+      - keyword: AS
+      - data_type:
+          data_type_identifier: bool
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: FUNCTION
+    - function_name:
+        function_name_identifier: fname
+    - function_parameter_list:
+        bracketed:
+          start_bracket: (
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          data_type_identifier: int
+      - keyword: AS
+      - data_type:
+          data_type_identifier: bool
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: FUNCTION
+    - function_name:
+        function_name_identifier: fname
+    - function_parameter_list:
+        bracketed:
+          start_bracket: (
+          data_type:
+            data_type_identifier: bool
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          data_type_identifier: int
+      - keyword: AS
+      - data_type:
+          data_type_identifier: bool
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: FUNCTION
+    - function_name:
+        naked_identifier: sch
+        dot: .
+        function_name_identifier: fname
+    - function_parameter_list:
+        bracketed:
+        - start_bracket: (
+        - data_type:
+            data_type_identifier: int
+        - comma: ','
+        - data_type:
+            data_type_identifier: bool
+        - end_bracket: )
+    - keyword: AS
+    - keyword: ASSIGNMENT
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          data_type_identifier: udt_1
+      - keyword: AS
+      - data_type:
+          data_type_identifier: udt_2
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: FUNCTION
+    - function_name:
+        function_name_identifier: fname
+    - function_parameter_list:
+        bracketed:
+        - start_bracket: (
+        - data_type:
+            data_type_identifier: udt_1
+        - comma: ','
+        - data_type:
+            data_type_identifier: udt_2
+        - end_bracket: )
+    - keyword: FOR
+    - object_reference:
+        naked_identifier: udt_3
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          naked_identifier: sch
+          dot: .
+          data_type_identifier: udt_1
+      - keyword: AS
+      - data_type:
+          naked_identifier: sch
+          dot: .
+          data_type_identifier: udt_2
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: FUNCTION
+    - function_name:
+        naked_identifier: sch
+        dot: .
+        function_name_identifier: fname
+    - function_parameter_list:
+        bracketed:
+        - start_bracket: (
+        - data_type:
+            naked_identifier: sch
+            dot: .
+            data_type_identifier: udt_1
+        - comma: ','
+        - data_type:
+            naked_identifier: sch
+            dot: .
+            data_type_identifier: udt_2
+        - end_bracket: )
+    - keyword: FOR
+    - object_reference:
+      - naked_identifier: sch
+      - dot: .
+      - naked_identifier: udt_3
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          data_type_identifier: int
+      - keyword: AS
+      - data_type:
+          data_type_identifier: bool
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: ROUTINE
+    - function_name:
+        function_name_identifier: fname
+    - function_parameter_list:
+        bracketed:
+          start_bracket: (
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          data_type_identifier: int
+      - keyword: AS
+      - data_type:
+          data_type_identifier: bool
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: PROCEDURE
+    - function_name:
+        function_name_identifier: fname
+    - function_parameter_list:
+        bracketed:
+          start_bracket: (
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          data_type_identifier: int
+      - keyword: AS
+      - data_type:
+          data_type_identifier: bool
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: METHOD
+    - function_name:
+        function_name_identifier: fname
+    - function_parameter_list:
+        bracketed:
+          start_bracket: (
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          data_type_identifier: int
+      - keyword: AS
+      - data_type:
+          data_type_identifier: bool
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: INSTANCE
+    - keyword: METHOD
+    - function_name:
+        function_name_identifier: fname
+    - function_parameter_list:
+        bracketed:
+          start_bracket: (
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          data_type_identifier: int
+      - keyword: AS
+      - data_type:
+          data_type_identifier: bool
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: STATIC
+    - keyword: METHOD
+    - function_name:
+        function_name_identifier: fname
+    - function_parameter_list:
+        bracketed:
+          start_bracket: (
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          data_type_identifier: int
+      - keyword: AS
+      - data_type:
+          data_type_identifier: bool
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: CONSTRUCTOR
+    - keyword: METHOD
+    - function_name:
+        function_name_identifier: fname
+    - function_parameter_list:
+        bracketed:
+          start_bracket: (
+          end_bracket: )
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/ansi/create_table.sql b/test/fixtures/dialects/ansi/create_table.sql
new file mode 100644
index 0000000..bf7de62
--- /dev/null
+++ b/test/fixtures/dialects/ansi/create_table.sql
@@ -0,0 +1,9 @@
+-- Test various forms of quoted data types
+CREATE TABLE foo (
+    pk int PRIMARY KEY,
+    quoted_name "custom udt",
+    qualified_name sch.qualified,
+    quoted_qualified "my schema".qualified,
+    more_quoted "my schema"."custom udt",
+    quoted_udt sch."custom udt"
+);
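The new create_table.sql fixture exercises quoted and schema-qualified data type names (user-defined types). A hedged check of one of the trickier forms:

import sqlfluff

# A quoted, qualified UDT name as a column data type should now parse.
sqlfluff.parse(
    'CREATE TABLE foo (more_quoted "my schema"."custom udt")',
    dialect="ansi",
)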
diff --git a/test/fixtures/dialects/ansi/create_table.yml b/test/fixtures/dialects/ansi/create_table.yml
new file mode 100644
index 0000000..8d6b241
--- /dev/null
+++ b/test/fixtures/dialects/ansi/create_table.yml
@@ -0,0 +1,57 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 4d508e6a17455867f424bf23fbf6c04cd8ec300f8aa8b13b6920ee8199a9944b
+file:
+  statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: foo
+    - bracketed:
+      - start_bracket: (
+      - column_definition:
+          naked_identifier: pk
+          data_type:
+            data_type_identifier: int
+          column_constraint_segment:
+          - keyword: PRIMARY
+          - keyword: KEY
+      - comma: ','
+      - column_definition:
+          naked_identifier: quoted_name
+          data_type:
+            quoted_identifier: '"custom udt"'
+      - comma: ','
+      - column_definition:
+          naked_identifier: qualified_name
+          data_type:
+            naked_identifier: sch
+            dot: .
+            data_type_identifier: qualified
+      - comma: ','
+      - column_definition:
+          naked_identifier: quoted_qualified
+          data_type:
+            quoted_identifier: '"my schema"'
+            dot: .
+            data_type_identifier: qualified
+      - comma: ','
+      - column_definition:
+          naked_identifier: more_quoted
+          data_type:
+          - quoted_identifier: '"my schema"'
+          - dot: .
+          - quoted_identifier: '"custom udt"'
+      - comma: ','
+      - column_definition:
+          naked_identifier: quoted_udt
+          data_type:
+            naked_identifier: sch
+            dot: .
+            quoted_identifier: '"custom udt"'
+      - end_bracket: )
+  statement_terminator: ;
diff --git a/test/fixtures/dialects/ansi/create_table_a_column_constraints.sql b/test/fixtures/dialects/ansi/create_table_a_column_constraints.sql
index 2bee364..a4ca938 100644
--- a/test/fixtures/dialects/ansi/create_table_a_column_constraints.sql
+++ b/test/fixtures/dialects/ansi/create_table_a_column_constraints.sql
@@ -11,5 +11,6 @@ create table table1 (
     c6 INT REFERENCES table2 (c6_other) ON DELETE NO ACTION,
     c6 INT REFERENCES table2 (c6_other) ON UPDATE SET NULL,
     c6 INT REFERENCES table2 (c6_other) ON DELETE RESTRICT ON UPDATE CASCADE,
-    c7 INT NOT NULL DEFAULT 1 UNIQUE REFERENCES table3 (c7_other)
+    c7 INT NOT NULL DEFAULT 1 UNIQUE REFERENCES table3 (c7_other),
+    c8 INT NOT NULL DEFAULT 1::INT
 )
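The extra c8 column pins down that a DEFAULT value can itself be a cast expression, as the yml below confirms. A hedged one-liner to the same effect:

import sqlfluff

sqlfluff.parse("CREATE TABLE t1 (c8 INT NOT NULL DEFAULT 1::INT)", dialect="ansi")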
diff --git a/test/fixtures/dialects/ansi/create_table_a_column_constraints.yml b/test/fixtures/dialects/ansi/create_table_a_column_constraints.yml
index 7297d41..1f328bc 100644
--- a/test/fixtures/dialects/ansi/create_table_a_column_constraints.yml
+++ b/test/fixtures/dialects/ansi/create_table_a_column_constraints.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 20afc12c71a054d29ed4c94899e27b32231d963355d407c2ede40fde7a5ed4ad
+_hash: d7769d9eb54abd09aa301054592b03b6258fb68d97f01f894f52b65d67cb820e
 file:
   statement:
     create_table_statement:
@@ -194,4 +194,19 @@ file:
               column_reference:
                 naked_identifier: c7_other
               end_bracket: )
+      - comma: ','
+      - column_definition:
+        - naked_identifier: c8
+        - data_type:
+            data_type_identifier: INT
+        - column_constraint_segment:
+          - keyword: NOT
+          - keyword: 'NULL'
+        - column_constraint_segment:
+            keyword: DEFAULT
+            cast_expression:
+              numeric_literal: '1'
+              casting_operator: '::'
+              data_type:
+                data_type_identifier: INT
       - end_bracket: )
diff --git a/test/fixtures/dialects/ansi/create_table_column_comment.yml b/test/fixtures/dialects/ansi/create_table_column_comment.yml
index 8bd2e0f..f81dd93 100644
--- a/test/fixtures/dialects/ansi/create_table_column_comment.yml
+++ b/test/fixtures/dialects/ansi/create_table_column_comment.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: f750b63c91a2ba7f4d2f5925dc63e4e39a07723ce9d01242e01b4991c4634bde
+_hash: b4f5a4ef29cb9b0822653e329beb20cb29f4b504edf94d688056a0039b317f4c
 file:
   statement:
     create_table_statement:
@@ -17,11 +17,11 @@ file:
           naked_identifier: id
           data_type:
             data_type_identifier: VARCHAR
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '100'
-              end_bracket: )
+                end_bracket: )
           column_constraint_segment:
             comment_clause:
               keyword: COMMENT
diff --git a/test/fixtures/dialects/ansi/create_table_column_constraint.yml b/test/fixtures/dialects/ansi/create_table_column_constraint.yml
index f7c0076..69431cd 100644
--- a/test/fixtures/dialects/ansi/create_table_column_constraint.yml
+++ b/test/fixtures/dialects/ansi/create_table_column_constraint.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: e0482f0f2f6480c796b00f793b89ddd654365fbf4f741719eff8fea05625c9da
+_hash: 63b85c70e580cbb8521d6485449c04eba626e93d2f7e68c5fdcdf4d0c39e9279
 file:
 - statement:
     create_table_statement:
@@ -85,11 +85,11 @@ file:
           naked_identifier: LastName
           data_type:
             data_type_identifier: varchar
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '255'
-              end_bracket: )
+                end_bracket: )
           column_constraint_segment:
           - keyword: NOT
           - keyword: 'NULL'
@@ -98,11 +98,11 @@ file:
           naked_identifier: FirstName
           data_type:
             data_type_identifier: varchar
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '255'
-              end_bracket: )
+                end_bracket: )
       - comma: ','
       - column_definition:
           naked_identifier: Age
@@ -113,11 +113,11 @@ file:
           naked_identifier: City
           data_type:
             data_type_identifier: varchar
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '255'
-              end_bracket: )
+                end_bracket: )
       - comma: ','
       - column_definition:
           naked_identifier: CONSTRAINT
diff --git a/test/fixtures/dialects/ansi/create_table_table_comment.yml b/test/fixtures/dialects/ansi/create_table_table_comment.yml
index e962f54..3c8c7c8 100644
--- a/test/fixtures/dialects/ansi/create_table_table_comment.yml
+++ b/test/fixtures/dialects/ansi/create_table_table_comment.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 0f17c9e8db507f2549f422e942881114771ad24fe496b15ddd4eb4827a48103b
+_hash: da6f038cd44a46433364b3711dd06159873265fb4432c8c90053a97068b9b4b6
 file:
   statement:
     create_table_statement:
@@ -17,11 +17,11 @@ file:
           naked_identifier: id
           data_type:
             data_type_identifier: VARCHAR
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '100'
-              end_bracket: )
+                end_bracket: )
         end_bracket: )
     - comment_clause:
         keyword: COMMENT
diff --git a/test/fixtures/dialects/ansi/create_table_varchar.yml b/test/fixtures/dialects/ansi/create_table_varchar.yml
index d266f85..8abd1dc 100644
--- a/test/fixtures/dialects/ansi/create_table_varchar.yml
+++ b/test/fixtures/dialects/ansi/create_table_varchar.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: d832e3cb263c7f053601b9bf1e85b500e2293228827efd8d1da15bda68a06994
+_hash: ddbfbeee0227954476881c7b0236fdc0ca7fa47d1401c2911ed5508aff69f55f
 file:
   statement:
     create_table_statement:
@@ -17,9 +17,9 @@ file:
           naked_identifier: id
           data_type:
             data_type_identifier: VARCHAR
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '100'
-              end_bracket: )
+                end_bracket: )
         end_bracket: )
diff --git a/test/fixtures/dialects/ansi/drop_cast.sql b/test/fixtures/dialects/ansi/drop_cast.sql
new file mode 100644
index 0000000..422cdda
--- /dev/null
+++ b/test/fixtures/dialects/ansi/drop_cast.sql
@@ -0,0 +1,9 @@
+DROP CAST (int AS bool);
+
+DROP CAST (int AS bool) RESTRICT;
+
+DROP CAST (int AS bool) CASCADE;
+
+DROP CAST (udt_1 AS udt_2);
+
+DROP CAST (sch.udt_1 AS sch.udt_2);
diff --git a/test/fixtures/dialects/ansi/drop_cast.yml b/test/fixtures/dialects/ansi/drop_cast.yml
new file mode 100644
index 0000000..83e8f99
--- /dev/null
+++ b/test/fixtures/dialects/ansi/drop_cast.yml
@@ -0,0 +1,78 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 9ab44c21d3a8f7594f50924ffe73aaf87716721475787ac5558e197378caf83c
+file:
+- statement:
+    drop_cast_statement:
+    - keyword: DROP
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          data_type_identifier: int
+      - keyword: AS
+      - data_type:
+          data_type_identifier: bool
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    drop_cast_statement:
+    - keyword: DROP
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          data_type_identifier: int
+      - keyword: AS
+      - data_type:
+          data_type_identifier: bool
+      - end_bracket: )
+    - keyword: RESTRICT
+- statement_terminator: ;
+- statement:
+    drop_cast_statement:
+    - keyword: DROP
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          data_type_identifier: int
+      - keyword: AS
+      - data_type:
+          data_type_identifier: bool
+      - end_bracket: )
+    - keyword: CASCADE
+- statement_terminator: ;
+- statement:
+    drop_cast_statement:
+    - keyword: DROP
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          data_type_identifier: udt_1
+      - keyword: AS
+      - data_type:
+          data_type_identifier: udt_2
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    drop_cast_statement:
+    - keyword: DROP
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          naked_identifier: sch
+          dot: .
+          data_type_identifier: udt_1
+      - keyword: AS
+      - data_type:
+          naked_identifier: sch
+          dot: .
+          data_type_identifier: udt_2
+      - end_bracket: )
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/ansi/from_fetch.sql b/test/fixtures/dialects/ansi/from_fetch.sql
new file mode 100644
index 0000000..110c7f6
--- /dev/null
+++ b/test/fixtures/dialects/ansi/from_fetch.sql
@@ -0,0 +1,3 @@
+SELECT *
+FROM counter
+FETCH FIRST 10 ROWS ONLY
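Several new fixtures (from_fetch, group_by_fetch, having_fetch, select_fetch, select_g_fetch, select_order_fetch) pin down the FETCH FIRST n ROWS ONLY clause in its various positions. A short parse sketch covering two of them:

import sqlfluff

for sql in (
    "SELECT * FROM counter FETCH FIRST 10 ROWS ONLY",
    "SELECT status FROM orders GROUP BY status FETCH FIRST 3 ROWS ONLY",
):
    sqlfluff.parse(sql, dialect="ansi")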
diff --git a/test/fixtures/dialects/ansi/from_fetch.yml b/test/fixtures/dialects/ansi/from_fetch.yml
new file mode 100644
index 0000000..a49b1e9
--- /dev/null
+++ b/test/fixtures/dialects/ansi/from_fetch.yml
@@ -0,0 +1,28 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: dc15eef87b4a1e04131c0c5059abe5c561cb4e8a656934f74aec336cfe33f1e0
+file:
+  statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: counter
+      fetch_clause:
+      - keyword: FETCH
+      - keyword: FIRST
+      - numeric_literal: '10'
+      - keyword: ROWS
+      - keyword: ONLY
diff --git a/test/fixtures/dialects/ansi/functions_a.sql b/test/fixtures/dialects/ansi/functions_a.sql
index 53c172e..e1ceaba 100644
--- a/test/fixtures/dialects/ansi/functions_a.sql
+++ b/test/fixtures/dialects/ansi/functions_a.sql
@@ -1,4 +1,6 @@
 SELECT
     DATE(t), ROUND(b, 2),
     LEFT(right(s, 5), LEN(s + 6)) as compound
-FROM tbl_b
+FROM tbl_b;
+
+SELECT _custom_function(5) as test_column;
diff --git a/test/fixtures/dialects/ansi/functions_a.yml b/test/fixtures/dialects/ansi/functions_a.yml
index 5ec55ce..ecb6c56 100644
--- a/test/fixtures/dialects/ansi/functions_a.yml
+++ b/test/fixtures/dialects/ansi/functions_a.yml
@@ -3,9 +3,9 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: bce31208bf7372db2c80a7234fa531cc2fbe5ba46697eea28c04df664095218f
+_hash: cddea2ed2c29465ac50497915f3647aa81be4ffb94314bf52367a5bfaee996d7
 file:
-  statement:
+- statement:
     select_statement:
       select_clause:
       - keyword: SELECT
@@ -77,3 +77,21 @@ file:
             table_expression:
               table_reference:
                 naked_identifier: tbl_b
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: _custom_function
+            bracketed:
+              start_bracket: (
+              expression:
+                numeric_literal: '5'
+              end_bracket: )
+          alias_expression:
+            keyword: as
+            naked_identifier: test_column
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/ansi/group_by_fetch.sql b/test/fixtures/dialects/ansi/group_by_fetch.sql
new file mode 100644
index 0000000..66c5f63
--- /dev/null
+++ b/test/fixtures/dialects/ansi/group_by_fetch.sql
@@ -0,0 +1,7 @@
+SELECT
+    status
+FROM
+    orders
+GROUP BY
+    status
+FETCH FIRST 3 ROWS ONLY
diff --git a/test/fixtures/dialects/ansi/group_by_fetch.yml b/test/fixtures/dialects/ansi/group_by_fetch.yml
new file mode 100644
index 0000000..fc1e555
--- /dev/null
+++ b/test/fixtures/dialects/ansi/group_by_fetch.yml
@@ -0,0 +1,32 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 379ba05fd8fd1bd3e4c5030df27c24869bb5ac75ece660e7da1a45dc715634c6
+file:
+  statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          column_reference:
+            naked_identifier: status
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: orders
+      groupby_clause:
+      - keyword: GROUP
+      - keyword: BY
+      - column_reference:
+          naked_identifier: status
+      fetch_clause:
+      - keyword: FETCH
+      - keyword: FIRST
+      - numeric_literal: '3'
+      - keyword: ROWS
+      - keyword: ONLY
diff --git a/test/fixtures/dialects/ansi/having_fetch.sql b/test/fixtures/dialects/ansi/having_fetch.sql
new file mode 100644
index 0000000..b692d66
--- /dev/null
+++ b/test/fixtures/dialects/ansi/having_fetch.sql
@@ -0,0 +1,10 @@
+SELECT
+	house_id,
+	COUNT (person_id)
+FROM
+	persons
+GROUP BY
+	house_id
+HAVING
+	COUNT (person_id) > 10
+FETCH FIRST 30 ROWS ONLY
diff --git a/test/fixtures/dialects/ansi/having_fetch.yml b/test/fixtures/dialects/ansi/having_fetch.yml
new file mode 100644
index 0000000..db24080
--- /dev/null
+++ b/test/fixtures/dialects/ansi/having_fetch.yml
@@ -0,0 +1,58 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 5eabc81d543256d133d52eba8e50015bdbc30e9b8bac8f19202393dc712ed6f6
+file:
+  statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          column_reference:
+            naked_identifier: house_id
+      - comma: ','
+      - select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: COUNT
+            bracketed:
+              start_bracket: (
+              expression:
+                column_reference:
+                  naked_identifier: person_id
+              end_bracket: )
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: persons
+      groupby_clause:
+      - keyword: GROUP
+      - keyword: BY
+      - column_reference:
+          naked_identifier: house_id
+      having_clause:
+        keyword: HAVING
+        expression:
+          function:
+            function_name:
+              function_name_identifier: COUNT
+            bracketed:
+              start_bracket: (
+              expression:
+                column_reference:
+                  naked_identifier: person_id
+              end_bracket: )
+          comparison_operator:
+            raw_comparison_operator: '>'
+          numeric_literal: '10'
+      fetch_clause:
+      - keyword: FETCH
+      - keyword: FIRST
+      - numeric_literal: '30'
+      - keyword: ROWS
+      - keyword: ONLY
diff --git a/test/fixtures/dialects/ansi/insert_using_subquery.sql b/test/fixtures/dialects/ansi/insert_using_subquery.sql
new file mode 100644
index 0000000..ec87ce7
--- /dev/null
+++ b/test/fixtures/dialects/ansi/insert_using_subquery.sql
@@ -0,0 +1,5 @@
+INSERT INTO foo SELECT 0 AS bar;
+
+INSERT INTO foo (SELECT 1 AS bar);
+
+INSERT INTO foo ((SELECT 1 AS bar));
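insert_using_subquery.sql confirms that an INSERT source query may be wrapped in one or more layers of brackets. A hedged parse check:

import sqlfluff

for sql in (
    "INSERT INTO foo SELECT 0 AS bar;",
    "INSERT INTO foo ((SELECT 1 AS bar));",
):
    sqlfluff.parse(sql, dialect="ansi")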
diff --git a/test/fixtures/dialects/ansi/insert_using_subquery.yml b/test/fixtures/dialects/ansi/insert_using_subquery.yml
new file mode 100644
index 0000000..4fd5c8e
--- /dev/null
+++ b/test/fixtures/dialects/ansi/insert_using_subquery.yml
@@ -0,0 +1,61 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: a385cd32bc29fe03941e9b42b6ac62fb8b7d5760272ed88f2a543a7508b6b927
+file:
+- statement:
+    insert_statement:
+    - keyword: INSERT
+    - keyword: INTO
+    - table_reference:
+        naked_identifier: foo
+    - select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            numeric_literal: '0'
+            alias_expression:
+              keyword: AS
+              naked_identifier: bar
+- statement_terminator: ;
+- statement:
+    insert_statement:
+    - keyword: INSERT
+    - keyword: INTO
+    - table_reference:
+        naked_identifier: foo
+    - bracketed:
+        start_bracket: (
+        select_statement:
+          select_clause:
+            keyword: SELECT
+            select_clause_element:
+              numeric_literal: '1'
+              alias_expression:
+                keyword: AS
+                naked_identifier: bar
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    insert_statement:
+    - keyword: INSERT
+    - keyword: INTO
+    - table_reference:
+        naked_identifier: foo
+    - bracketed:
+        start_bracket: (
+        bracketed:
+          start_bracket: (
+          select_statement:
+            select_clause:
+              keyword: SELECT
+              select_clause_element:
+                numeric_literal: '1'
+                alias_expression:
+                  keyword: AS
+                  naked_identifier: bar
+          end_bracket: )
+        end_bracket: )
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/ansi/select_fetch.sql b/test/fixtures/dialects/ansi/select_fetch.sql
new file mode 100644
index 0000000..ec67429
--- /dev/null
+++ b/test/fixtures/dialects/ansi/select_fetch.sql
@@ -0,0 +1,5 @@
+SELECT
+    EMPLOYEE.EMPNO
+FROM
+    EMPLOYEE
+FETCH FIRST 3 ROWS ONLY
diff --git a/test/fixtures/dialects/ansi/select_fetch.yml b/test/fixtures/dialects/ansi/select_fetch.yml
new file mode 100644
index 0000000..7c8820f
--- /dev/null
+++ b/test/fixtures/dialects/ansi/select_fetch.yml
@@ -0,0 +1,29 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 8019caf6295be540dd5bef757766ba8f960d1425d6cb0d1db4779b1816c11f2e
+file:
+  statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          column_reference:
+          - naked_identifier: EMPLOYEE
+          - dot: .
+          - naked_identifier: EMPNO
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: EMPLOYEE
+      fetch_clause:
+      - keyword: FETCH
+      - keyword: FIRST
+      - numeric_literal: '3'
+      - keyword: ROWS
+      - keyword: ONLY
diff --git a/test/fixtures/dialects/ansi/select_g_fetch.sql b/test/fixtures/dialects/ansi/select_g_fetch.sql
new file mode 100644
index 0000000..c3c1912
--- /dev/null
+++ b/test/fixtures/dialects/ansi/select_g_fetch.sql
@@ -0,0 +1,5 @@
+-- More complex select clause without from clause
+SELECT
+    NULL::INT AS user_id,
+    NULL::INT AS is_paid
+FETCH FIRST 0 ROWS ONLY
diff --git a/test/fixtures/dialects/ansi/select_g_fetch.yml b/test/fixtures/dialects/ansi/select_g_fetch.yml
new file mode 100644
index 0000000..c2ca1e9
--- /dev/null
+++ b/test/fixtures/dialects/ansi/select_g_fetch.yml
@@ -0,0 +1,38 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 43986e018fd3b9246c7ee47a06aab2694d44e695d5da72dc0ccae734c19b6985
+file:
+  statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          expression:
+            cast_expression:
+              null_literal: 'NULL'
+              casting_operator: '::'
+              data_type:
+                data_type_identifier: INT
+          alias_expression:
+            keyword: AS
+            naked_identifier: user_id
+      - comma: ','
+      - select_clause_element:
+          expression:
+            cast_expression:
+              null_literal: 'NULL'
+              casting_operator: '::'
+              data_type:
+                data_type_identifier: INT
+          alias_expression:
+            keyword: AS
+            naked_identifier: is_paid
+      fetch_clause:
+      - keyword: FETCH
+      - keyword: FIRST
+      - numeric_literal: '0'
+      - keyword: ROWS
+      - keyword: ONLY
diff --git a/test/fixtures/dialects/ansi/select_order_fetch.sql b/test/fixtures/dialects/ansi/select_order_fetch.sql
new file mode 100644
index 0000000..df5ed54
--- /dev/null
+++ b/test/fixtures/dialects/ansi/select_order_fetch.sql
@@ -0,0 +1,7 @@
+SELECT
+    EMPLOYEE.EMPNO
+FROM
+    EMPLOYEE
+ORDER BY
+    SALARY DESC
+FETCH FIRST 3 ROWS ONLY
diff --git a/test/fixtures/dialects/ansi/select_order_fetch.yml b/test/fixtures/dialects/ansi/select_order_fetch.yml
new file mode 100644
index 0000000..98bfaea
--- /dev/null
+++ b/test/fixtures/dialects/ansi/select_order_fetch.yml
@@ -0,0 +1,35 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: e2f9ffd79819b655896c0464ea8f489882038a7ca53e79331662bb85d9a830cd
+file:
+  statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          column_reference:
+          - naked_identifier: EMPLOYEE
+          - dot: .
+          - naked_identifier: EMPNO
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: EMPLOYEE
+      orderby_clause:
+      - keyword: ORDER
+      - keyword: BY
+      - column_reference:
+          naked_identifier: SALARY
+      - keyword: DESC
+      fetch_clause:
+      - keyword: FETCH
+      - keyword: FIRST
+      - numeric_literal: '3'
+      - keyword: ROWS
+      - keyword: ONLY
diff --git a/test/fixtures/dialects/ansi/select_union_bracketed.sql b/test/fixtures/dialects/ansi/select_union_bracketed.sql
new file mode 100644
index 0000000..c47ecef
--- /dev/null
+++ b/test/fixtures/dialects/ansi/select_union_bracketed.sql
@@ -0,0 +1,3 @@
+(SELECT 0) UNION (SELECT 1);
+
+((SELECT 0)) UNION ((SELECT 1));
diff --git a/test/fixtures/dialects/ansi/select_union_bracketed.yml b/test/fixtures/dialects/ansi/select_union_bracketed.yml
new file mode 100644
index 0000000..7010f44
--- /dev/null
+++ b/test/fixtures/dialects/ansi/select_union_bracketed.yml
@@ -0,0 +1,55 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: ab415fa5d23ce0bdcac9d878f877f9bf64ec2975194120cc86daf2d250457660
+file:
+- statement:
+    set_expression:
+    - bracketed:
+        start_bracket: (
+        select_statement:
+          select_clause:
+            keyword: SELECT
+            select_clause_element:
+              numeric_literal: '0'
+        end_bracket: )
+    - set_operator:
+        keyword: UNION
+    - bracketed:
+        start_bracket: (
+        select_statement:
+          select_clause:
+            keyword: SELECT
+            select_clause_element:
+              numeric_literal: '1'
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    set_expression:
+    - bracketed:
+        start_bracket: (
+        bracketed:
+          start_bracket: (
+          select_statement:
+            select_clause:
+              keyword: SELECT
+              select_clause_element:
+                numeric_literal: '0'
+          end_bracket: )
+        end_bracket: )
+    - set_operator:
+        keyword: UNION
+    - bracketed:
+        start_bracket: (
+        bracketed:
+          start_bracket: (
+          select_statement:
+            select_clause:
+              keyword: SELECT
+              select_clause_element:
+                numeric_literal: '1'
+          end_bracket: )
+        end_bracket: )
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/ansi/where_fetch.sql b/test/fixtures/dialects/ansi/where_fetch.sql
new file mode 100644
index 0000000..d0135d5
--- /dev/null
+++ b/test/fixtures/dialects/ansi/where_fetch.sql
@@ -0,0 +1,3 @@
+SELECT * FROM Persons
+WHERE Country='France'
+FETCH FIRST 5 ROWS ONLY;
diff --git a/test/fixtures/dialects/ansi/where_fetch.yml b/test/fixtures/dialects/ansi/where_fetch.yml
new file mode 100644
index 0000000..1a21aee
--- /dev/null
+++ b/test/fixtures/dialects/ansi/where_fetch.yml
@@ -0,0 +1,37 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 551b47f53b17ce9ec37f64ff155f33d78305241d2b2bca2b76718016bbaededd
+file:
+  statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: Persons
+      where_clause:
+        keyword: WHERE
+        expression:
+          column_reference:
+            naked_identifier: Country
+          comparison_operator:
+            raw_comparison_operator: '='
+          quoted_literal: "'France'"
+      fetch_clause:
+      - keyword: FETCH
+      - keyword: FIRST
+      - numeric_literal: '5'
+      - keyword: ROWS
+      - keyword: ONLY
+  statement_terminator: ;
diff --git a/test/fixtures/dialects/athena/create_array_table.sql b/test/fixtures/dialects/athena/create_array_table.sql
new file mode 100644
index 0000000..8af06cf
--- /dev/null
+++ b/test/fixtures/dialects/athena/create_array_table.sql
@@ -0,0 +1,2 @@
+CREATE TABLE array_table (c1 array<integer>) LOCATION '...';
+INSERT INTO array_table values(ARRAY[1,2,3]);
diff --git a/test/fixtures/dialects/athena/create_array_table.yml b/test/fixtures/dialects/athena/create_array_table.yml
new file mode 100644
index 0000000..90fd7ae
--- /dev/null
+++ b/test/fixtures/dialects/athena/create_array_table.yml
@@ -0,0 +1,54 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 2fe50ad709b60c1e739fec5ef4841d766e9f03bb56d46500ff6d673bb5e0ece5
+file:
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: array_table
+    - bracketed:
+        start_bracket: (
+        column_definition:
+          naked_identifier: c1
+          data_type:
+            array_type:
+              keyword: array
+              array_type_schema:
+                start_angle_bracket: <
+                data_type:
+                  primitive_type:
+                    keyword: integer
+                end_angle_bracket: '>'
+        end_bracket: )
+    - keyword: LOCATION
+    - quoted_literal: "'...'"
+- statement_terminator: ;
+- statement:
+    insert_statement:
+    - keyword: INSERT
+    - keyword: INTO
+    - table_reference:
+        naked_identifier: array_table
+    - values_clause:
+        keyword: values
+        bracketed:
+          start_bracket: (
+          expression:
+            typed_array_literal:
+              array_type:
+                keyword: ARRAY
+              array_literal:
+              - start_square_bracket: '['
+              - numeric_literal: '1'
+              - comma: ','
+              - numeric_literal: '2'
+              - comma: ','
+              - numeric_literal: '3'
+              - end_square_bracket: ']'
+          end_bracket: )
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/athena/create_external_table.sql b/test/fixtures/dialects/athena/create_external_table.sql
index 311003e..ad5fb59 100644
--- a/test/fixtures/dialects/athena/create_external_table.sql
+++ b/test/fixtures/dialects/athena/create_external_table.sql
@@ -8,3 +8,35 @@ create external table my_database.my_table(
       ESCAPED BY '\\'
       LINES TERMINATED BY '\n'
     LOCATION 's3://athena-examples-myregion/flight/csv/';
+
+CREATE TABLE bucketed_table WITH (
+  bucketed_by = ARRAY[column_name],
+  bucket_count = 30, format = 'PARQUET',
+  external_location ='s3://DOC-EXAMPLE-BUCKET/tables/parquet_table/'
+) AS
+SELECT
+  *
+FROM
+  table_name;
+
+CREATE EXTERNAL TABLE `tpch100.lineitem_parq_partitioned`(
+  `l_orderkey` int,
+  `l_partkey` int,
+  `l_suppkey` int,
+  `l_linenumber` int,
+  `l_quantity` double,
+  `l_extendedprice` double,
+  `l_discount` double,
+  `l_tax` double,
+  `l_returnflag` string,
+  `l_linestatus` string,
+  `l_commitdate` string,
+  `l_receiptdate` string,
+  `l_shipinstruct` string,
+  `l_comment` string)
+PARTITIONED BY (
+  `l_shipdate` string)
+ROW FORMAT SERDE
+  'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe' STORED AS INPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat' OUTPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat' LOCATION   's3://<my-tpch-bucket>/lineitem/'
diff --git a/test/fixtures/dialects/athena/create_external_table.yml b/test/fixtures/dialects/athena/create_external_table.yml
index 60f5764..fb0c577 100644
--- a/test/fixtures/dialects/athena/create_external_table.yml
+++ b/test/fixtures/dialects/athena/create_external_table.yml
@@ -3,9 +3,9 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 39e5d0780331792ce8d39c3674f19976b486a0de57d38a705d3c06a9c14afa1f
+_hash: 68b5d13306d23d5e9b8c3df0bd4c40672178cb10f21bdb196c90cd0549a27744
 file:
-  statement:
+- statement:
     create_table_statement:
     - keyword: create
     - keyword: external
@@ -60,5 +60,173 @@ file:
       - keyword: BY
       - quoted_literal: "'\\n'"
     - keyword: LOCATION
-    - raw: "'s3://athena-examples-myregion/flight/csv/'"
-  statement_terminator: ;
+    - quoted_literal: "'s3://athena-examples-myregion/flight/csv/'"
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: bucketed_table
+    - keyword: WITH
+    - bracketed:
+      - start_bracket: (
+      - keyword: bucketed_by
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - typed_array_literal:
+          array_type:
+            keyword: ARRAY
+          array_literal:
+            start_square_bracket: '['
+            column_reference:
+              naked_identifier: column_name
+            end_square_bracket: ']'
+      - comma: ','
+      - keyword: bucket_count
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - numeric_literal: '30'
+      - comma: ','
+      - keyword: format
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - quoted_literal: "'PARQUET'"
+      - comma: ','
+      - keyword: external_location
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - quoted_literal: "'s3://DOC-EXAMPLE-BUCKET/tables/parquet_table/'"
+      - end_bracket: )
+    - keyword: AS
+    - select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: table_name
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: EXTERNAL
+    - keyword: TABLE
+    - table_reference:
+        quoted_identifier: '`tpch100.lineitem_parq_partitioned`'
+    - bracketed:
+      - start_bracket: (
+      - column_definition:
+          quoted_identifier: '`l_orderkey`'
+          data_type:
+            primitive_type:
+              keyword: int
+      - comma: ','
+      - column_definition:
+          quoted_identifier: '`l_partkey`'
+          data_type:
+            primitive_type:
+              keyword: int
+      - comma: ','
+      - column_definition:
+          quoted_identifier: '`l_suppkey`'
+          data_type:
+            primitive_type:
+              keyword: int
+      - comma: ','
+      - column_definition:
+          quoted_identifier: '`l_linenumber`'
+          data_type:
+            primitive_type:
+              keyword: int
+      - comma: ','
+      - column_definition:
+          quoted_identifier: '`l_quantity`'
+          data_type:
+            primitive_type:
+              keyword: double
+      - comma: ','
+      - column_definition:
+          quoted_identifier: '`l_extendedprice`'
+          data_type:
+            primitive_type:
+              keyword: double
+      - comma: ','
+      - column_definition:
+          quoted_identifier: '`l_discount`'
+          data_type:
+            primitive_type:
+              keyword: double
+      - comma: ','
+      - column_definition:
+          quoted_identifier: '`l_tax`'
+          data_type:
+            primitive_type:
+              keyword: double
+      - comma: ','
+      - column_definition:
+          quoted_identifier: '`l_returnflag`'
+          data_type:
+            primitive_type:
+              keyword: string
+      - comma: ','
+      - column_definition:
+          quoted_identifier: '`l_linestatus`'
+          data_type:
+            primitive_type:
+              keyword: string
+      - comma: ','
+      - column_definition:
+          quoted_identifier: '`l_commitdate`'
+          data_type:
+            primitive_type:
+              keyword: string
+      - comma: ','
+      - column_definition:
+          quoted_identifier: '`l_receiptdate`'
+          data_type:
+            primitive_type:
+              keyword: string
+      - comma: ','
+      - column_definition:
+          quoted_identifier: '`l_shipinstruct`'
+          data_type:
+            primitive_type:
+              keyword: string
+      - comma: ','
+      - column_definition:
+          quoted_identifier: '`l_comment`'
+          data_type:
+            primitive_type:
+              keyword: string
+      - end_bracket: )
+    - keyword: PARTITIONED
+    - keyword: BY
+    - bracketed:
+        start_bracket: (
+        column_definition:
+          quoted_identifier: '`l_shipdate`'
+          data_type:
+            primitive_type:
+              keyword: string
+        end_bracket: )
+    - row_format_clause:
+      - keyword: ROW
+      - keyword: FORMAT
+      - keyword: SERDE
+      - quoted_literal: "'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'"
+    - keyword: STORED
+    - keyword: AS
+    - keyword: INPUTFORMAT
+    - quoted_literal: "'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'"
+    - keyword: OUTPUTFORMAT
+    - quoted_literal: "'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'"
+    - keyword: LOCATION
+    - quoted_literal: "'s3://<my-tpch-bucket>/lineitem/'"
diff --git a/test/fixtures/dialects/athena/create_external_table_input_format.yml b/test/fixtures/dialects/athena/create_external_table_input_format.yml
index 5fcd31c..4dc2ab8 100644
--- a/test/fixtures/dialects/athena/create_external_table_input_format.yml
+++ b/test/fixtures/dialects/athena/create_external_table_input_format.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 89736602cebb4b7df453b1a6b750b53e568799a82aed2c0777c13457e5539dbf
+_hash: d5589c53a4c69fbe84bd27c9e8d251f43a311f7a1a826e9a105ec1e715c27cf5
 file:
   statement:
     create_table_statement:
@@ -66,7 +66,7 @@ file:
     - keyword: OUTPUTFORMAT
     - quoted_literal: "'some output format'"
     - keyword: LOCATION
-    - raw: "'s3://athena-examples-myregion/some_data/'"
+    - quoted_literal: "'s3://athena-examples-myregion/some_data/'"
     - keyword: TBLPROPERTIES
     - bracketed:
       - start_bracket: (
diff --git a/test/fixtures/dialects/athena/create_external_table_struct.yml b/test/fixtures/dialects/athena/create_external_table_struct.yml
index 72b883a..d58f9c6 100644
--- a/test/fixtures/dialects/athena/create_external_table_struct.yml
+++ b/test/fixtures/dialects/athena/create_external_table_struct.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 7eb4fb1603f437728979d755c7316334d6dd9673f296aabfacf858596c696296
+_hash: 02a71f810091ee83c7d409ce6521263ea4063d017bee6cccd54bc366e1c6192e
 file:
   statement:
     create_table_statement:
@@ -49,20 +49,22 @@ file:
       - column_definition:
           naked_identifier: app
           data_type:
-          - keyword: struct
-          - start_angle_bracket: <
-          - naked_identifier: appName
-          - colon: ':'
-          - data_type:
-              primitive_type:
-                keyword: string
-          - comma: ','
-          - naked_identifier: adamId
-          - colon: ':'
-          - data_type:
-              primitive_type:
-                keyword: string
-          - end_angle_bracket: '>'
+            struct_type:
+              keyword: struct
+              struct_type_schema:
+              - start_angle_bracket: <
+              - naked_identifier: appName
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: string
+              - comma: ','
+              - naked_identifier: adamId
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: string
+              - end_angle_bracket: '>'
       - comma: ','
       - column_definition:
           naked_identifier: servingStatus
@@ -79,12 +81,14 @@ file:
       - column_definition:
           naked_identifier: countriesOrRegions
           data_type:
-            keyword: array
-            start_angle_bracket: <
-            data_type:
-              primitive_type:
-                keyword: string
-            end_angle_bracket: '>'
+            array_type:
+              keyword: array
+              array_type_schema:
+                start_angle_bracket: <
+                data_type:
+                  primitive_type:
+                    keyword: string
+                end_angle_bracket: '>'
       - comma: ','
       - column_definition:
           naked_identifier: modificationTime
@@ -95,38 +99,42 @@ file:
       - column_definition:
           naked_identifier: totalBudget
           data_type:
-          - keyword: struct
-          - start_angle_bracket: <
-          - naked_identifier: amount
-          - colon: ':'
-          - data_type:
-              primitive_type:
-                keyword: int
-          - comma: ','
-          - naked_identifier: currency
-          - colon: ':'
-          - data_type:
-              primitive_type:
-                keyword: string
-          - end_angle_bracket: '>'
+            struct_type:
+              keyword: struct
+              struct_type_schema:
+              - start_angle_bracket: <
+              - naked_identifier: amount
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: int
+              - comma: ','
+              - naked_identifier: currency
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: string
+              - end_angle_bracket: '>'
       - comma: ','
       - column_definition:
           naked_identifier: dailyBudget
           data_type:
-          - keyword: struct
-          - start_angle_bracket: <
-          - naked_identifier: amount
-          - colon: ':'
-          - data_type:
-              primitive_type:
-                keyword: int
-          - comma: ','
-          - naked_identifier: currency
-          - colon: ':'
-          - data_type:
-              primitive_type:
-                keyword: string
-          - end_angle_bracket: '>'
+            struct_type:
+              keyword: struct
+              struct_type_schema:
+              - start_angle_bracket: <
+              - naked_identifier: amount
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: int
+              - comma: ','
+              - naked_identifier: currency
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: string
+              - end_angle_bracket: '>'
       - comma: ','
       - column_definition:
           naked_identifier: displayStatus
@@ -137,12 +145,14 @@ file:
       - column_definition:
           naked_identifier: supplySources
           data_type:
-            keyword: array
-            start_angle_bracket: <
-            data_type:
-              primitive_type:
-                keyword: string
-            end_angle_bracket: '>'
+            array_type:
+              keyword: array
+              array_type_schema:
+                start_angle_bracket: <
+                data_type:
+                  primitive_type:
+                    keyword: string
+                end_angle_bracket: '>'
       - comma: ','
       - column_definition:
           naked_identifier: adChannelType
@@ -225,74 +235,82 @@ file:
       - column_definition:
           naked_identifier: avgCPA
           data_type:
-          - keyword: struct
-          - start_angle_bracket: <
-          - naked_identifier: amount
-          - colon: ':'
-          - data_type:
-              primitive_type:
-                keyword: int
-          - comma: ','
-          - naked_identifier: currency
-          - colon: ':'
-          - data_type:
-              primitive_type:
-                keyword: string
-          - end_angle_bracket: '>'
+            struct_type:
+              keyword: struct
+              struct_type_schema:
+              - start_angle_bracket: <
+              - naked_identifier: amount
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: int
+              - comma: ','
+              - naked_identifier: currency
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: string
+              - end_angle_bracket: '>'
       - comma: ','
       - column_definition:
           naked_identifier: avgCPT
           data_type:
-          - keyword: struct
-          - start_angle_bracket: <
-          - naked_identifier: amount
-          - colon: ':'
-          - data_type:
-              primitive_type:
-                keyword: int
-          - comma: ','
-          - naked_identifier: currency
-          - colon: ':'
-          - data_type:
-              primitive_type:
-                keyword: string
-          - end_angle_bracket: '>'
+            struct_type:
+              keyword: struct
+              struct_type_schema:
+              - start_angle_bracket: <
+              - naked_identifier: amount
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: int
+              - comma: ','
+              - naked_identifier: currency
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: string
+              - end_angle_bracket: '>'
       - comma: ','
       - column_definition:
           naked_identifier: avgCPM
           data_type:
-          - keyword: struct
-          - start_angle_bracket: <
-          - naked_identifier: amount
-          - colon: ':'
-          - data_type:
-              primitive_type:
-                keyword: int
-          - comma: ','
-          - naked_identifier: currency
-          - colon: ':'
-          - data_type:
-              primitive_type:
-                keyword: string
-          - end_angle_bracket: '>'
+            struct_type:
+              keyword: struct
+              struct_type_schema:
+              - start_angle_bracket: <
+              - naked_identifier: amount
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: int
+              - comma: ','
+              - naked_identifier: currency
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: string
+              - end_angle_bracket: '>'
       - comma: ','
       - column_definition:
           naked_identifier: localSpend
           data_type:
-          - keyword: struct
-          - start_angle_bracket: <
-          - naked_identifier: amount
-          - colon: ':'
-          - data_type:
-              primitive_type:
-                keyword: int
-          - comma: ','
-          - naked_identifier: currency
-          - colon: ':'
-          - data_type:
-              primitive_type:
-                keyword: string
-          - end_angle_bracket: '>'
+            struct_type:
+              keyword: struct
+              struct_type_schema:
+              - start_angle_bracket: <
+              - naked_identifier: amount
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: int
+              - comma: ','
+              - naked_identifier: currency
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: string
+              - end_angle_bracket: '>'
       - comma: ','
       - column_definition:
           naked_identifier: conversionRate
@@ -326,5 +344,5 @@ file:
       - keyword: BY
       - quoted_literal: "'\\n'"
     - keyword: LOCATION
-    - raw: "'s3://athena-examples-myregion/flight/csv/'"
+    - quoted_literal: "'s3://athena-examples-myregion/flight/csv/'"
   statement_terminator: ;
diff --git a/test/fixtures/dialects/athena/create_map_table.sql b/test/fixtures/dialects/athena/create_map_table.sql
new file mode 100644
index 0000000..acafe42
--- /dev/null
+++ b/test/fixtures/dialects/athena/create_map_table.sql
@@ -0,0 +1,2 @@
+CREATE TABLE map_table(c1 map<string, integer>) LOCATION '...';
+INSERT INTO map_table values(MAP(ARRAY['foo', 'bar'], ARRAY[1, 2]));
diff --git a/test/fixtures/dialects/athena/create_map_table.yml b/test/fixtures/dialects/athena/create_map_table.yml
new file mode 100644
index 0000000..634aaa1
--- /dev/null
+++ b/test/fixtures/dialects/athena/create_map_table.yml
@@ -0,0 +1,73 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: f9ce164e521bde2f59e168d96fa1333eab314cb0e034162c363d9e79413c946e
+file:
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: map_table
+    - bracketed:
+        start_bracket: (
+        column_definition:
+          naked_identifier: c1
+          data_type:
+            map_type:
+              keyword: map
+              map_type_schema:
+                start_angle_bracket: <
+                primitive_type:
+                  keyword: string
+                comma: ','
+                data_type:
+                  primitive_type:
+                    keyword: integer
+                end_angle_bracket: '>'
+        end_bracket: )
+    - keyword: LOCATION
+    - quoted_literal: "'...'"
+- statement_terminator: ;
+- statement:
+    insert_statement:
+    - keyword: INSERT
+    - keyword: INTO
+    - table_reference:
+        naked_identifier: map_table
+    - values_clause:
+        keyword: values
+        bracketed:
+          start_bracket: (
+          expression:
+            function:
+              function_name:
+                function_name_identifier: MAP
+              bracketed:
+              - start_bracket: (
+              - expression:
+                  typed_array_literal:
+                    array_type:
+                      keyword: ARRAY
+                    array_literal:
+                    - start_square_bracket: '['
+                    - quoted_literal: "'foo'"
+                    - comma: ','
+                    - quoted_literal: "'bar'"
+                    - end_square_bracket: ']'
+              - comma: ','
+              - expression:
+                  typed_array_literal:
+                    array_type:
+                      keyword: ARRAY
+                    array_literal:
+                    - start_square_bracket: '['
+                    - numeric_literal: '1'
+                    - comma: ','
+                    - numeric_literal: '2'
+                    - end_square_bracket: ']'
+              - end_bracket: )
+          end_bracket: )
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/athena/create_or_replace_view.sql b/test/fixtures/dialects/athena/create_or_replace_view.sql
deleted file mode 100644
index d24ed03..0000000
--- a/test/fixtures/dialects/athena/create_or_replace_view.sql
+++ /dev/null
@@ -1,6 +0,0 @@
-create or replace view my_database.my_view as
-select
-    field_1,
-    field_2,
-    field_3
-from my_table;
diff --git a/test/fixtures/dialects/athena/create_or_replace_view.yml b/test/fixtures/dialects/athena/create_or_replace_view.yml
deleted file mode 100644
index 991bb5c..0000000
--- a/test/fixtures/dialects/athena/create_or_replace_view.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-# YML test files are auto-generated from SQL files and should not be edited by
-# hand. To help enforce this, the "hash" field in the file must match a hash
-# computed by SQLFluff when running the tests. Please run
-# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
-# altering SQL files.
-_hash: 348f5ad52b16d02a03061684fd483fede59d80f48d8004ca19d7d184a0ae0e22
-file:
-  statement:
-    create_view_statement:
-    - keyword: create
-    - keyword: or
-    - keyword: replace
-    - keyword: view
-    - table_reference:
-      - naked_identifier: my_database
-      - dot: .
-      - naked_identifier: my_view
-    - keyword: as
-    - select_statement:
-        select_clause:
-        - keyword: select
-        - select_clause_element:
-            column_reference:
-              naked_identifier: field_1
-        - comma: ','
-        - select_clause_element:
-            column_reference:
-              naked_identifier: field_2
-        - comma: ','
-        - select_clause_element:
-            column_reference:
-              naked_identifier: field_3
-        from_clause:
-          keyword: from
-          from_expression:
-            from_expression_element:
-              table_expression:
-                table_reference:
-                  naked_identifier: my_table
-  statement_terminator: ;
diff --git a/test/fixtures/dialects/athena/create_partitioned_table.sql b/test/fixtures/dialects/athena/create_partitioned_table.sql
new file mode 100644
index 0000000..e909039
--- /dev/null
+++ b/test/fixtures/dialects/athena/create_partitioned_table.sql
@@ -0,0 +1,31 @@
+CREATE table my_lineitem_parq_partitioned
+WITH (partitioned_by = ARRAY['l_shipdate']) AS
+SELECT l_orderkey,
+         l_partkey,
+         l_suppkey,
+         l_linenumber,
+         l_quantity,
+         l_extendedprice,
+         l_discount,
+         l_tax,
+         l_returnflag,
+         l_linestatus,
+         l_commitdate,
+         l_receiptdate,
+         l_shipinstruct,
+         l_comment,
+         l_shipdate
+FROM tpch100.lineitem_parq_partitioned
+WHERE cast(l_shipdate as timestamp) < DATE('1992-02-01');
+
+CREATE TABLE ctas_iceberg
+WITH (
+    table_type = 'ICEBERG',
+    format = 'PARQUET',
+    location = 's3://my_athena_results/ctas_iceberg_parquet/',
+    is_external = false,
+    partitioning = ARRAY['month(dt)'],
+    vacuum_min_snapshots_to_keep = 10,
+    vacuum_max_snapshot_age_ms = 259200
+)
+AS SELECT key1, name1, 'date' FROM table1;
diff --git a/test/fixtures/dialects/athena/create_partitioned_table.yml b/test/fixtures/dialects/athena/create_partitioned_table.yml
new file mode 100644
index 0000000..840e51d
--- /dev/null
+++ b/test/fixtures/dialects/athena/create_partitioned_table.yml
@@ -0,0 +1,198 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: c5ce1d7c663f64372f59dfc0f2028db823240dbb4eeb85312cb99f45af4edca5
+file:
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: table
+    - table_reference:
+        naked_identifier: my_lineitem_parq_partitioned
+    - keyword: WITH
+    - bracketed:
+        start_bracket: (
+        keyword: partitioned_by
+        comparison_operator:
+          raw_comparison_operator: '='
+        typed_array_literal:
+          array_type:
+            keyword: ARRAY
+          array_literal:
+            start_square_bracket: '['
+            quoted_literal: "'l_shipdate'"
+            end_square_bracket: ']'
+        end_bracket: )
+    - keyword: AS
+    - select_statement:
+        select_clause:
+        - keyword: SELECT
+        - select_clause_element:
+            column_reference:
+              naked_identifier: l_orderkey
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: l_partkey
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: l_suppkey
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: l_linenumber
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: l_quantity
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: l_extendedprice
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: l_discount
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: l_tax
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: l_returnflag
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: l_linestatus
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: l_commitdate
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: l_receiptdate
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: l_shipinstruct
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: l_comment
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: l_shipdate
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                - naked_identifier: tpch100
+                - dot: .
+                - naked_identifier: lineitem_parq_partitioned
+        where_clause:
+          keyword: WHERE
+          expression:
+          - function:
+              function_name:
+                function_name_identifier: cast
+              bracketed:
+                start_bracket: (
+                expression:
+                  column_reference:
+                    naked_identifier: l_shipdate
+                keyword: as
+                data_type:
+                  primitive_type:
+                    keyword: timestamp
+                end_bracket: )
+          - comparison_operator:
+              raw_comparison_operator: <
+          - function:
+              function_name:
+                function_name_identifier: DATE
+              bracketed:
+                start_bracket: (
+                expression:
+                  quoted_literal: "'1992-02-01'"
+                end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: ctas_iceberg
+    - keyword: WITH
+    - bracketed:
+      - start_bracket: (
+      - keyword: table_type
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - quoted_literal: "'ICEBERG'"
+      - comma: ','
+      - keyword: format
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - quoted_literal: "'PARQUET'"
+      - comma: ','
+      - keyword: location
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - quoted_literal: "'s3://my_athena_results/ctas_iceberg_parquet/'"
+      - comma: ','
+      - keyword: is_external
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - boolean_literal: 'false'
+      - comma: ','
+      - keyword: partitioning
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - typed_array_literal:
+          array_type:
+            keyword: ARRAY
+          array_literal:
+            start_square_bracket: '['
+            quoted_literal: "'month(dt)'"
+            end_square_bracket: ']'
+      - comma: ','
+      - keyword: vacuum_min_snapshots_to_keep
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - numeric_literal: '10'
+      - comma: ','
+      - keyword: vacuum_max_snapshot_age_ms
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - numeric_literal: '259200'
+      - end_bracket: )
+    - keyword: AS
+    - select_statement:
+        select_clause:
+        - keyword: SELECT
+        - select_clause_element:
+            column_reference:
+              naked_identifier: key1
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: name1
+        - comma: ','
+        - select_clause_element:
+            quoted_literal: "'date'"
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: table1
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/athena/create_struct_table.sql b/test/fixtures/dialects/athena/create_struct_table.sql
new file mode 100644
index 0000000..8d5b1b4
--- /dev/null
+++ b/test/fixtures/dialects/athena/create_struct_table.sql
@@ -0,0 +1,3 @@
+CREATE TABLE struct_table(c1 struct<name:varchar(10), age:integer>) LOCATION '...';
+
+INSERT INTO struct_table SELECT CAST(ROW('Bob', 38) AS ROW(name VARCHAR(10), age INTEGER));
diff --git a/test/fixtures/dialects/athena/create_struct_table.yml b/test/fixtures/dialects/athena/create_struct_table.yml
new file mode 100644
index 0000000..6b20e14
--- /dev/null
+++ b/test/fixtures/dialects/athena/create_struct_table.yml
@@ -0,0 +1,92 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 38d98df551428927f3dfbb68de7c816f03dfbd3cc494a3b07241c92db86247fe
+file:
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: struct_table
+    - bracketed:
+        start_bracket: (
+        column_definition:
+          naked_identifier: c1
+          data_type:
+            struct_type:
+              keyword: struct
+              struct_type_schema:
+              - start_angle_bracket: <
+              - naked_identifier: name
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: varchar
+                    bracketed_arguments:
+                      bracketed:
+                        start_bracket: (
+                        numeric_literal: '10'
+                        end_bracket: )
+              - comma: ','
+              - naked_identifier: age
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: integer
+              - end_angle_bracket: '>'
+        end_bracket: )
+    - keyword: LOCATION
+    - quoted_literal: "'...'"
+- statement_terminator: ;
+- statement:
+    insert_statement:
+    - keyword: INSERT
+    - keyword: INTO
+    - table_reference:
+        naked_identifier: struct_table
+    - select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            function:
+              function_name:
+                function_name_identifier: CAST
+              bracketed:
+                start_bracket: (
+                expression:
+                  function:
+                    function_name:
+                      function_name_identifier: ROW
+                    bracketed:
+                    - start_bracket: (
+                    - expression:
+                        quoted_literal: "'Bob'"
+                    - comma: ','
+                    - expression:
+                        numeric_literal: '38'
+                    - end_bracket: )
+                keyword: AS
+                data_type:
+                  keyword: ROW
+                  bracketed:
+                  - start_bracket: (
+                  - naked_identifier: name
+                  - data_type:
+                      primitive_type:
+                        keyword: VARCHAR
+                        bracketed_arguments:
+                          bracketed:
+                            start_bracket: (
+                            numeric_literal: '10'
+                            end_bracket: )
+                  - comma: ','
+                  - naked_identifier: age
+                  - data_type:
+                      primitive_type:
+                        keyword: INTEGER
+                  - end_bracket: )
+                end_bracket: )
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/athena/create_table_as_select.yml b/test/fixtures/dialects/athena/create_table_as_select.yml
index 9b10808..f90a87e 100644
--- a/test/fixtures/dialects/athena/create_table_as_select.yml
+++ b/test/fixtures/dialects/athena/create_table_as_select.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: abf81ee9ace798cb2eb60dcd9cd07450e8176ffe0cc16ff318a091a2c47de691
+_hash: 910d1634c67b90facb16c20670e54e39bdfd55df92bf42b4c4fcb5940377dd27
 file:
   statement:
     create_table_statement:
@@ -27,11 +27,13 @@ file:
       - keyword: partitioned_by
       - comparison_operator:
           raw_comparison_operator: '='
-      - array_literal:
-          keyword: array
-          start_square_bracket: '['
-          quoted_literal: "'load_date'"
-          end_square_bracket: ']'
+      - typed_array_literal:
+          array_type:
+            keyword: array
+          array_literal:
+            start_square_bracket: '['
+            quoted_literal: "'load_date'"
+            end_square_bracket: ']'
       - end_bracket: )
     - keyword: AS
     - select_statement:
diff --git a/test/fixtures/dialects/athena/create_view.sql b/test/fixtures/dialects/athena/create_view.sql
new file mode 100644
index 0000000..19eeb5b
--- /dev/null
+++ b/test/fixtures/dialects/athena/create_view.sql
@@ -0,0 +1,10 @@
+CREATE VIEW test AS
+SELECT
+orderkey,
+orderstatus,
+totalprice / 2 AS half
+FROM orders;
+
+CREATE OR REPLACE VIEW test AS
+SELECT orderkey, orderstatus, totalprice / 4 AS quarter
+FROM orders;
diff --git a/test/fixtures/dialects/athena/create_view.yml b/test/fixtures/dialects/athena/create_view.yml
new file mode 100644
index 0000000..611886f
--- /dev/null
+++ b/test/fixtures/dialects/athena/create_view.yml
@@ -0,0 +1,79 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 145fb6ff6109069e686a48e68e4fc89cca03c4b317daf3fd87cf60e1cb440fd3
+file:
+- statement:
+    create_view_statement:
+    - keyword: CREATE
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: test
+    - keyword: AS
+    - select_statement:
+        select_clause:
+        - keyword: SELECT
+        - select_clause_element:
+            column_reference:
+              naked_identifier: orderkey
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: orderstatus
+        - comma: ','
+        - select_clause_element:
+            expression:
+              column_reference:
+                naked_identifier: totalprice
+              binary_operator: /
+              numeric_literal: '2'
+            alias_expression:
+              keyword: AS
+              naked_identifier: half
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: orders
+- statement_terminator: ;
+- statement:
+    create_view_statement:
+    - keyword: CREATE
+    - keyword: OR
+    - keyword: REPLACE
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: test
+    - keyword: AS
+    - select_statement:
+        select_clause:
+        - keyword: SELECT
+        - select_clause_element:
+            column_reference:
+              naked_identifier: orderkey
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: orderstatus
+        - comma: ','
+        - select_clause_element:
+            expression:
+              column_reference:
+                naked_identifier: totalprice
+              binary_operator: /
+              numeric_literal: '4'
+            alias_expression:
+              keyword: AS
+              naked_identifier: quarter
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: orders
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/athena/element_at.sql b/test/fixtures/dialects/athena/element_at.sql
new file mode 100644
index 0000000..f451236
--- /dev/null
+++ b/test/fixtures/dialects/athena/element_at.sql
@@ -0,0 +1,8 @@
+SELECT
+    COALESCE(
+        element_at(rq.hiring_managers, 1),
+        element_at(rq.hiring_managers, 2),
+        rq.creator_id
+    ) AS part1,
+    element_at(pl.hiring_managers, 1).id AS part2,
+    element_at(pl.hiring_managers, 2).id AS part3;
diff --git a/test/fixtures/dialects/athena/element_at.yml b/test/fixtures/dialects/athena/element_at.yml
new file mode 100644
index 0000000..f525c94
--- /dev/null
+++ b/test/fixtures/dialects/athena/element_at.yml
@@ -0,0 +1,105 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 5127c65677a4d987ab01ccfd4bfc6a86ce2dd0f7317002ae12e04869c5812f1c
+file:
+  statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: COALESCE
+            bracketed:
+            - start_bracket: (
+            - expression:
+                function:
+                  function_name:
+                    function_name_identifier: element_at
+                  bracketed:
+                  - start_bracket: (
+                  - expression:
+                      column_reference:
+                      - naked_identifier: rq
+                      - dot: .
+                      - naked_identifier: hiring_managers
+                  - comma: ','
+                  - expression:
+                      numeric_literal: '1'
+                  - end_bracket: )
+            - comma: ','
+            - expression:
+                function:
+                  function_name:
+                    function_name_identifier: element_at
+                  bracketed:
+                  - start_bracket: (
+                  - expression:
+                      column_reference:
+                      - naked_identifier: rq
+                      - dot: .
+                      - naked_identifier: hiring_managers
+                  - comma: ','
+                  - expression:
+                      numeric_literal: '2'
+                  - end_bracket: )
+            - comma: ','
+            - expression:
+                column_reference:
+                - naked_identifier: rq
+                - dot: .
+                - naked_identifier: creator_id
+            - end_bracket: )
+          alias_expression:
+            keyword: AS
+            naked_identifier: part1
+      - comma: ','
+      - select_clause_element:
+          expression:
+            function:
+              function_name:
+                function_name_identifier: element_at
+              bracketed:
+              - start_bracket: (
+              - expression:
+                  column_reference:
+                  - naked_identifier: pl
+                  - dot: .
+                  - naked_identifier: hiring_managers
+              - comma: ','
+              - expression:
+                  numeric_literal: '1'
+              - end_bracket: )
+            dot: .
+            object_reference:
+              naked_identifier: id
+          alias_expression:
+            keyword: AS
+            naked_identifier: part2
+      - comma: ','
+      - select_clause_element:
+          expression:
+            function:
+              function_name:
+                function_name_identifier: element_at
+              bracketed:
+              - start_bracket: (
+              - expression:
+                  column_reference:
+                  - naked_identifier: pl
+                  - dot: .
+                  - naked_identifier: hiring_managers
+              - comma: ','
+              - expression:
+                  numeric_literal: '2'
+              - end_bracket: )
+            dot: .
+            object_reference:
+              naked_identifier: id
+          alias_expression:
+            keyword: AS
+            naked_identifier: part3
+  statement_terminator: ;
diff --git a/test/fixtures/dialects/athena/prepared_statements.sql b/test/fixtures/dialects/athena/prepared_statements.sql
new file mode 100644
index 0000000..89a5278
--- /dev/null
+++ b/test/fixtures/dialects/athena/prepared_statements.sql
@@ -0,0 +1,23 @@
+PREPARE my_select1 FROM
+SELECT * FROM nation;
+
+PREPARE my_select2 FROM
+SELECT * FROM "my_database"."my_table" WHERE year = ?;
+
+PREPARE my_select3 FROM
+SELECT 'order' FROM orders WHERE productid = ? and quantity < ?;
+
+PREPARE my_insert FROM
+INSERT INTO cities_usa (city, state)
+SELECT city, state
+FROM cities_world
+WHERE country = ?;
+
+PREPARE my_unload FROM
+UNLOAD (SELECT * FROM table1 WHERE productid < ?)
+TO 's3://my_output_bucket/'
+WITH (format='PARQUET');
+
+EXECUTE statement_name;
+EXECUTE statement_name USING 'value';
+EXECUTE statement_name USING 'value', 10;
diff --git a/test/fixtures/dialects/athena/prepared_statements.yml b/test/fixtures/dialects/athena/prepared_statements.yml
new file mode 100644
index 0000000..9a4c5c3
--- /dev/null
+++ b/test/fixtures/dialects/athena/prepared_statements.yml
@@ -0,0 +1,205 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 1f85e8d54a28c0e52d2d49d051ebd3f71ef569db9a0d70e041845406903b4235
+file:
+- statement:
+    prepare_statement:
+    - keyword: PREPARE
+    - table_reference:
+        naked_identifier: my_select1
+    - keyword: FROM
+    - select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: nation
+- statement_terminator: ;
+- statement:
+    prepare_statement:
+    - keyword: PREPARE
+    - table_reference:
+        naked_identifier: my_select2
+    - keyword: FROM
+    - select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                - quoted_identifier: '"my_database"'
+                - dot: .
+                - quoted_identifier: '"my_table"'
+        where_clause:
+          keyword: WHERE
+          expression:
+            column_reference:
+              naked_identifier: year
+            comparison_operator:
+              raw_comparison_operator: '='
+            parameter: '?'
+- statement_terminator: ;
+- statement:
+    prepare_statement:
+    - keyword: PREPARE
+    - table_reference:
+        naked_identifier: my_select3
+    - keyword: FROM
+    - select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            quoted_literal: "'order'"
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: orders
+        where_clause:
+          keyword: WHERE
+          expression:
+          - column_reference:
+              naked_identifier: productid
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - parameter: '?'
+          - binary_operator: and
+          - column_reference:
+              naked_identifier: quantity
+          - comparison_operator:
+              raw_comparison_operator: <
+          - parameter: '?'
+- statement_terminator: ;
+- statement:
+    prepare_statement:
+    - keyword: PREPARE
+    - table_reference:
+        naked_identifier: my_insert
+    - keyword: FROM
+    - insert_statement:
+      - keyword: INSERT
+      - keyword: INTO
+      - table_reference:
+          naked_identifier: cities_usa
+      - bracketed:
+        - start_bracket: (
+        - column_reference:
+            naked_identifier: city
+        - comma: ','
+        - column_reference:
+            naked_identifier: state
+        - end_bracket: )
+      - select_statement:
+          select_clause:
+          - keyword: SELECT
+          - select_clause_element:
+              column_reference:
+                naked_identifier: city
+          - comma: ','
+          - select_clause_element:
+              column_reference:
+                naked_identifier: state
+          from_clause:
+            keyword: FROM
+            from_expression:
+              from_expression_element:
+                table_expression:
+                  table_reference:
+                    naked_identifier: cities_world
+          where_clause:
+            keyword: WHERE
+            expression:
+              column_reference:
+                naked_identifier: country
+              comparison_operator:
+                raw_comparison_operator: '='
+              parameter: '?'
+- statement_terminator: ;
+- statement:
+    prepare_statement:
+    - keyword: PREPARE
+    - table_reference:
+        naked_identifier: my_unload
+    - keyword: FROM
+    - unload_statement:
+      - keyword: UNLOAD
+      - bracketed:
+          start_bracket: (
+          select_statement:
+            select_clause:
+              keyword: SELECT
+              select_clause_element:
+                wildcard_expression:
+                  wildcard_identifier:
+                    star: '*'
+            from_clause:
+              keyword: FROM
+              from_expression:
+                from_expression_element:
+                  table_expression:
+                    table_reference:
+                      naked_identifier: table1
+            where_clause:
+              keyword: WHERE
+              expression:
+                column_reference:
+                  naked_identifier: productid
+                comparison_operator:
+                  raw_comparison_operator: <
+                parameter: '?'
+          end_bracket: )
+      - keyword: TO
+      - quoted_literal: "'s3://my_output_bucket/'"
+      - keyword: WITH
+      - bracketed:
+          start_bracket: (
+          keyword: format
+          comparison_operator:
+            raw_comparison_operator: '='
+          quoted_literal: "'PARQUET'"
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    execute_statement:
+      keyword: EXECUTE
+      table_reference:
+        naked_identifier: statement_name
+- statement_terminator: ;
+- statement:
+    execute_statement:
+    - keyword: EXECUTE
+    - table_reference:
+        naked_identifier: statement_name
+    - keyword: USING
+    - quoted_literal: "'value'"
+- statement_terminator: ;
+- statement:
+    execute_statement:
+    - keyword: EXECUTE
+    - table_reference:
+        naked_identifier: statement_name
+    - keyword: USING
+    - quoted_literal: "'value'"
+    - comma: ','
+    - numeric_literal: '10'
+- statement_terminator: ;
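The two fixtures above pin down Athena's PREPARE ... FROM, EXECUTE ... USING and UNLOAD grammar. Outside the fixture harness, the same check can be reproduced with sqlfluff's simple Python API; a minimal sketch (the dialect label and the parse-failure behaviour follow the 2.x documentation as I read it, so treat them as assumptions rather than something this diff guarantees):

# Sketch: confirm the Athena dialect accepts the new prepared-statement syntax.
# Assumes sqlfluff >= 2.0; per the docs, sqlfluff.parse raises an
# APIParsingError when the string does not parse cleanly.
import sqlfluff

statements = [
    'PREPARE my_select2 FROM\n'
    'SELECT * FROM "my_database"."my_table" WHERE year = ?;',
    "EXECUTE statement_name USING 'value', 10;",
]

for sql in statements:
    try:
        tree = sqlfluff.parse(sql, dialect="athena")
    except Exception as err:  # the documented exception type is APIParsingError
        print(f"FAILED: {sql!r}: {err}")
    else:
        # The returned record is the same shape serialised in the YAML above.
        print(f"OK ({sql.splitlines()[0]}), top-level keys: {list(tree)}")

The committed YAML is essentially that record serialised, plus the _hash field so the test suite can tell when a fixture has gone stale relative to its SQL file.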
diff --git a/test/fixtures/dialects/athena/select_a.sql b/test/fixtures/dialects/athena/select_a.sql
index bbf9a7a..21a9782 100644
--- a/test/fixtures/dialects/athena/select_a.sql
+++ b/test/fixtures/dialects/athena/select_a.sql
@@ -2,4 +2,7 @@ SELECT
   field_1
   , field_2
   , field_3
+  , time
+  , date
+  , timestamp
 FROM my_table;
diff --git a/test/fixtures/dialects/athena/select_a.yml b/test/fixtures/dialects/athena/select_a.yml
index 0b29efa..b1f78bd 100644
--- a/test/fixtures/dialects/athena/select_a.yml
+++ b/test/fixtures/dialects/athena/select_a.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 8e7d4cb0b9196934ce9c9dd9e0a5bc82a647cc7827eb71e238be79900ec6f756
+_hash: 287e4f998b4695a1d733c87a6a1ecad0735c474c0d5ed78422173e0d823736c5
 file:
   statement:
     select_statement:
@@ -20,6 +20,18 @@ file:
       - select_clause_element:
           column_reference:
             naked_identifier: field_3
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: time
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: date
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: timestamp
       from_clause:
         keyword: FROM
         from_expression:
diff --git a/test/fixtures/dialects/athena/select_array_of_rows.sql b/test/fixtures/dialects/athena/select_array_of_rows.sql
new file mode 100644
index 0000000..7b6897c
--- /dev/null
+++ b/test/fixtures/dialects/athena/select_array_of_rows.sql
@@ -0,0 +1 @@
+SELECT ARRAY[CAST(ROW(1) AS ROW(x INT))][1].x.y;
diff --git a/test/fixtures/dialects/athena/select_array_of_rows.yml b/test/fixtures/dialects/athena/select_array_of_rows.yml
new file mode 100644
index 0000000..5eba9d6
--- /dev/null
+++ b/test/fixtures/dialects/athena/select_array_of_rows.yml
@@ -0,0 +1,54 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 246b131824ddd434f87567ae1f0cf10a63055d0c33282c264e12c5f22081cd9c
+file:
+  statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+            typed_array_literal:
+              array_type:
+                keyword: ARRAY
+              array_literal:
+                start_square_bracket: '['
+                function:
+                  function_name:
+                    function_name_identifier: CAST
+                  bracketed:
+                    start_bracket: (
+                    expression:
+                      function:
+                        function_name:
+                          function_name_identifier: ROW
+                        bracketed:
+                          start_bracket: (
+                          expression:
+                            numeric_literal: '1'
+                          end_bracket: )
+                    keyword: AS
+                    data_type:
+                      keyword: ROW
+                      bracketed:
+                        start_bracket: (
+                        naked_identifier: x
+                        data_type:
+                          primitive_type:
+                            keyword: INT
+                        end_bracket: )
+                    end_bracket: )
+                end_square_bracket: ']'
+            array_accessor:
+              start_square_bracket: '['
+              numeric_literal: '1'
+              end_square_bracket: ']'
+            dot: .
+            object_reference:
+            - naked_identifier: x
+            - dot: .
+            - naked_identifier: y
+  statement_terminator: ;
diff --git a/test/fixtures/dialects/athena/select_filter.yml b/test/fixtures/dialects/athena/select_filter.yml
index d337376..361a748 100644
--- a/test/fixtures/dialects/athena/select_filter.yml
+++ b/test/fixtures/dialects/athena/select_filter.yml
@@ -3,24 +3,26 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: a6b6aff3b1fe6fbbae5a9c418ef0bca8cf90abf31cd2f5e56c17a35f4d00fb53
+_hash: 3ce1960064878f554ec38c21008d429dadfab54d4103c548b97dc79020a5c74f
 file:
 - statement:
     select_statement:
       select_clause:
         keyword: SELECT
         select_clause_element:
-          array_literal:
-          - keyword: ARRAY
-          - start_square_bracket: '['
-          - numeric_literal: '5'
-          - comma: ','
-          - null_literal: 'NULL'
-          - comma: ','
-          - numeric_literal: '7'
-          - comma: ','
-          - null_literal: 'NULL'
-          - end_square_bracket: ']'
+          typed_array_literal:
+            array_type:
+              keyword: ARRAY
+            array_literal:
+            - start_square_bracket: '['
+            - numeric_literal: '5'
+            - comma: ','
+            - null_literal: 'NULL'
+            - comma: ','
+            - numeric_literal: '7'
+            - comma: ','
+            - null_literal: 'NULL'
+            - end_square_bracket: ']'
 - statement_terminator: ;
 - statement:
     select_statement:
@@ -33,10 +35,12 @@ file:
             bracketed:
             - start_bracket: (
             - expression:
-                array_literal:
-                  keyword: ARRAY
-                  start_square_bracket: '['
-                  end_square_bracket: ']'
+                typed_array_literal:
+                  array_type:
+                    keyword: ARRAY
+                  array_literal:
+                    start_square_bracket: '['
+                    end_square_bracket: ']'
             - comma: ','
             - expression:
                 column_reference:
@@ -56,19 +60,21 @@ file:
             bracketed:
             - start_bracket: (
             - expression:
-                array_literal:
-                - keyword: ARRAY
-                - start_square_bracket: '['
-                - numeric_literal: '5'
-                - comma: ','
-                - numeric_literal:
-                    sign_indicator: '-'
-                    numeric_literal: '6'
-                - comma: ','
-                - null_literal: 'NULL'
-                - comma: ','
-                - numeric_literal: '7'
-                - end_square_bracket: ']'
+                typed_array_literal:
+                  array_type:
+                    keyword: ARRAY
+                  array_literal:
+                  - start_square_bracket: '['
+                  - numeric_literal: '5'
+                  - comma: ','
+                  - numeric_literal:
+                      sign_indicator: '-'
+                      numeric_literal: '6'
+                  - comma: ','
+                  - null_literal: 'NULL'
+                  - comma: ','
+                  - numeric_literal: '7'
+                  - end_square_bracket: ']'
             - comma: ','
             - expression:
               - column_reference:
@@ -92,17 +98,19 @@ file:
             bracketed:
             - start_bracket: (
             - expression:
-                array_literal:
-                - keyword: ARRAY
-                - start_square_bracket: '['
-                - numeric_literal: '5'
-                - comma: ','
-                - null_literal: 'NULL'
-                - comma: ','
-                - numeric_literal: '7'
-                - comma: ','
-                - null_literal: 'NULL'
-                - end_square_bracket: ']'
+                typed_array_literal:
+                  array_type:
+                    keyword: ARRAY
+                  array_literal:
+                  - start_square_bracket: '['
+                  - numeric_literal: '5'
+                  - comma: ','
+                  - null_literal: 'NULL'
+                  - comma: ','
+                  - numeric_literal: '7'
+                  - comma: ','
+                  - null_literal: 'NULL'
+                  - end_square_bracket: ']'
             - comma: ','
             - expression:
               - column_reference:
diff --git a/test/fixtures/dialects/athena/select_map_function.sql b/test/fixtures/dialects/athena/select_map_function.sql
new file mode 100644
index 0000000..3cffaa9
--- /dev/null
+++ b/test/fixtures/dialects/athena/select_map_function.sql
@@ -0,0 +1,30 @@
+SELECT map();
+
+WITH dataset AS (
+    SELECT map(
+        ARRAY['first', 'last', 'age'],
+        ARRAY['Bob', 'Smith', '35']
+    ) AS a_map
+)
+
+SELECT a_map FROM dataset;
+
+SELECT map_filter(map(ARRAY[], ARRAY[]), (k, v) -> true);
+-- -- {}
+
+SELECT map_filter(
+    map(
+        ARRAY[10, 20, 30],
+        ARRAY['a', null, 'c']
+    ),
+    (k, v) -> v IS NOT NULL
+);
+-- -- {10 -> a, 30 -> c}
+
+SELECT map_filter(
+    map(
+        ARRAY['k1', 'k2', 'k3'],
+        ARRAY[20, 3, 15]
+    ),
+    (k, v) -> v > 10
+);
diff --git a/test/fixtures/dialects/athena/select_map_function.yml b/test/fixtures/dialects/athena/select_map_function.yml
new file mode 100644
index 0000000..c5d2d0e
--- /dev/null
+++ b/test/fixtures/dialects/athena/select_map_function.yml
@@ -0,0 +1,246 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 1abe1b4ebaac883e5732f0f6422bbb001f44fd562b9a834fec52ecb40e785427
+file:
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: map
+            bracketed:
+              start_bracket: (
+              end_bracket: )
+- statement_terminator: ;
+- statement:
+    with_compound_statement:
+      keyword: WITH
+      common_table_expression:
+        naked_identifier: dataset
+        keyword: AS
+        bracketed:
+          start_bracket: (
+          select_statement:
+            select_clause:
+              keyword: SELECT
+              select_clause_element:
+                function:
+                  function_name:
+                    function_name_identifier: map
+                  bracketed:
+                  - start_bracket: (
+                  - expression:
+                      typed_array_literal:
+                        array_type:
+                          keyword: ARRAY
+                        array_literal:
+                        - start_square_bracket: '['
+                        - quoted_literal: "'first'"
+                        - comma: ','
+                        - quoted_literal: "'last'"
+                        - comma: ','
+                        - quoted_literal: "'age'"
+                        - end_square_bracket: ']'
+                  - comma: ','
+                  - expression:
+                      typed_array_literal:
+                        array_type:
+                          keyword: ARRAY
+                        array_literal:
+                        - start_square_bracket: '['
+                        - quoted_literal: "'Bob'"
+                        - comma: ','
+                        - quoted_literal: "'Smith'"
+                        - comma: ','
+                        - quoted_literal: "'35'"
+                        - end_square_bracket: ']'
+                  - end_bracket: )
+                alias_expression:
+                  keyword: AS
+                  naked_identifier: a_map
+          end_bracket: )
+      select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            column_reference:
+              naked_identifier: a_map
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: dataset
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: map_filter
+            bracketed:
+            - start_bracket: (
+            - expression:
+                function:
+                  function_name:
+                    function_name_identifier: map
+                  bracketed:
+                  - start_bracket: (
+                  - expression:
+                      typed_array_literal:
+                        array_type:
+                          keyword: ARRAY
+                        array_literal:
+                          start_square_bracket: '['
+                          end_square_bracket: ']'
+                  - comma: ','
+                  - expression:
+                      typed_array_literal:
+                        array_type:
+                          keyword: ARRAY
+                        array_literal:
+                          start_square_bracket: '['
+                          end_square_bracket: ']'
+                  - end_bracket: )
+            - comma: ','
+            - expression:
+                bracketed:
+                - start_bracket: (
+                - column_reference:
+                    naked_identifier: k
+                - comma: ','
+                - column_reference:
+                    naked_identifier: v
+                - end_bracket: )
+                binary_operator: ->
+                boolean_literal: 'true'
+            - end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: map_filter
+            bracketed:
+            - start_bracket: (
+            - expression:
+                function:
+                  function_name:
+                    function_name_identifier: map
+                  bracketed:
+                  - start_bracket: (
+                  - expression:
+                      typed_array_literal:
+                        array_type:
+                          keyword: ARRAY
+                        array_literal:
+                        - start_square_bracket: '['
+                        - numeric_literal: '10'
+                        - comma: ','
+                        - numeric_literal: '20'
+                        - comma: ','
+                        - numeric_literal: '30'
+                        - end_square_bracket: ']'
+                  - comma: ','
+                  - expression:
+                      typed_array_literal:
+                        array_type:
+                          keyword: ARRAY
+                        array_literal:
+                        - start_square_bracket: '['
+                        - quoted_literal: "'a'"
+                        - comma: ','
+                        - null_literal: 'null'
+                        - comma: ','
+                        - quoted_literal: "'c'"
+                        - end_square_bracket: ']'
+                  - end_bracket: )
+            - comma: ','
+            - expression:
+              - bracketed:
+                - start_bracket: (
+                - column_reference:
+                    naked_identifier: k
+                - comma: ','
+                - column_reference:
+                    naked_identifier: v
+                - end_bracket: )
+              - binary_operator: ->
+              - column_reference:
+                  naked_identifier: v
+              - keyword: IS
+              - keyword: NOT
+              - keyword: 'NULL'
+            - end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: map_filter
+            bracketed:
+            - start_bracket: (
+            - expression:
+                function:
+                  function_name:
+                    function_name_identifier: map
+                  bracketed:
+                  - start_bracket: (
+                  - expression:
+                      typed_array_literal:
+                        array_type:
+                          keyword: ARRAY
+                        array_literal:
+                        - start_square_bracket: '['
+                        - quoted_literal: "'k1'"
+                        - comma: ','
+                        - quoted_literal: "'k2'"
+                        - comma: ','
+                        - quoted_literal: "'k3'"
+                        - end_square_bracket: ']'
+                  - comma: ','
+                  - expression:
+                      typed_array_literal:
+                        array_type:
+                          keyword: ARRAY
+                        array_literal:
+                        - start_square_bracket: '['
+                        - numeric_literal: '20'
+                        - comma: ','
+                        - numeric_literal: '3'
+                        - comma: ','
+                        - numeric_literal: '15'
+                        - end_square_bracket: ']'
+                  - end_bracket: )
+            - comma: ','
+            - expression:
+                bracketed:
+                - start_bracket: (
+                - column_reference:
+                    naked_identifier: k
+                - comma: ','
+                - column_reference:
+                    naked_identifier: v
+                - end_bracket: )
+                binary_operator: ->
+                column_reference:
+                  naked_identifier: v
+                comparison_operator:
+                  raw_comparison_operator: '>'
+                numeric_literal: '10'
+            - end_bracket: )
+- statement_terminator: ;
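Most of what this fixture exercises is the lambda argument syntax, (k, v) -> expr, which the YAML shows parsing as a bracketed parameter list, a -> binary_operator and a plain expression. A quick way to probe the same grammar from Python, assuming unparsable input surfaces in lint results under the PRS code as it does on the CLI:

# Sketch: lint a lambda-heavy snippet and look only for parse errors.
# Assumption: parse failures appear as violations whose "code" is "PRS",
# mirroring the CLI output; any style-rule findings are ignored here.
import sqlfluff

sql = "SELECT map_filter(map(ARRAY[10, 20], ARRAY['a', 'b']), (k, v) -> v IS NOT NULL);\n"

violations = sqlfluff.lint(sql, dialect="athena")
parse_errors = [v for v in violations if v["code"].startswith("PRS")]
print("parse errors:", parse_errors or "none")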
diff --git a/test/fixtures/dialects/athena/select_map_type.sql b/test/fixtures/dialects/athena/select_map_type.sql
new file mode 100644
index 0000000..7e7b128
--- /dev/null
+++ b/test/fixtures/dialects/athena/select_map_type.sql
@@ -0,0 +1,8 @@
+SELECT
+    CAST(
+        JSON_PARSE(table_name.column_name) AS MAP<VARCHAR, VARCHAR>
+    ) AS json_map
+FROM table_name;
+
+CREATE TABLE map_table(c1 map<string, integer>) LOCATION '...';
+INSERT INTO map_table values(MAP(ARRAY['foo', 'bar'], ARRAY[1, 2]));
diff --git a/test/fixtures/dialects/athena/select_map_type.yml b/test/fixtures/dialects/athena/select_map_type.yml
new file mode 100644
index 0000000..8700e4d
--- /dev/null
+++ b/test/fixtures/dialects/athena/select_map_type.yml
@@ -0,0 +1,120 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: ccb2a26dc4a874a4a86947e81c2e5548f2ec40c503dce5a045498898229c542a
+file:
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: CAST
+            bracketed:
+              start_bracket: (
+              expression:
+                function:
+                  function_name:
+                    function_name_identifier: JSON_PARSE
+                  bracketed:
+                    start_bracket: (
+                    expression:
+                      column_reference:
+                      - naked_identifier: table_name
+                      - dot: .
+                      - naked_identifier: column_name
+                    end_bracket: )
+              keyword: AS
+              data_type:
+                map_type:
+                  keyword: MAP
+                  map_type_schema:
+                    start_angle_bracket: <
+                    primitive_type:
+                      keyword: VARCHAR
+                    comma: ','
+                    data_type:
+                      primitive_type:
+                        keyword: VARCHAR
+                    end_angle_bracket: '>'
+              end_bracket: )
+          alias_expression:
+            keyword: AS
+            naked_identifier: json_map
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: table_name
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: map_table
+    - bracketed:
+        start_bracket: (
+        column_definition:
+          naked_identifier: c1
+          data_type:
+            map_type:
+              keyword: map
+              map_type_schema:
+                start_angle_bracket: <
+                primitive_type:
+                  keyword: string
+                comma: ','
+                data_type:
+                  primitive_type:
+                    keyword: integer
+                end_angle_bracket: '>'
+        end_bracket: )
+    - keyword: LOCATION
+    - quoted_literal: "'...'"
+- statement_terminator: ;
+- statement:
+    insert_statement:
+    - keyword: INSERT
+    - keyword: INTO
+    - table_reference:
+        naked_identifier: map_table
+    - values_clause:
+        keyword: values
+        bracketed:
+          start_bracket: (
+          expression:
+            function:
+              function_name:
+                function_name_identifier: MAP
+              bracketed:
+              - start_bracket: (
+              - expression:
+                  typed_array_literal:
+                    array_type:
+                      keyword: ARRAY
+                    array_literal:
+                    - start_square_bracket: '['
+                    - quoted_literal: "'foo'"
+                    - comma: ','
+                    - quoted_literal: "'bar'"
+                    - end_square_bracket: ']'
+              - comma: ','
+              - expression:
+                  typed_array_literal:
+                    array_type:
+                      keyword: ARRAY
+                    array_literal:
+                    - start_square_bracket: '['
+                    - numeric_literal: '1'
+                    - comma: ','
+                    - numeric_literal: '2'
+                    - end_square_bracket: ']'
+              - end_bracket: )
+          end_bracket: )
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/athena/select_reduce.yml b/test/fixtures/dialects/athena/select_reduce.yml
index c69a445..8416c02 100644
--- a/test/fixtures/dialects/athena/select_reduce.yml
+++ b/test/fixtures/dialects/athena/select_reduce.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: ad1d9abd13e8ba86dee71e83e8d19856891b3a75a42f45960e554444de1cc8f7
+_hash: feb95eedec538fb26ced4e00655755b22b368d8bd2dc4ad12e3460e80692ec73
 file:
 - statement:
     select_statement:
@@ -16,10 +16,12 @@ file:
             bracketed:
             - start_bracket: (
             - expression:
-                array_literal:
-                  keyword: ARRAY
-                  start_square_bracket: '['
-                  end_square_bracket: ']'
+                typed_array_literal:
+                  array_type:
+                    keyword: ARRAY
+                  array_literal:
+                    start_square_bracket: '['
+                    end_square_bracket: ']'
             - comma: ','
             - expression:
                 numeric_literal: '0'
@@ -59,15 +61,17 @@ file:
             bracketed:
             - start_bracket: (
             - expression:
-                array_literal:
-                - keyword: ARRAY
-                - start_square_bracket: '['
-                - numeric_literal: '5'
-                - comma: ','
-                - numeric_literal: '20'
-                - comma: ','
-                - numeric_literal: '50'
-                - end_square_bracket: ']'
+                typed_array_literal:
+                  array_type:
+                    keyword: ARRAY
+                  array_literal:
+                  - start_square_bracket: '['
+                  - numeric_literal: '5'
+                  - comma: ','
+                  - numeric_literal: '20'
+                  - comma: ','
+                  - numeric_literal: '50'
+                  - end_square_bracket: ']'
             - comma: ','
             - expression:
                 numeric_literal: '0'
@@ -107,17 +111,19 @@ file:
             bracketed:
             - start_bracket: (
             - expression:
-                array_literal:
-                - keyword: ARRAY
-                - start_square_bracket: '['
-                - numeric_literal: '5'
-                - comma: ','
-                - numeric_literal: '20'
-                - comma: ','
-                - null_literal: 'NULL'
-                - comma: ','
-                - numeric_literal: '50'
-                - end_square_bracket: ']'
+                typed_array_literal:
+                  array_type:
+                    keyword: ARRAY
+                  array_literal:
+                  - start_square_bracket: '['
+                  - numeric_literal: '5'
+                  - comma: ','
+                  - numeric_literal: '20'
+                  - comma: ','
+                  - null_literal: 'NULL'
+                  - comma: ','
+                  - numeric_literal: '50'
+                  - end_square_bracket: ']'
             - comma: ','
             - expression:
                 numeric_literal: '0'
@@ -157,17 +163,19 @@ file:
             bracketed:
             - start_bracket: (
             - expression:
-                array_literal:
-                - keyword: ARRAY
-                - start_square_bracket: '['
-                - numeric_literal: '5'
-                - comma: ','
-                - numeric_literal: '20'
-                - comma: ','
-                - null_literal: 'NULL'
-                - comma: ','
-                - numeric_literal: '50'
-                - end_square_bracket: ']'
+                typed_array_literal:
+                  array_type:
+                    keyword: ARRAY
+                  array_literal:
+                  - start_square_bracket: '['
+                  - numeric_literal: '5'
+                  - comma: ','
+                  - numeric_literal: '20'
+                  - comma: ','
+                  - null_literal: 'NULL'
+                  - comma: ','
+                  - numeric_literal: '50'
+                  - end_square_bracket: ']'
             - comma: ','
             - expression:
                 numeric_literal: '0'
@@ -217,17 +225,19 @@ file:
             bracketed:
             - start_bracket: (
             - expression:
-                array_literal:
-                - keyword: ARRAY
-                - start_square_bracket: '['
-                - numeric_literal: '5'
-                - comma: ','
-                - numeric_literal: '20'
-                - comma: ','
-                - null_literal: 'NULL'
-                - comma: ','
-                - numeric_literal: '50'
-                - end_square_bracket: ']'
+                typed_array_literal:
+                  array_type:
+                    keyword: ARRAY
+                  array_literal:
+                  - start_square_bracket: '['
+                  - numeric_literal: '5'
+                  - comma: ','
+                  - numeric_literal: '20'
+                  - comma: ','
+                  - null_literal: 'NULL'
+                  - comma: ','
+                  - numeric_literal: '50'
+                  - end_square_bracket: ']'
             - comma: ','
             - expression:
                 numeric_literal: '0'
@@ -284,13 +294,15 @@ file:
             bracketed:
             - start_bracket: (
             - expression:
-                array_literal:
-                - keyword: ARRAY
-                - start_square_bracket: '['
-                - numeric_literal: '2147483647'
-                - comma: ','
-                - numeric_literal: '1'
-                - end_square_bracket: ']'
+                typed_array_literal:
+                  array_type:
+                    keyword: ARRAY
+                  array_literal:
+                  - start_square_bracket: '['
+                  - numeric_literal: '2147483647'
+                  - comma: ','
+                  - numeric_literal: '1'
+                  - end_square_bracket: ']'
             - comma: ','
             - expression:
                 function:
@@ -341,17 +353,19 @@ file:
             bracketed:
             - start_bracket: (
             - expression:
-                array_literal:
-                - keyword: ARRAY
-                - start_square_bracket: '['
-                - numeric_literal: '5'
-                - comma: ','
-                - numeric_literal: '6'
-                - comma: ','
-                - numeric_literal: '10'
-                - comma: ','
-                - numeric_literal: '20'
-                - end_square_bracket: ']'
+                typed_array_literal:
+                  array_type:
+                    keyword: ARRAY
+                  array_literal:
+                  - start_square_bracket: '['
+                  - numeric_literal: '5'
+                  - comma: ','
+                  - numeric_literal: '6'
+                  - comma: ','
+                  - numeric_literal: '10'
+                  - comma: ','
+                  - numeric_literal: '20'
+                  - end_square_bracket: ']'
             - comma: ','
             - expression:
                 function:
diff --git a/test/fixtures/dialects/athena/select_row.sql b/test/fixtures/dialects/athena/select_row.sql
index 936c2db..7d91135 100644
--- a/test/fixtures/dialects/athena/select_row.sql
+++ b/test/fixtures/dialects/athena/select_row.sql
@@ -1,2 +1,21 @@
 SELECT ROW(1, 2.0);
+
 SELECT CAST(ROW(1, 2.0) AS ROW(x BIGINT, y DOUBLE));
+
+SELECT ARRAY[CAST(ROW(1) AS ROW(x INT))][1].x;
+
+SELECT
+    CAST(
+        ROW(
+            ARRAY[
+                CAST(ROW('') AS ROW(id varchar))
+            ],
+            CAST(ROW('') AS ROW(id varchar)),
+            'Approved'
+        ) AS ROW(
+            approvers ARRAY<ROW(id varchar)>,
+            performer ROW(id varchar),
+            approvalStatus varchar
+        )
+    ) as test;
+
diff --git a/test/fixtures/dialects/athena/select_row.yml b/test/fixtures/dialects/athena/select_row.yml
index e51d6ac..c56bf8a 100644
--- a/test/fixtures/dialects/athena/select_row.yml
+++ b/test/fixtures/dialects/athena/select_row.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 1096522cd7003f3707936fb1159ae78b20dbde06dcbc6b218eaafd7300d18e8c
+_hash: f7fa9aefe356b727b9a732c5acbae4cb77decee6ec6a4925dfc537c64194b225
 file:
 - statement:
     select_statement:
@@ -61,3 +61,170 @@ file:
                 - end_bracket: )
               end_bracket: )
 - statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+            typed_array_literal:
+              array_type:
+                keyword: ARRAY
+              array_literal:
+                start_square_bracket: '['
+                function:
+                  function_name:
+                    function_name_identifier: CAST
+                  bracketed:
+                    start_bracket: (
+                    expression:
+                      function:
+                        function_name:
+                          function_name_identifier: ROW
+                        bracketed:
+                          start_bracket: (
+                          expression:
+                            numeric_literal: '1'
+                          end_bracket: )
+                    keyword: AS
+                    data_type:
+                      keyword: ROW
+                      bracketed:
+                        start_bracket: (
+                        naked_identifier: x
+                        data_type:
+                          primitive_type:
+                            keyword: INT
+                        end_bracket: )
+                    end_bracket: )
+                end_square_bracket: ']'
+            array_accessor:
+              start_square_bracket: '['
+              numeric_literal: '1'
+              end_square_bracket: ']'
+            dot: .
+            object_reference:
+              naked_identifier: x
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: CAST
+            bracketed:
+              start_bracket: (
+              expression:
+                function:
+                  function_name:
+                    function_name_identifier: ROW
+                  bracketed:
+                  - start_bracket: (
+                  - expression:
+                      typed_array_literal:
+                        array_type:
+                          keyword: ARRAY
+                        array_literal:
+                          start_square_bracket: '['
+                          function:
+                            function_name:
+                              function_name_identifier: CAST
+                            bracketed:
+                              start_bracket: (
+                              expression:
+                                function:
+                                  function_name:
+                                    function_name_identifier: ROW
+                                  bracketed:
+                                    start_bracket: (
+                                    expression:
+                                      quoted_literal: "''"
+                                    end_bracket: )
+                              keyword: AS
+                              data_type:
+                                keyword: ROW
+                                bracketed:
+                                  start_bracket: (
+                                  naked_identifier: id
+                                  data_type:
+                                    primitive_type:
+                                      keyword: varchar
+                                  end_bracket: )
+                              end_bracket: )
+                          end_square_bracket: ']'
+                  - comma: ','
+                  - expression:
+                      function:
+                        function_name:
+                          function_name_identifier: CAST
+                        bracketed:
+                          start_bracket: (
+                          expression:
+                            function:
+                              function_name:
+                                function_name_identifier: ROW
+                              bracketed:
+                                start_bracket: (
+                                expression:
+                                  quoted_literal: "''"
+                                end_bracket: )
+                          keyword: AS
+                          data_type:
+                            keyword: ROW
+                            bracketed:
+                              start_bracket: (
+                              naked_identifier: id
+                              data_type:
+                                primitive_type:
+                                  keyword: varchar
+                              end_bracket: )
+                          end_bracket: )
+                  - comma: ','
+                  - expression:
+                      quoted_literal: "'Approved'"
+                  - end_bracket: )
+              keyword: AS
+              data_type:
+                keyword: ROW
+                bracketed:
+                - start_bracket: (
+                - naked_identifier: approvers
+                - data_type:
+                    array_type:
+                      keyword: ARRAY
+                      array_type_schema:
+                        start_angle_bracket: <
+                        data_type:
+                          keyword: ROW
+                          bracketed:
+                            start_bracket: (
+                            naked_identifier: id
+                            data_type:
+                              primitive_type:
+                                keyword: varchar
+                            end_bracket: )
+                        end_angle_bracket: '>'
+                - comma: ','
+                - naked_identifier: performer
+                - data_type:
+                    keyword: ROW
+                    bracketed:
+                      start_bracket: (
+                      naked_identifier: id
+                      data_type:
+                        primitive_type:
+                          keyword: varchar
+                      end_bracket: )
+                - comma: ','
+                - naked_identifier: approvalStatus
+                - data_type:
+                    primitive_type:
+                      keyword: varchar
+                - end_bracket: )
+              end_bracket: )
+          alias_expression:
+            keyword: as
+            naked_identifier: test
+- statement_terminator: ;
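The additions to select_row.sql lean on deeply nested CAST(ROW(...) AS ROW(...)) types, including an ARRAY<ROW(...)> field, and the fixture records every level of that nesting. When a fixture like this misbehaves, an indented tree dump is usually easier to scan than the YAML; a sketch using the core Linter (class and attribute names reflect my reading of the 2.x internals, not a stable contract):

# Sketch: pretty-print the parse tree for a nested ROW cast.
# Assumes sqlfluff.core.Linter accepts a dialect keyword and that
# parse_string returns an object exposing .tree with .stringify().
from sqlfluff.core import Linter

sql = "SELECT CAST(ROW(1, 2.0) AS ROW(x BIGINT, y DOUBLE));\n"

linter = Linter(dialect="athena")
parsed = linter.parse_string(sql)
if parsed.tree:
    print(parsed.tree.stringify())  # one line per segment, indented by depth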
diff --git a/test/fixtures/dialects/athena/select_underscore.sql b/test/fixtures/dialects/athena/select_underscore.sql
index 64c8798..0557802 100644
--- a/test/fixtures/dialects/athena/select_underscore.sql
+++ b/test/fixtures/dialects/athena/select_underscore.sql
@@ -2,6 +2,8 @@ SELECT 1 AS _;
 
 SELECT 1 AS __;
 
+SELECT 1 AS __TEST;
+
 SELECT a
 FROM (
 VALUES ('a'), ('b')
diff --git a/test/fixtures/dialects/athena/select_underscore.yml b/test/fixtures/dialects/athena/select_underscore.yml
index c88fa29..84a64fa 100644
--- a/test/fixtures/dialects/athena/select_underscore.yml
+++ b/test/fixtures/dialects/athena/select_underscore.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 40356891ee431750530c54abf858185798c2811e5623e22a09be65c93258daf3
+_hash: eb8702f25cd1de35b8140b81706fbad6ffc0a8dceee1a6f48739822b96f30b03
 file:
 - statement:
     select_statement:
@@ -25,6 +25,16 @@ file:
             keyword: AS
             naked_identifier: __
 - statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          numeric_literal: '1'
+          alias_expression:
+            keyword: AS
+            naked_identifier: __TEST
+- statement_terminator: ;
 - statement:
     select_statement:
       select_clause:
diff --git a/test/fixtures/dialects/athena/unload_select.yml b/test/fixtures/dialects/athena/unload_select.yml
index 90b7ee0..25af7a5 100644
--- a/test/fixtures/dialects/athena/unload_select.yml
+++ b/test/fixtures/dialects/athena/unload_select.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: fee23c92023bf3d3cc4811b1bde6f35bc684d179d3f3ed3377345add96fe6e4d
+_hash: 97416e6461f0ca98486c4cddc02056d41faa0e64cfda38300f45495e37e58571
 file:
   statement:
     unload_statement:
@@ -51,11 +51,13 @@ file:
       - keyword: partitioned_by
       - comparison_operator:
           raw_comparison_operator: '='
-      - array_literal:
-          keyword: ARRAY
-          start_square_bracket: '['
-          column_reference:
-            naked_identifier: field_2
-          end_square_bracket: ']'
+      - typed_array_literal:
+          array_type:
+            keyword: ARRAY
+          array_literal:
+            start_square_bracket: '['
+            column_reference:
+              naked_identifier: field_2
+            end_square_bracket: ']'
       - end_bracket: )
   statement_terminator: ;
diff --git a/test/fixtures/dialects/bigquery/alter_table_add_column.sql b/test/fixtures/dialects/bigquery/alter_table_add_column.sql
new file mode 100644
index 0000000..74e5019
--- /dev/null
+++ b/test/fixtures/dialects/bigquery/alter_table_add_column.sql
@@ -0,0 +1,13 @@
+ALTER TABLE mydataset.mytable
+  ADD COLUMN A STRING,
+  ADD COLUMN IF NOT EXISTS B GEOGRAPHY,
+  ADD COLUMN C ARRAY<NUMERIC>,
+  ADD COLUMN D DATE OPTIONS(description="my description");
+
+ALTER TABLE mydataset.mytable
+   ADD COLUMN A STRUCT<
+       B GEOGRAPHY,
+       C ARRAY<INT64>,
+       D INT64 NOT NULL,
+       E TIMESTAMP OPTIONS(description="creation time")
+       >;
diff --git a/test/fixtures/dialects/bigquery/alter_table_add_column.yml b/test/fixtures/dialects/bigquery/alter_table_add_column.yml
new file mode 100644
index 0000000..6e9b2d9
--- /dev/null
+++ b/test/fixtures/dialects/bigquery/alter_table_add_column.yml
@@ -0,0 +1,111 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 11ca5f74cdd2c59f465e6e8a9e7a57d7717e10129695dcec229045391f266f4a
+file:
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: mydataset
+      - dot: .
+      - naked_identifier: mytable
+    - keyword: ADD
+    - keyword: COLUMN
+    - column_definition:
+        naked_identifier: A
+        data_type:
+          data_type_identifier: STRING
+    - comma: ','
+    - keyword: ADD
+    - keyword: COLUMN
+    - keyword: IF
+    - keyword: NOT
+    - keyword: EXISTS
+    - column_definition:
+        naked_identifier: B
+        data_type:
+          data_type_identifier: GEOGRAPHY
+    - comma: ','
+    - keyword: ADD
+    - keyword: COLUMN
+    - column_definition:
+        naked_identifier: C
+        data_type:
+          array_type:
+            keyword: ARRAY
+            start_angle_bracket: <
+            data_type:
+              data_type_identifier: NUMERIC
+            end_angle_bracket: '>'
+    - comma: ','
+    - keyword: ADD
+    - keyword: COLUMN
+    - column_definition:
+        naked_identifier: D
+        data_type:
+          data_type_identifier: DATE
+        options_segment:
+          keyword: OPTIONS
+          bracketed:
+            start_bracket: (
+            parameter: description
+            comparison_operator:
+              raw_comparison_operator: '='
+            quoted_literal: '"my description"'
+            end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: mydataset
+      - dot: .
+      - naked_identifier: mytable
+    - keyword: ADD
+    - keyword: COLUMN
+    - column_definition:
+        naked_identifier: A
+        data_type:
+          struct_type:
+            keyword: STRUCT
+            struct_type_schema:
+            - start_angle_bracket: <
+            - parameter: B
+            - data_type:
+                data_type_identifier: GEOGRAPHY
+            - comma: ','
+            - parameter: C
+            - data_type:
+                array_type:
+                  keyword: ARRAY
+                  start_angle_bracket: <
+                  data_type:
+                    data_type_identifier: INT64
+                  end_angle_bracket: '>'
+            - comma: ','
+            - parameter: D
+            - data_type:
+                data_type_identifier: INT64
+            - column_constraint_segment:
+              - keyword: NOT
+              - keyword: 'NULL'
+            - comma: ','
+            - parameter: E
+            - data_type:
+                data_type_identifier: TIMESTAMP
+            - options_segment:
+                keyword: OPTIONS
+                bracketed:
+                  start_bracket: (
+                  parameter: description
+                  comparison_operator:
+                    raw_comparison_operator: '='
+                  quoted_literal: '"creation time"'
+                  end_bracket: )
+            - end_angle_bracket: '>'
+- statement_terminator: ;
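Switching dialects, the new bigquery/alter_table_* fixtures cover ADD, DROP, RENAME and ALTER COLUMN, including STRUCT<...> columns with inline OPTIONS(...). The same round trip as the Athena sketches applies; one hedged example for the ADD COLUMN case:

# Sketch: check the BigQuery ALTER TABLE ... ADD COLUMN grammar parses.
# Same assumptions as the Athena sketches: sqlfluff >= 2.0 simple API,
# with parse raising on failure.
import sqlfluff

sql = (
    "ALTER TABLE mydataset.mytable\n"
    "  ADD COLUMN C ARRAY<NUMERIC>,\n"
    '  ADD COLUMN D DATE OPTIONS(description="my description");\n'
)

tree = sqlfluff.parse(sql, dialect="bigquery")
print("parsed ok; top-level keys:", list(tree))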
diff --git a/test/fixtures/dialects/bigquery/alter_table_alter_column.sql b/test/fixtures/dialects/bigquery/alter_table_alter_column.sql
new file mode 100644
index 0000000..b9ba7c3
--- /dev/null
+++ b/test/fixtures/dialects/bigquery/alter_table_alter_column.sql
@@ -0,0 +1,10 @@
+ALTER TABLE mydataset.mytable
+ALTER COLUMN IF EXISTS A SET OPTIONS (
+        description='some description here'
+),
+ALTER COLUMN IF EXISTS B DROP NOT NULL,
+ALTER COLUMN IF EXISTS C DROP DEFAULT,
+ALTER COLUMN IF EXISTS D SET DATA TYPE FLOAT64,
+ALTER COLUMN IF EXISTS E SET DEFAULT 0,
+ALTER COLUMN IF EXISTS F SET DEFAULT CURRENT_TIMESTAMP()
+;
diff --git a/test/fixtures/dialects/bigquery/alter_table_alter_column.yml b/test/fixtures/dialects/bigquery/alter_table_alter_column.yml
new file mode 100644
index 0000000..70db7d4
--- /dev/null
+++ b/test/fixtures/dialects/bigquery/alter_table_alter_column.yml
@@ -0,0 +1,82 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: db72467b0a47c3b7dc7b7528b5298b87bf78740e19446fed70a2fec6f3862b63
+file:
+  statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: mydataset
+      - dot: .
+      - naked_identifier: mytable
+    - keyword: ALTER
+    - keyword: COLUMN
+    - keyword: IF
+    - keyword: EXISTS
+    - naked_identifier: A
+    - keyword: SET
+    - options_segment:
+        keyword: OPTIONS
+        bracketed:
+          start_bracket: (
+          parameter: description
+          comparison_operator:
+            raw_comparison_operator: '='
+          quoted_literal: "'some description here'"
+          end_bracket: )
+    - comma: ','
+    - keyword: ALTER
+    - keyword: COLUMN
+    - keyword: IF
+    - keyword: EXISTS
+    - naked_identifier: B
+    - keyword: DROP
+    - keyword: NOT
+    - keyword: 'NULL'
+    - comma: ','
+    - keyword: ALTER
+    - keyword: COLUMN
+    - keyword: IF
+    - keyword: EXISTS
+    - naked_identifier: C
+    - keyword: DROP
+    - keyword: DEFAULT
+    - comma: ','
+    - keyword: ALTER
+    - keyword: COLUMN
+    - keyword: IF
+    - keyword: EXISTS
+    - naked_identifier: D
+    - keyword: SET
+    - keyword: DATA
+    - keyword: TYPE
+    - data_type:
+        data_type_identifier: FLOAT64
+    - comma: ','
+    - keyword: ALTER
+    - keyword: COLUMN
+    - keyword: IF
+    - keyword: EXISTS
+    - naked_identifier: E
+    - keyword: SET
+    - keyword: DEFAULT
+    - numeric_literal: '0'
+    - comma: ','
+    - keyword: ALTER
+    - keyword: COLUMN
+    - keyword: IF
+    - keyword: EXISTS
+    - naked_identifier: F
+    - keyword: SET
+    - keyword: DEFAULT
+    - function:
+        function_name:
+          function_name_identifier: CURRENT_TIMESTAMP
+        bracketed:
+          start_bracket: (
+          end_bracket: )
+  statement_terminator: ;
diff --git a/test/fixtures/dialects/bigquery/alter_table_drop_column.sql b/test/fixtures/dialects/bigquery/alter_table_drop_column.sql
new file mode 100644
index 0000000..3b49334
--- /dev/null
+++ b/test/fixtures/dialects/bigquery/alter_table_drop_column.sql
@@ -0,0 +1,3 @@
+ALTER TABLE mydataset.mytable
+  DROP COLUMN A,
+  DROP COLUMN IF EXISTS B;
diff --git a/test/fixtures/dialects/bigquery/alter_table_drop_column.yml b/test/fixtures/dialects/bigquery/alter_table_drop_column.yml
new file mode 100644
index 0000000..e0a3a56
--- /dev/null
+++ b/test/fixtures/dialects/bigquery/alter_table_drop_column.yml
@@ -0,0 +1,25 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: aeacd5c9b123877e49a7d44a88ed9f7b000f317f00b87f018609c220839906ab
+file:
+  statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: mydataset
+      - dot: .
+      - naked_identifier: mytable
+    - keyword: DROP
+    - keyword: COLUMN
+    - naked_identifier: A
+    - comma: ','
+    - keyword: DROP
+    - keyword: COLUMN
+    - keyword: IF
+    - keyword: EXISTS
+    - naked_identifier: B
+  statement_terminator: ;
diff --git a/test/fixtures/dialects/bigquery/alter_table_rename_column.sql b/test/fixtures/dialects/bigquery/alter_table_rename_column.sql
new file mode 100644
index 0000000..e4a2dc7
--- /dev/null
+++ b/test/fixtures/dialects/bigquery/alter_table_rename_column.sql
@@ -0,0 +1,8 @@
+ALTER TABLE mydataset.mytable
+  RENAME COLUMN A TO columnA,
+  RENAME COLUMN IF EXISTS B TO columnB;
+
+ALTER TABLE mydataset.mytable
+  RENAME COLUMN columnA TO temp,
+  RENAME COLUMN columnB TO columnA,
+  RENAME COLUMN temp TO columnB;
diff --git a/test/fixtures/dialects/bigquery/alter_table_rename_column.yml b/test/fixtures/dialects/bigquery/alter_table_rename_column.yml
new file mode 100644
index 0000000..4e22e8a
--- /dev/null
+++ b/test/fixtures/dialects/bigquery/alter_table_rename_column.yml
@@ -0,0 +1,55 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 4aa26e86554f6b3078f8555a0121be2fd24f333e52d54d9f7ef94346ca7d0e1e
+file:
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: mydataset
+      - dot: .
+      - naked_identifier: mytable
+    - keyword: RENAME
+    - keyword: COLUMN
+    - naked_identifier: A
+    - keyword: TO
+    - naked_identifier: columnA
+    - comma: ','
+    - keyword: RENAME
+    - keyword: COLUMN
+    - keyword: IF
+    - keyword: EXISTS
+    - naked_identifier: B
+    - keyword: TO
+    - naked_identifier: columnB
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: mydataset
+      - dot: .
+      - naked_identifier: mytable
+    - keyword: RENAME
+    - keyword: COLUMN
+    - naked_identifier: columnA
+    - keyword: TO
+    - naked_identifier: temp
+    - comma: ','
+    - keyword: RENAME
+    - keyword: COLUMN
+    - naked_identifier: columnB
+    - keyword: TO
+    - naked_identifier: columnA
+    - comma: ','
+    - keyword: RENAME
+    - keyword: COLUMN
+    - naked_identifier: temp
+    - keyword: TO
+    - naked_identifier: columnB
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/bigquery/alter_table_rename_to.sql b/test/fixtures/dialects/bigquery/alter_table_rename_to.sql
new file mode 100644
index 0000000..315600a
--- /dev/null
+++ b/test/fixtures/dialects/bigquery/alter_table_rename_to.sql
@@ -0,0 +1 @@
+ALTER TABLE mydataset.mytable RENAME TO mynewtable;
diff --git a/test/fixtures/dialects/bigquery/alter_table_rename_to.yml b/test/fixtures/dialects/bigquery/alter_table_rename_to.yml
new file mode 100644
index 0000000..d330515
--- /dev/null
+++ b/test/fixtures/dialects/bigquery/alter_table_rename_to.yml
@@ -0,0 +1,20 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 6ead70684bf792fdc289833285129002f41a92bb5f4c930fb4279bb04aed655c
+file:
+  statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: mydataset
+      - dot: .
+      - naked_identifier: mytable
+    - keyword: RENAME
+    - keyword: TO
+    - table_reference:
+        naked_identifier: mynewtable
+  statement_terminator: ;
diff --git a/test/fixtures/dialects/bigquery/alter_table_set_options.sql b/test/fixtures/dialects/bigquery/alter_table_set_options.sql
new file mode 100644
index 0000000..07f1d7d
--- /dev/null
+++ b/test/fixtures/dialects/bigquery/alter_table_set_options.sql
@@ -0,0 +1,9 @@
+ALTER TABLE mydataset.mytable
+SET OPTIONS (
+  expiration_timestamp=TIMESTAMP_ADD(CURRENT_TIMESTAMP(), INTERVAL 7 DAY),
+  description="Table that expires seven days from now"
+);
+
+ALTER TABLE table
+SET OPTIONS (expiration_timestamp = NULL)
+;
diff --git a/test/fixtures/dialects/bigquery/alter_table_set_options.yml b/test/fixtures/dialects/bigquery/alter_table_set_options.yml
new file mode 100644
index 0000000..7978e5c
--- /dev/null
+++ b/test/fixtures/dialects/bigquery/alter_table_set_options.yml
@@ -0,0 +1,67 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 337e774718873b99a59ccc416f8dfd887b30f51835c7bc1af3b10649192a557d
+file:
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: mydataset
+      - dot: .
+      - naked_identifier: mytable
+    - keyword: SET
+    - options_segment:
+        keyword: OPTIONS
+        bracketed:
+        - start_bracket: (
+        - parameter: expiration_timestamp
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - function:
+            function_name:
+              function_name_identifier: TIMESTAMP_ADD
+            bracketed:
+            - start_bracket: (
+            - expression:
+                function:
+                  function_name:
+                    function_name_identifier: CURRENT_TIMESTAMP
+                  bracketed:
+                    start_bracket: (
+                    end_bracket: )
+            - comma: ','
+            - expression:
+                interval_expression:
+                  keyword: INTERVAL
+                  expression:
+                    numeric_literal: '7'
+                  date_part: DAY
+            - end_bracket: )
+        - comma: ','
+        - parameter: description
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - quoted_literal: '"Table that expires seven days from now"'
+        - end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: table
+    - keyword: SET
+    - options_segment:
+        keyword: OPTIONS
+        bracketed:
+          start_bracket: (
+          parameter: expiration_timestamp
+          comparison_operator:
+            raw_comparison_operator: '='
+          null_literal: 'NULL'
+          end_bracket: )
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/bigquery/alter_view_set_options.sql b/test/fixtures/dialects/bigquery/alter_view_set_options.sql
new file mode 100644
index 0000000..4a990c9
--- /dev/null
+++ b/test/fixtures/dialects/bigquery/alter_view_set_options.sql
@@ -0,0 +1,5 @@
+ALTER VIEW mydataset.myview
+SET OPTIONS (
+  expiration_timestamp=TIMESTAMP_ADD(CURRENT_TIMESTAMP(), INTERVAL 7 DAY),
+  description="View that expires seven days from now"
+);
diff --git a/test/fixtures/dialects/bigquery/alter_view_set_options.yml b/test/fixtures/dialects/bigquery/alter_view_set_options.yml
new file mode 100644
index 0000000..39dc561
--- /dev/null
+++ b/test/fixtures/dialects/bigquery/alter_view_set_options.yml
@@ -0,0 +1,50 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 5f71083b1799f2398c52344de1bf9e0d4fe5bd38e8de20622f9d591948264bde
+file:
+  statement:
+    alter_view_statement:
+    - keyword: ALTER
+    - keyword: VIEW
+    - table_reference:
+      - naked_identifier: mydataset
+      - dot: .
+      - naked_identifier: myview
+    - keyword: SET
+    - options_segment:
+        keyword: OPTIONS
+        bracketed:
+        - start_bracket: (
+        - parameter: expiration_timestamp
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - function:
+            function_name:
+              function_name_identifier: TIMESTAMP_ADD
+            bracketed:
+            - start_bracket: (
+            - expression:
+                function:
+                  function_name:
+                    function_name_identifier: CURRENT_TIMESTAMP
+                  bracketed:
+                    start_bracket: (
+                    end_bracket: )
+            - comma: ','
+            - expression:
+                interval_expression:
+                  keyword: INTERVAL
+                  expression:
+                    numeric_literal: '7'
+                  date_part: DAY
+            - end_bracket: )
+        - comma: ','
+        - parameter: description
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - quoted_literal: '"View that expires seven days from now"'
+        - end_bracket: )
+  statement_terminator: ;
diff --git a/test/fixtures/dialects/bigquery/assert.sql b/test/fixtures/dialects/bigquery/assert.sql
new file mode 100644
index 0000000..249b813
--- /dev/null
+++ b/test/fixtures/dialects/bigquery/assert.sql
@@ -0,0 +1,11 @@
+ASSERT (
+  (SELECT COUNT(*) FROM UNNEST([1, 2, 3, 4, 5, 6])) > 5
+) AS 'Table must contain more than 5 rows.';
+
+ASSERT
+  EXISTS(
+    SELECT X
+    FROM UNNEST([7877, 7879, 7883, 7901, 7907]) AS X
+    WHERE X = 7919
+  )
+AS 'Column X must contain the value 7919';
diff --git a/test/fixtures/dialects/bigquery/assert.yml b/test/fixtures/dialects/bigquery/assert.yml
new file mode 100644
index 0000000..b329006
--- /dev/null
+++ b/test/fixtures/dialects/bigquery/assert.yml
@@ -0,0 +1,114 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 22c4c59aa20ff6975daccb6e8dd8da9b0c33c1bb97c7426f9ac80f4e94f33fbc
+file:
+- statement:
+    assert_statement:
+    - keyword: ASSERT
+    - expression:
+        bracketed:
+          start_bracket: (
+          expression:
+            bracketed:
+              start_bracket: (
+              expression:
+                select_statement:
+                  select_clause:
+                    keyword: SELECT
+                    select_clause_element:
+                      function:
+                        function_name:
+                          function_name_identifier: COUNT
+                        bracketed:
+                          start_bracket: (
+                          star: '*'
+                          end_bracket: )
+                  from_clause:
+                    keyword: FROM
+                    from_expression:
+                      from_expression_element:
+                        table_expression:
+                          function:
+                            function_name:
+                              function_name_identifier: UNNEST
+                            bracketed:
+                              start_bracket: (
+                              expression:
+                                array_literal:
+                                - start_square_bracket: '['
+                                - numeric_literal: '1'
+                                - comma: ','
+                                - numeric_literal: '2'
+                                - comma: ','
+                                - numeric_literal: '3'
+                                - comma: ','
+                                - numeric_literal: '4'
+                                - comma: ','
+                                - numeric_literal: '5'
+                                - comma: ','
+                                - numeric_literal: '6'
+                                - end_square_bracket: ']'
+                              end_bracket: )
+              end_bracket: )
+            comparison_operator:
+              raw_comparison_operator: '>'
+            numeric_literal: '5'
+          end_bracket: )
+    - keyword: AS
+    - quoted_literal: "'Table must contain more than 5 rows.'"
+- statement_terminator: ;
+- statement:
+    assert_statement:
+    - keyword: ASSERT
+    - expression:
+        keyword: EXISTS
+        bracketed:
+          start_bracket: (
+          select_statement:
+            select_clause:
+              keyword: SELECT
+              select_clause_element:
+                column_reference:
+                  naked_identifier: X
+            from_clause:
+              keyword: FROM
+              from_expression:
+                from_expression_element:
+                  table_expression:
+                    function:
+                      function_name:
+                        function_name_identifier: UNNEST
+                      bracketed:
+                        start_bracket: (
+                        expression:
+                          array_literal:
+                          - start_square_bracket: '['
+                          - numeric_literal: '7877'
+                          - comma: ','
+                          - numeric_literal: '7879'
+                          - comma: ','
+                          - numeric_literal: '7883'
+                          - comma: ','
+                          - numeric_literal: '7901'
+                          - comma: ','
+                          - numeric_literal: '7907'
+                          - end_square_bracket: ']'
+                        end_bracket: )
+                  alias_expression:
+                    keyword: AS
+                    naked_identifier: X
+            where_clause:
+              keyword: WHERE
+              expression:
+                column_reference:
+                  naked_identifier: X
+                comparison_operator:
+                  raw_comparison_operator: '='
+                numeric_literal: '7919'
+          end_bracket: )
+    - keyword: AS
+    - quoted_literal: "'Column X must contain the value 7919'"
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/bigquery/create_js_function_complex_types.yml b/test/fixtures/dialects/bigquery/create_js_function_complex_types.yml
index bc6f4cf..56d44bb 100644
--- a/test/fixtures/dialects/bigquery/create_js_function_complex_types.yml
+++ b/test/fixtures/dialects/bigquery/create_js_function_complex_types.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 22d3deb680071f3dd8bc74a94c9209caa5ca703db4d73db66266ad291c4162cf
+_hash: 8f953523c623c2f0f692913757e4e5b6b3166da87fe5b8f571796a8e326977cb
 file:
   statement:
     create_function_statement:
@@ -21,77 +21,85 @@ file:
         - comma: ','
         - parameter: foo2
         - data_type:
-            keyword: ARRAY
-            start_angle_bracket: <
-            data_type:
-              data_type_identifier: STRING
-            end_angle_bracket: '>'
+            array_type:
+              keyword: ARRAY
+              start_angle_bracket: <
+              data_type:
+                data_type_identifier: STRING
+              end_angle_bracket: '>'
         - comma: ','
         - parameter: foo3
         - data_type:
             struct_type:
               keyword: STRUCT
-              start_angle_bracket: <
-              parameter: x
-              data_type:
-                data_type_identifier: INT64
-              end_angle_bracket: '>'
+              struct_type_schema:
+                start_angle_bracket: <
+                parameter: x
+                data_type:
+                  data_type_identifier: INT64
+                end_angle_bracket: '>'
         - comma: ','
         - parameter: foo4
         - data_type:
             struct_type:
-            - keyword: STRUCT
-            - start_angle_bracket: <
-            - parameter: x
-            - data_type:
-                data_type_identifier: INT64
-            - comma: ','
-            - parameter: y
-            - data_type:
-                data_type_identifier: INT64
-            - end_angle_bracket: '>'
+              keyword: STRUCT
+              struct_type_schema:
+              - start_angle_bracket: <
+              - parameter: x
+              - data_type:
+                  data_type_identifier: INT64
+              - comma: ','
+              - parameter: y
+              - data_type:
+                  data_type_identifier: INT64
+              - end_angle_bracket: '>'
         - comma: ','
         - parameter: foo5
         - data_type:
             struct_type:
-            - keyword: STRUCT
-            - start_angle_bracket: <
-            - parameter: a
-            - data_type:
-                keyword: ARRAY
-                start_angle_bracket: <
-                data_type:
-                  data_type_identifier: FLOAT
-                end_angle_bracket: '>'
-            - comma: ','
-            - parameter: b
-            - data_type:
-                struct_type:
-                - keyword: STRUCT
-                - start_angle_bracket: <
-                - parameter: x
-                - data_type:
-                    data_type_identifier: INT64
-                - comma: ','
-                - parameter: y
-                - data_type:
-                    data_type_identifier: INT64
-                - end_angle_bracket: '>'
-            - end_angle_bracket: '>'
+              keyword: STRUCT
+              struct_type_schema:
+              - start_angle_bracket: <
+              - parameter: a
+              - data_type:
+                  array_type:
+                    keyword: ARRAY
+                    start_angle_bracket: <
+                    data_type:
+                      data_type_identifier: FLOAT
+                    end_angle_bracket: '>'
+              - comma: ','
+              - parameter: b
+              - data_type:
+                  struct_type:
+                    keyword: STRUCT
+                    struct_type_schema:
+                    - start_angle_bracket: <
+                    - parameter: x
+                    - data_type:
+                        data_type_identifier: INT64
+                    - comma: ','
+                    - parameter: y
+                    - data_type:
+                        data_type_identifier: INT64
+                    - end_angle_bracket: '>'
+              - end_angle_bracket: '>'
         - end_bracket: )
     - keyword: RETURNS
     - data_type:
         struct_type:
           keyword: STRUCT
-          start_angle_bracket: <
-          parameter: product_id
-          data_type:
-            keyword: ARRAY
+          struct_type_schema:
             start_angle_bracket: <
+            parameter: product_id
             data_type:
-              data_type_identifier: INT64
+              array_type:
+                keyword: ARRAY
+                start_angle_bracket: <
+                data_type:
+                  data_type_identifier: INT64
+                end_angle_bracket: '>'
             end_angle_bracket: '>'
-          end_angle_bracket: '>'
     - function_definition:
       - keyword: LANGUAGE
       - naked_identifier: js
diff --git a/test/fixtures/dialects/bigquery/create_js_function_options_library_array.yml b/test/fixtures/dialects/bigquery/create_js_function_options_library_array.yml
index 6081aae..d90fb27 100644
--- a/test/fixtures/dialects/bigquery/create_js_function_options_library_array.yml
+++ b/test/fixtures/dialects/bigquery/create_js_function_options_library_array.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: b89918d7a2b4acf619eee92c26d81ea5d2f81e95e75467199209512ea4c8bab4
+_hash: 61c3e66ff3de5079c9134b67ac6d4eb08d90ccbeeb79ef35a01d5588eee8a2a5
 file:
   statement:
     create_function_statement:
@@ -21,21 +21,23 @@ file:
           end_bracket: )
     - keyword: RETURNS
     - data_type:
-        keyword: ARRAY
-        start_angle_bracket: <
-        data_type:
-          struct_type:
-          - keyword: STRUCT
-          - start_angle_bracket: <
-          - parameter: product_id
-          - data_type:
-              data_type_identifier: INT64
-          - comma: ','
-          - parameter: rating
-          - data_type:
-              data_type_identifier: FLOAT64
-          - end_angle_bracket: '>'
-        end_angle_bracket: '>'
+        array_type:
+          keyword: ARRAY
+          start_angle_bracket: <
+          data_type:
+            struct_type:
+              keyword: STRUCT
+              struct_type_schema:
+              - start_angle_bracket: <
+              - parameter: product_id
+              - data_type:
+                  data_type_identifier: INT64
+              - comma: ','
+              - parameter: rating
+              - data_type:
+                  data_type_identifier: FLOAT64
+              - end_angle_bracket: '>'
+          end_angle_bracket: '>'
     - function_definition:
       - keyword: LANGUAGE
       - naked_identifier: js
diff --git a/test/fixtures/dialects/bigquery/create_js_function_quoted_name.yml b/test/fixtures/dialects/bigquery/create_js_function_quoted_name.yml
index 5f218dd..8ffcc2f 100644
--- a/test/fixtures/dialects/bigquery/create_js_function_quoted_name.yml
+++ b/test/fixtures/dialects/bigquery/create_js_function_quoted_name.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 3993c0772fbe5340d14cae2a13a7bc07e29807131bc3956a3c846110328c483f
+_hash: 4a4cb38f3e6f423ed83783a235c1c7ac5c0a268cfd2a4903b20ccba8352d5faa
 file:
   statement:
     create_function_statement:
@@ -23,15 +23,17 @@ file:
     - data_type:
         struct_type:
           keyword: STRUCT
-          start_angle_bracket: <
-          parameter: '`$=`'
-          data_type:
-            keyword: ARRAY
+          struct_type_schema:
             start_angle_bracket: <
+            parameter: '`$=`'
             data_type:
-              data_type_identifier: INT64
+              array_type:
+                keyword: ARRAY
+                start_angle_bracket: <
+                data_type:
+                  data_type_identifier: INT64
+                end_angle_bracket: '>'
             end_angle_bracket: '>'
-          end_angle_bracket: '>'
     - function_definition:
       - keyword: LANGUAGE
       - naked_identifier: js
diff --git a/test/fixtures/dialects/bigquery/create_js_function_underscore_name.yml b/test/fixtures/dialects/bigquery/create_js_function_underscore_name.yml
index e6077db..565b8ed 100644
--- a/test/fixtures/dialects/bigquery/create_js_function_underscore_name.yml
+++ b/test/fixtures/dialects/bigquery/create_js_function_underscore_name.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: fb5243cbabbf08a55a04c661b274c8e5e4dd127218aaa6ed061caf338165eb08
+_hash: dd3ec6684af84a81c35279fd471dd61e0f8e6b4947388c123b76bd6d21cb80ea
 file:
   statement:
     create_function_statement:
@@ -23,15 +23,17 @@ file:
     - data_type:
         struct_type:
           keyword: STRUCT
-          start_angle_bracket: <
-          parameter: _product_id
-          data_type:
-            keyword: ARRAY
+          struct_type_schema:
             start_angle_bracket: <
+            parameter: _product_id
             data_type:
-              data_type_identifier: INT64
+              array_type:
+                keyword: ARRAY
+                start_angle_bracket: <
+                data_type:
+                  data_type_identifier: INT64
+                end_angle_bracket: '>'
             end_angle_bracket: '>'
-          end_angle_bracket: '>'
     - function_definition:
       - keyword: LANGUAGE
       - naked_identifier: js
diff --git a/test/fixtures/dialects/bigquery/create_table_column_options.yml b/test/fixtures/dialects/bigquery/create_table_column_options.yml
index 1d4886a..3849adb 100644
--- a/test/fixtures/dialects/bigquery/create_table_column_options.yml
+++ b/test/fixtures/dialects/bigquery/create_table_column_options.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 739a8e7f8f105b16fc898fe7c28ec270b485e155f93df0877a0b35d9ad15320f
+_hash: 7c6425d4b2c260947fda76839d8c852def220cf4d503f57679b1e8a198e0dd73
 file:
 - statement:
     create_table_statement:
@@ -67,29 +67,7 @@ file:
           data_type:
             struct_type:
               keyword: STRUCT
-              start_angle_bracket: <
-              parameter: col1
-              data_type:
-                data_type_identifier: INT64
-              options_segment:
-                keyword: OPTIONS
-                bracketed:
-                  start_bracket: (
-                  parameter: description
-                  comparison_operator:
-                    raw_comparison_operator: '='
-                  quoted_literal: '"An INTEGER field in a STRUCT"'
-                  end_bracket: )
-              end_angle_bracket: '>'
-      - comma: ','
-      - column_definition:
-          naked_identifier: y
-          data_type:
-            keyword: ARRAY
-            start_angle_bracket: <
-            data_type:
-              struct_type:
-                keyword: STRUCT
+              struct_type_schema:
                 start_angle_bracket: <
                 parameter: col1
                 data_type:
@@ -101,9 +79,34 @@ file:
                     parameter: description
                     comparison_operator:
                       raw_comparison_operator: '='
-                    quoted_literal: '"An INTEGER field in a REPEATED STRUCT"'
+                    quoted_literal: '"An INTEGER field in a STRUCT"'
                     end_bracket: )
                 end_angle_bracket: '>'
-            end_angle_bracket: '>'
+      - comma: ','
+      - column_definition:
+          naked_identifier: y
+          data_type:
+            array_type:
+              keyword: ARRAY
+              start_angle_bracket: <
+              data_type:
+                struct_type:
+                  keyword: STRUCT
+                  struct_type_schema:
+                    start_angle_bracket: <
+                    parameter: col1
+                    data_type:
+                      data_type_identifier: INT64
+                    options_segment:
+                      keyword: OPTIONS
+                      bracketed:
+                        start_bracket: (
+                        parameter: description
+                        comparison_operator:
+                          raw_comparison_operator: '='
+                        quoted_literal: '"An INTEGER field in a REPEATED STRUCT"'
+                        end_bracket: )
+                    end_angle_bracket: '>'
+              end_angle_bracket: '>'
       - end_bracket: )
 - statement_terminator: ;
diff --git a/test/fixtures/dialects/bigquery/declare_variable.yml b/test/fixtures/dialects/bigquery/declare_variable.yml
index a845b57..ff4f783 100644
--- a/test/fixtures/dialects/bigquery/declare_variable.yml
+++ b/test/fixtures/dialects/bigquery/declare_variable.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: a7344a57c0e59ce697ccfabc4e78ada5ea7a5f53123ec3a8839bca556863d42d
+_hash: 5b2050e76f6e2498f1fbdbe605abe116b14b2f43edfb1b5108fe2258010e715b
 file:
 - statement:
     declare_segment:
@@ -46,10 +46,11 @@ file:
       naked_identifier: var6
       data_type:
         data_type_identifier: string
-        bracketed:
-          start_bracket: (
-          numeric_literal: '10'
-          end_bracket: )
+        bracketed_arguments:
+          bracketed:
+            start_bracket: (
+            numeric_literal: '10'
+            end_bracket: )
 - statement_terminator: ;
 - statement:
     declare_segment:
@@ -57,23 +58,25 @@ file:
       naked_identifier: var7
       data_type:
         data_type_identifier: numeric
-        bracketed:
-        - start_bracket: (
-        - numeric_literal: '5'
-        - comma: ','
-        - numeric_literal: '2'
-        - end_bracket: )
+        bracketed_arguments:
+          bracketed:
+          - start_bracket: (
+          - numeric_literal: '5'
+          - comma: ','
+          - numeric_literal: '2'
+          - end_bracket: )
 - statement_terminator: ;
 - statement:
     declare_segment:
       keyword: declare
       naked_identifier: arr1
       data_type:
-        keyword: array
-        start_angle_bracket: <
-        data_type:
-          data_type_identifier: string
-        end_angle_bracket: '>'
+        array_type:
+          keyword: array
+          start_angle_bracket: <
+          data_type:
+            data_type_identifier: string
+          end_angle_bracket: '>'
 - statement_terminator: ;
 - statement:
     declare_segment:
@@ -101,11 +104,12 @@ file:
     - keyword: declare
     - naked_identifier: arr4
     - data_type:
-        keyword: array
-        start_angle_bracket: <
-        data_type:
-          data_type_identifier: string
-        end_angle_bracket: '>'
+        array_type:
+          keyword: array
+          start_angle_bracket: <
+          data_type:
+            data_type_identifier: string
+          end_angle_bracket: '>'
     - keyword: default
     - array_literal:
       - start_square_bracket: '['
@@ -119,15 +123,17 @@ file:
       keyword: declare
       naked_identifier: arr5
       data_type:
-        keyword: array
-        start_angle_bracket: <
-        data_type:
-          data_type_identifier: string
-          bracketed:
-            start_bracket: (
-            numeric_literal: '10'
-            end_bracket: )
-        end_angle_bracket: '>'
+        array_type:
+          keyword: array
+          start_angle_bracket: <
+          data_type:
+            data_type_identifier: string
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
+                numeric_literal: '10'
+                end_bracket: )
+          end_angle_bracket: '>'
 - statement_terminator: ;
 - statement:
     declare_segment:
@@ -135,16 +141,17 @@ file:
       naked_identifier: str1
       data_type:
         struct_type:
-        - keyword: struct
-        - start_angle_bracket: <
-        - parameter: f1
-        - data_type:
-            data_type_identifier: string
-        - comma: ','
-        - parameter: f2
-        - data_type:
-            data_type_identifier: string
-        - end_angle_bracket: '>'
+          keyword: struct
+          struct_type_schema:
+          - start_angle_bracket: <
+          - parameter: f1
+          - data_type:
+              data_type_identifier: string
+          - comma: ','
+          - parameter: f2
+          - data_type:
+              data_type_identifier: string
+          - end_angle_bracket: '>'
 - statement_terminator: ;
 - statement:
     declare_segment:
@@ -152,26 +159,29 @@ file:
     - naked_identifier: str2
     - data_type:
         struct_type:
-        - keyword: struct
-        - start_angle_bracket: <
-        - parameter: f1
-        - data_type:
-            data_type_identifier: string
-        - comma: ','
-        - parameter: f2
-        - data_type:
-            data_type_identifier: string
-        - end_angle_bracket: '>'
-    - keyword: default
-    - expression:
-        typeless_struct:
           keyword: struct
-          bracketed:
-          - start_bracket: (
-          - quoted_literal: "'one'"
+          struct_type_schema:
+          - start_angle_bracket: <
+          - parameter: f1
+          - data_type:
+              data_type_identifier: string
           - comma: ','
-          - quoted_literal: "'two'"
-          - end_bracket: )
+          - parameter: f2
+          - data_type:
+              data_type_identifier: string
+          - end_angle_bracket: '>'
+    - keyword: default
+    - expression:
+        typed_struct_literal:
+          struct_type:
+            keyword: struct
+          struct_literal:
+            bracketed:
+            - start_bracket: (
+            - quoted_literal: "'one'"
+            - comma: ','
+            - quoted_literal: "'two'"
+            - end_bracket: )
 - statement_terminator: ;
 - statement:
     declare_segment:
@@ -179,14 +189,16 @@ file:
     - naked_identifier: str3
     - keyword: default
     - expression:
-        typeless_struct:
-          keyword: struct
-          bracketed:
-          - start_bracket: (
-          - quoted_literal: "'one'"
-          - comma: ','
-          - quoted_literal: "'two'"
-          - end_bracket: )
+        typed_struct_literal:
+          struct_type:
+            keyword: struct
+          struct_literal:
+            bracketed:
+            - start_bracket: (
+            - quoted_literal: "'one'"
+            - comma: ','
+            - quoted_literal: "'two'"
+            - end_bracket: )
 - statement_terminator: ;
 - statement:
     declare_segment:
@@ -194,16 +206,17 @@ file:
     - naked_identifier: str4
     - data_type:
         struct_type:
-        - keyword: struct
-        - start_angle_bracket: <
-        - parameter: f1
-        - data_type:
-            data_type_identifier: string
-        - comma: ','
-        - parameter: f2
-        - data_type:
-            data_type_identifier: string
-        - end_angle_bracket: '>'
+          keyword: struct
+          struct_type_schema:
+          - start_angle_bracket: <
+          - parameter: f1
+          - data_type:
+              data_type_identifier: string
+          - comma: ','
+          - parameter: f2
+          - data_type:
+              data_type_identifier: string
+          - end_angle_bracket: '>'
     - keyword: default
     - tuple:
         bracketed:
@@ -219,24 +232,27 @@ file:
       naked_identifier: str5
       data_type:
         struct_type:
-        - keyword: struct
-        - start_angle_bracket: <
-        - parameter: f1
-        - data_type:
-            data_type_identifier: string
-            bracketed:
-              start_bracket: (
-              numeric_literal: '10'
-              end_bracket: )
-        - comma: ','
-        - parameter: f2
-        - data_type:
-            data_type_identifier: string
-            bracketed:
-              start_bracket: (
-              numeric_literal: '10'
-              end_bracket: )
-        - end_angle_bracket: '>'
+          keyword: struct
+          struct_type_schema:
+          - start_angle_bracket: <
+          - parameter: f1
+          - data_type:
+              data_type_identifier: string
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  numeric_literal: '10'
+                  end_bracket: )
+          - comma: ','
+          - parameter: f2
+          - data_type:
+              data_type_identifier: string
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  numeric_literal: '10'
+                  end_bracket: )
+          - end_angle_bracket: '>'
 - statement_terminator: ;
 - statement:
     declare_segment:
diff --git a/test/fixtures/dialects/bigquery/delete.sql b/test/fixtures/dialects/bigquery/delete.sql
index 3832978..23be97c 100644
--- a/test/fixtures/dialects/bigquery/delete.sql
+++ b/test/fixtures/dialects/bigquery/delete.sql
@@ -8,3 +8,7 @@ DELETE dataset.Inventory
 WHERE NOT EXISTS
   (SELECT * from dataset.NewArrivals
    WHERE Inventory.product = NewArrivals.product);
+
+DELETE FROM `project_id.dataset_id.target_name`
+WHERE TRUE
+;
diff --git a/test/fixtures/dialects/bigquery/delete.yml b/test/fixtures/dialects/bigquery/delete.yml
index d8623e8..889a51b 100644
--- a/test/fixtures/dialects/bigquery/delete.yml
+++ b/test/fixtures/dialects/bigquery/delete.yml
@@ -3,12 +3,12 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: fe5d02ee53e337f806fc3921393afde1c4edc4c03d5c94e3a7fbfd5b21fd5d56
+_hash: d0d792ccee74c7d010d7bc94eb4b3b7ac9f5f7c052ce3eb4de5401307396aa62
 file:
 - statement:
     delete_statement:
       keyword: DELETE
-      object_reference:
+      table_reference:
       - naked_identifier: dataset
       - dot: .
       - naked_identifier: Inventory
@@ -24,7 +24,7 @@ file:
 - statement:
     delete_statement:
       keyword: DELETE
-      object_reference:
+      table_reference:
       - naked_identifier: dataset
       - dot: .
       - naked_identifier: Inventory
@@ -61,7 +61,7 @@ file:
 - statement:
     delete_statement:
       keyword: DELETE
-      object_reference:
+      table_reference:
       - naked_identifier: dataset
       - dot: .
       - naked_identifier: Inventory
@@ -103,3 +103,14 @@ file:
                   - naked_identifier: product
             end_bracket: )
 - statement_terminator: ;
+- statement:
+    delete_statement:
+    - keyword: DELETE
+    - keyword: FROM
+    - table_reference:
+        quoted_identifier: '`project_id.dataset_id.target_name`'
+    - where_clause:
+        keyword: WHERE
+        expression:
+          boolean_literal: 'TRUE'
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/bigquery/select_example.yml b/test/fixtures/dialects/bigquery/select_example.yml
index 84e658b..4935892 100644
--- a/test/fixtures/dialects/bigquery/select_example.yml
+++ b/test/fixtures/dialects/bigquery/select_example.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 9a1622e165a454bcedc9eab2e974496e441c538bb14c13433e87435932355b34
+_hash: 491a329ef29ec67c85dc1e8a11e0ec17fbb9c706fb8e37b921f66636221f5dfe
 file:
   statement:
     with_compound_statement:
@@ -30,7 +30,7 @@ file:
             - comma: ','
             - select_clause_element:
                 expression:
-                  typeless_array:
+                  array_expression:
                     keyword: ARRAY
                     bracketed:
                       start_bracket: (
diff --git a/test/fixtures/dialects/bigquery/select_function_object_fields.yml b/test/fixtures/dialects/bigquery/select_function_object_fields.yml
index 07d02d6..7f9ddc2 100644
--- a/test/fixtures/dialects/bigquery/select_function_object_fields.yml
+++ b/test/fixtures/dialects/bigquery/select_function_object_fields.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: e1ac769a469451f83cd79aeacda74b84cd3e92d57f9d5f24df882939a37691d5
+_hash: ea825b3ba14b42f2b2e3bc7c24d40c6086e32fb5c5359e9ce157af8c0bccbe80
 file:
   statement:
     select_statement:
@@ -19,8 +19,9 @@ file:
                 column_reference:
                   naked_identifier: a
               end_bracket: )
-            dot: .
-            parameter: b
+            semi_structured_expression:
+              dot: .
+              naked_identifier: b
           alias_expression:
             keyword: AS
             naked_identifier: field
@@ -35,44 +36,47 @@ file:
                 column_reference:
                   naked_identifier: a
               end_bracket: )
-            dot: .
-            star: '*'
+            semi_structured_expression:
+              dot: .
+              star: '*'
           alias_expression:
             keyword: AS
             naked_identifier: wildcard
       - comma: ','
       - select_clause_element:
           function:
-          - function_name:
+            function_name:
               function_name_identifier: testFunction
-          - bracketed:
+            bracketed:
               start_bracket: (
               expression:
                 column_reference:
                   naked_identifier: a
               end_bracket: )
-          - dot: .
-          - parameter: b
-          - dot: .
-          - parameter: c
+            semi_structured_expression:
+            - dot: .
+            - naked_identifier: b
+            - dot: .
+            - naked_identifier: c
           alias_expression:
             keyword: AS
             naked_identifier: field_with_field
       - comma: ','
       - select_clause_element:
           function:
-          - function_name:
+            function_name:
               function_name_identifier: testFunction
-          - bracketed:
+            bracketed:
               start_bracket: (
               expression:
                 column_reference:
                   naked_identifier: a
               end_bracket: )
-          - dot: .
-          - parameter: b
-          - dot: .
-          - star: '*'
+            semi_structured_expression:
+            - dot: .
+            - naked_identifier: b
+            - dot: .
+            - star: '*'
           alias_expression:
             keyword: AS
             naked_identifier: field_with_wildcard
@@ -99,8 +103,9 @@ file:
                       numeric_literal: '0'
                     end_bracket: )
               end_square_bracket: ']'
-            dot: .
-            star: '*'
+            semi_structured_expression:
+              dot: .
+              star: '*'
           alias_expression:
             keyword: AS
             naked_identifier: field_with_offset_wildcard
@@ -127,8 +132,9 @@ file:
                       numeric_literal: '0'
                     end_bracket: )
               end_square_bracket: ']'
-            dot: .
-            star: '*'
+            semi_structured_expression:
+              dot: .
+              star: '*'
           alias_expression:
             keyword: AS
             naked_identifier: field_with_safe_offset_wildcard
@@ -155,8 +161,9 @@ file:
                       numeric_literal: '1'
                     end_bracket: )
               end_square_bracket: ']'
-            dot: .
-            star: '*'
+            semi_structured_expression:
+              dot: .
+              star: '*'
           alias_expression:
             keyword: AS
             naked_identifier: field_with_ordinal_wildcard
@@ -183,8 +190,9 @@ file:
                       numeric_literal: '1'
                     end_bracket: )
               end_square_bracket: ']'
-            dot: .
-            parameter: a
+            semi_structured_expression:
+              dot: .
+              naked_identifier: a
           alias_expression:
             keyword: AS
             naked_identifier: field_with_ordinal_field
diff --git a/test/fixtures/dialects/bigquery/select_hyphenated_table_name_in_from.sql b/test/fixtures/dialects/bigquery/select_hyphenated_table_name_in_from.sql
index e71c34e..8468956 100644
--- a/test/fixtures/dialects/bigquery/select_hyphenated_table_name_in_from.sql
+++ b/test/fixtures/dialects/bigquery/select_hyphenated_table_name_in_from.sql
@@ -1,5 +1,9 @@
 SELECT * FROM project-a.dataset-b.table-c JOIN dataset-c.table-d USING (a);
 
+SELECT * FROM a-1a.b.c;
+SELECT * FROM a-1.b.c;
+SELECT * FROM project23-123.dataset7-b1.table-2c JOIN dataset12-c1.table-1d USING (a);
+
 SELECT
    col1-col2 AS newcol1,
    col1 - col2 AS newcol2
diff --git a/test/fixtures/dialects/bigquery/select_hyphenated_table_name_in_from.yml b/test/fixtures/dialects/bigquery/select_hyphenated_table_name_in_from.yml
index 3a1c7e0..dfe7391 100644
--- a/test/fixtures/dialects/bigquery/select_hyphenated_table_name_in_from.yml
+++ b/test/fixtures/dialects/bigquery/select_hyphenated_table_name_in_from.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 4389fa4a2f6c1c625776cf5e631134833d847c12997731a811f2045eccb665d6
+_hash: 6f00f24816a288acc08abb48e0eb2f4fb427b593666602762f52ae53d5e62f87
 file:
 - statement:
     select_statement:
@@ -48,6 +48,93 @@ file:
               naked_identifier: a
               end_bracket: )
 - statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+              - naked_identifier: a
+              - dash: '-'
+              - naked_identifier: 1a
+              - dot: .
+              - naked_identifier: b
+              - dot: .
+              - naked_identifier: c
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+              - naked_identifier: a
+              - dash: '-'
+              - naked_identifier: '1'
+              - dot: .
+              - naked_identifier: b
+              - dot: .
+              - naked_identifier: c
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+              - naked_identifier: project23
+              - dash: '-'
+              - naked_identifier: '123'
+              - dot: .
+              - naked_identifier: dataset7
+              - dash: '-'
+              - naked_identifier: b1
+              - dot: .
+              - naked_identifier: table
+              - dash: '-'
+              - naked_identifier: 2c
+          join_clause:
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                - naked_identifier: dataset12
+                - dash: '-'
+                - naked_identifier: c1
+                - dot: .
+                - naked_identifier: table
+                - dash: '-'
+                - naked_identifier: 1d
+          - keyword: USING
+          - bracketed:
+              start_bracket: (
+              naked_identifier: a
+              end_bracket: )
+- statement_terminator: ;
 - statement:
     select_statement:
       select_clause:
diff --git a/test/fixtures/dialects/bigquery/select_mixture_of_array_literals.yml b/test/fixtures/dialects/bigquery/select_mixture_of_array_literals.yml
index 1d53311..27493ac 100644
--- a/test/fixtures/dialects/bigquery/select_mixture_of_array_literals.yml
+++ b/test/fixtures/dialects/bigquery/select_mixture_of_array_literals.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: e49b6d2bea54dae9c4a29e63e5f8dbc165525e0b9b91100b981a377ab6dfa1c1
+_hash: cbf066dd0c8317bbf87b4729592f41e65bf7ac4e26cf7eb7d0a29504232c43a4
 file:
   statement:
     select_statement:
@@ -21,15 +21,17 @@ file:
             end_square_bracket: ']'
       - comma: ','
       - select_clause_element:
-          array_literal:
-            keyword: ARRAY
-            start_angle_bracket: <
-            data_type:
-              data_type_identifier: BOOLEAN
-            end_angle_bracket: '>'
-            start_square_bracket: '['
-            boolean_literal: 'false'
-            end_square_bracket: ']'
+          typed_array_literal:
+            array_type:
+              keyword: ARRAY
+              start_angle_bracket: <
+              data_type:
+                data_type_identifier: BOOLEAN
+              end_angle_bracket: '>'
+            array_literal:
+              start_square_bracket: '['
+              boolean_literal: 'false'
+              end_square_bracket: ']'
       - comma: ','
       - select_clause_element:
           array_literal:
@@ -41,15 +43,17 @@ file:
             naked_identifier: strcol1
       - comma: ','
       - select_clause_element:
-          array_literal:
-            keyword: ARRAY
-            start_angle_bracket: <
-            data_type:
-              data_type_identifier: string
-            end_angle_bracket: '>'
-            start_square_bracket: '['
-            quoted_literal: "'b'"
-            end_square_bracket: ']'
+          typed_array_literal:
+            array_type:
+              keyword: ARRAY
+              start_angle_bracket: <
+              data_type:
+                data_type_identifier: string
+              end_angle_bracket: '>'
+            array_literal:
+              start_square_bracket: '['
+              quoted_literal: "'b'"
+              end_square_bracket: ']'
           alias_expression:
             keyword: AS
             naked_identifier: strcol2
@@ -64,15 +68,17 @@ file:
             naked_identifier: numcol1
       - comma: ','
       - select_clause_element:
-          array_literal:
-            keyword: ARRAY
-            start_angle_bracket: <
-            data_type:
-              data_type_identifier: NUMERIC
-            end_angle_bracket: '>'
-            start_square_bracket: '['
-            numeric_literal: '1.4'
-            end_square_bracket: ']'
+          typed_array_literal:
+            array_type:
+              keyword: ARRAY
+              start_angle_bracket: <
+              data_type:
+                data_type_identifier: NUMERIC
+              end_angle_bracket: '>'
+            array_literal:
+              start_square_bracket: '['
+              numeric_literal: '1.4'
+              end_square_bracket: ']'
           alias_expression:
             keyword: AS
             naked_identifier: numcol2
@@ -81,29 +87,31 @@ file:
           array_literal:
             start_square_bracket: '['
             expression:
-              typeless_struct:
-                keyword: STRUCT
-                bracketed:
-                - start_bracket: (
-                - quoted_literal: '"Rudisha"'
-                - alias_expression:
-                    keyword: AS
-                    naked_identifier: name
-                - comma: ','
-                - array_literal:
-                  - start_square_bracket: '['
-                  - numeric_literal: '23.4'
-                  - comma: ','
-                  - numeric_literal: '26.3'
-                  - comma: ','
-                  - numeric_literal: '26.4'
+              typed_struct_literal:
+                struct_type:
+                  keyword: STRUCT
+                struct_literal:
+                  bracketed:
+                  - start_bracket: (
+                  - quoted_literal: '"Rudisha"'
+                  - alias_expression:
+                      keyword: AS
+                      naked_identifier: name
                   - comma: ','
-                  - numeric_literal: '26.1'
-                  - end_square_bracket: ']'
-                - alias_expression:
-                    keyword: AS
-                    naked_identifier: splits
-                - end_bracket: )
+                  - array_literal:
+                    - start_square_bracket: '['
+                    - numeric_literal: '23.4'
+                    - comma: ','
+                    - numeric_literal: '26.3'
+                    - comma: ','
+                    - numeric_literal: '26.4'
+                    - comma: ','
+                    - numeric_literal: '26.1'
+                    - end_square_bracket: ']'
+                  - alias_expression:
+                      keyword: AS
+                      naked_identifier: splits
+                  - end_bracket: )
             end_square_bracket: ']'
           alias_expression:
             keyword: AS
@@ -115,19 +123,19 @@ file:
             - naked_identifier: col1
             - dot: .
             - naked_identifier: obj1
-            array_accessor:
-              start_square_bracket: '['
-              expression:
-                function:
-                  function_name:
-                    function_name_identifier: safe_offset
-                  bracketed:
-                    start_bracket: (
-                    expression:
-                      numeric_literal: '1'
-                    end_bracket: )
-              end_square_bracket: ']'
             semi_structured_expression:
+              array_accessor:
+                start_square_bracket: '['
+                expression:
+                  function:
+                    function_name:
+                      function_name_identifier: safe_offset
+                    bracketed:
+                      start_bracket: (
+                      expression:
+                        numeric_literal: '1'
+                      end_bracket: )
+                end_square_bracket: ']'
               dot: .
               naked_identifier: a
           alias_expression:
diff --git a/test/fixtures/dialects/bigquery/select_pivot.sql b/test/fixtures/dialects/bigquery/select_pivot.sql
index 476c4c2..d197742 100644
--- a/test/fixtures/dialects/bigquery/select_pivot.sql
+++ b/test/fixtures/dialects/bigquery/select_pivot.sql
@@ -18,3 +18,29 @@ SELECT * FROM
   (SELECT sales, quarter FROM Produce)
   PIVOT(SUM(sales), COUNT(sales) FOR quarter IN ('Q1', 'Q2', 'Q3'));
 
+SELECT
+  col1,
+  col2
+FROM
+  table1
+  PIVOT(SUM(`grand_total`) FOR REPLACE(LOWER(`media_type`), " ", "_") IN (
+    "cinema", "digital", "direct_mail", "door_drops", "outdoor", "press", "radio", "tv"
+  ));
+
+SELECT
+  col1,
+  col2
+FROM
+  table1
+  PIVOT(SUM(`grand_total`) FOR `media_type` IN (
+    "cinema", "digital", "direct_mail", "door_drops", "outdoor", "press", "radio", "tv"
+  ));
+
+SELECT
+  col1,
+  col2
+FROM
+  table1
+  PIVOT(SUM(`grand_total`) FOR '2' || '1' IN (
+    "cinema", "digital", "direct_mail", "door_drops", "outdoor", "press", "radio", "tv"
+  ));
diff --git a/test/fixtures/dialects/bigquery/select_pivot.yml b/test/fixtures/dialects/bigquery/select_pivot.yml
index 5019afb..9671a7c 100644
--- a/test/fixtures/dialects/bigquery/select_pivot.yml
+++ b/test/fixtures/dialects/bigquery/select_pivot.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 49f11ac3710251e70d56b2401c43c5cde0e08d3f81d5913210af6c8af06ce0b0
+_hash: 7e339af111db52b3b16ec80bdaa37d553acfb835383a16a3629faa621864630b
 file:
 - statement:
     select_statement:
@@ -49,7 +49,9 @@ file:
                       naked_identifier: sales
                   end_bracket: )
             - keyword: FOR
-            - naked_identifier: quarter
+            - pivot_for_clause:
+                column_reference:
+                  naked_identifier: quarter
             - keyword: IN
             - bracketed:
               - start_bracket: (
@@ -110,7 +112,9 @@ file:
                       naked_identifier: sales
                   end_bracket: )
             - keyword: FOR
-            - naked_identifier: quarter
+            - pivot_for_clause:
+                column_reference:
+                  naked_identifier: quarter
             - keyword: IN
             - bracketed:
               - start_bracket: (
@@ -168,7 +172,9 @@ file:
                       naked_identifier: sales
                   end_bracket: )
             - keyword: FOR
-            - naked_identifier: quarter
+            - pivot_for_clause:
+                column_reference:
+                  naked_identifier: quarter
             - keyword: IN
             - bracketed:
               - start_bracket: (
@@ -227,7 +233,9 @@ file:
                       naked_identifier: sales
                   end_bracket: )
             - keyword: FOR
-            - naked_identifier: quarter
+            - pivot_for_clause:
+                column_reference:
+                  naked_identifier: quarter
             - keyword: IN
             - bracketed:
               - start_bracket: (
@@ -296,7 +304,9 @@ file:
                       naked_identifier: sales
                   end_bracket: )
             - keyword: FOR
-            - naked_identifier: quarter
+            - pivot_for_clause:
+                column_reference:
+                  naked_identifier: quarter
             - keyword: IN
             - bracketed:
               - start_bracket: (
@@ -308,3 +318,195 @@ file:
               - end_bracket: )
             - end_bracket: )
 - statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          column_reference:
+            naked_identifier: col1
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: col2
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: table1
+          from_pivot_expression:
+            keyword: PIVOT
+            bracketed:
+            - start_bracket: (
+            - function:
+                function_name:
+                  function_name_identifier: SUM
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    column_reference:
+                      quoted_identifier: '`grand_total`'
+                  end_bracket: )
+            - keyword: FOR
+            - pivot_for_clause:
+                function:
+                  function_name:
+                    function_name_identifier: REPLACE
+                  bracketed:
+                  - start_bracket: (
+                  - expression:
+                      function:
+                        function_name:
+                          function_name_identifier: LOWER
+                        bracketed:
+                          start_bracket: (
+                          expression:
+                            column_reference:
+                              quoted_identifier: '`media_type`'
+                          end_bracket: )
+                  - comma: ','
+                  - expression:
+                      quoted_literal: '" "'
+                  - comma: ','
+                  - expression:
+                      quoted_literal: '"_"'
+                  - end_bracket: )
+            - keyword: IN
+            - bracketed:
+              - start_bracket: (
+              - quoted_literal: '"cinema"'
+              - comma: ','
+              - quoted_literal: '"digital"'
+              - comma: ','
+              - quoted_literal: '"direct_mail"'
+              - comma: ','
+              - quoted_literal: '"door_drops"'
+              - comma: ','
+              - quoted_literal: '"outdoor"'
+              - comma: ','
+              - quoted_literal: '"press"'
+              - comma: ','
+              - quoted_literal: '"radio"'
+              - comma: ','
+              - quoted_literal: '"tv"'
+              - end_bracket: )
+            - end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          column_reference:
+            naked_identifier: col1
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: col2
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: table1
+          from_pivot_expression:
+            keyword: PIVOT
+            bracketed:
+            - start_bracket: (
+            - function:
+                function_name:
+                  function_name_identifier: SUM
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    column_reference:
+                      quoted_identifier: '`grand_total`'
+                  end_bracket: )
+            - keyword: FOR
+            - pivot_for_clause:
+                column_reference:
+                  quoted_identifier: '`media_type`'
+            - keyword: IN
+            - bracketed:
+              - start_bracket: (
+              - quoted_literal: '"cinema"'
+              - comma: ','
+              - quoted_literal: '"digital"'
+              - comma: ','
+              - quoted_literal: '"direct_mail"'
+              - comma: ','
+              - quoted_literal: '"door_drops"'
+              - comma: ','
+              - quoted_literal: '"outdoor"'
+              - comma: ','
+              - quoted_literal: '"press"'
+              - comma: ','
+              - quoted_literal: '"radio"'
+              - comma: ','
+              - quoted_literal: '"tv"'
+              - end_bracket: )
+            - end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          column_reference:
+            naked_identifier: col1
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: col2
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: table1
+          from_pivot_expression:
+            keyword: PIVOT
+            bracketed:
+            - start_bracket: (
+            - function:
+                function_name:
+                  function_name_identifier: SUM
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    column_reference:
+                      quoted_identifier: '`grand_total`'
+                  end_bracket: )
+            - keyword: FOR
+            - pivot_for_clause:
+                expression:
+                - quoted_literal: "'2'"
+                - binary_operator:
+                  - pipe: '|'
+                  - pipe: '|'
+                - quoted_literal: "'1'"
+            - keyword: IN
+            - bracketed:
+              - start_bracket: (
+              - quoted_literal: '"cinema"'
+              - comma: ','
+              - quoted_literal: '"digital"'
+              - comma: ','
+              - quoted_literal: '"direct_mail"'
+              - comma: ','
+              - quoted_literal: '"door_drops"'
+              - comma: ','
+              - quoted_literal: '"outdoor"'
+              - comma: ','
+              - quoted_literal: '"press"'
+              - comma: ','
+              - quoted_literal: '"radio"'
+              - comma: ','
+              - quoted_literal: '"tv"'
+              - end_bracket: )
+            - end_bracket: )
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/bigquery/select_struct.sql b/test/fixtures/dialects/bigquery/select_struct.sql
index 251522f..0a1887b 100644
--- a/test/fixtures/dialects/bigquery/select_struct.sql
+++ b/test/fixtures/dialects/bigquery/select_struct.sql
@@ -13,6 +13,10 @@ select as struct
   '1' as bb,
   2 as aa;
 
+select distinct as struct
+    '1' as bb,
+    2 as aa;
+
 -- Example of explicitly building a struct in a select clause.
 select
   struct(
diff --git a/test/fixtures/dialects/bigquery/select_struct.yml b/test/fixtures/dialects/bigquery/select_struct.yml
index 73802fc..f44b924 100644
--- a/test/fixtures/dialects/bigquery/select_struct.yml
+++ b/test/fixtures/dialects/bigquery/select_struct.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 1904673c879383bfd4b5053b1af3ff5eda6317ab68619647a3910c79a2d0107a
+_hash: 886daa26e3b50467bf7f6a7be24692fe49125414ffc523e1eb4c7efc91067e16
 file:
 - statement:
     select_statement:
@@ -17,7 +17,7 @@ file:
       - comma: ','
       - select_clause_element:
           expression:
-            typeless_array:
+            array_expression:
               keyword: array
               bracketed:
                 start_bracket: (
@@ -79,32 +79,54 @@ file:
             keyword: as
             naked_identifier: aa
 - statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: select
+      - select_clause_modifier:
+        - keyword: distinct
+        - keyword: as
+        - keyword: struct
+      - select_clause_element:
+          quoted_literal: "'1'"
+          alias_expression:
+            keyword: as
+            naked_identifier: bb
+      - comma: ','
+      - select_clause_element:
+          numeric_literal: '2'
+          alias_expression:
+            keyword: as
+            naked_identifier: aa
+- statement_terminator: ;
 - statement:
     select_statement:
       select_clause:
         keyword: select
         select_clause_element:
           expression:
-            typeless_struct:
-              keyword: struct
-              bracketed:
-              - start_bracket: (
-              - column_reference:
-                - naked_identifier: bar
-                - dot: .
-                - naked_identifier: bar_id
-              - alias_expression:
-                  keyword: as
-                  naked_identifier: id
-              - comma: ','
-              - column_reference:
-                - naked_identifier: bar
-                - dot: .
-                - naked_identifier: bar_name
-              - alias_expression:
-                  keyword: as
-                  naked_identifier: bar
-              - end_bracket: )
+            typed_struct_literal:
+              struct_type:
+                keyword: struct
+              struct_literal:
+                bracketed:
+                - start_bracket: (
+                - column_reference:
+                  - naked_identifier: bar
+                  - dot: .
+                  - naked_identifier: bar_id
+                - alias_expression:
+                    keyword: as
+                    naked_identifier: id
+                - comma: ','
+                - column_reference:
+                  - naked_identifier: bar
+                  - dot: .
+                  - naked_identifier: bar_name
+                - alias_expression:
+                    keyword: as
+                    naked_identifier: bar
+                - end_bracket: )
           alias_expression:
             keyword: as
             naked_identifier: bar
@@ -158,39 +180,42 @@ file:
                 bracketed:
                   start_bracket: (
                   expression:
-                    array_literal:
-                    - keyword: ARRAY
-                    - start_angle_bracket: <
-                    - data_type:
-                        struct_type:
-                        - keyword: STRUCT
-                        - start_angle_bracket: <
-                        - parameter: col_1
-                        - data_type:
-                            data_type_identifier: STRING
-                        - comma: ','
-                        - parameter: col_2
-                        - data_type:
-                            data_type_identifier: STRING
-                        - end_angle_bracket: '>'
-                    - end_angle_bracket: '>'
-                    - start_square_bracket: '['
-                    - expression:
-                        bracketed:
-                        - start_bracket: (
-                        - quoted_literal: "'hello'"
-                        - comma: ','
-                        - quoted_literal: "'world'"
-                        - end_bracket: )
-                    - comma: ','
-                    - expression:
-                        bracketed:
-                        - start_bracket: (
-                        - quoted_literal: "'hi'"
-                        - comma: ','
-                        - quoted_literal: "'there'"
-                        - end_bracket: )
-                    - end_square_bracket: ']'
+                    typed_array_literal:
+                      array_type:
+                        keyword: ARRAY
+                        start_angle_bracket: <
+                        data_type:
+                          struct_type:
+                            keyword: STRUCT
+                            struct_type_schema:
+                            - start_angle_bracket: <
+                            - parameter: col_1
+                            - data_type:
+                                data_type_identifier: STRING
+                            - comma: ','
+                            - parameter: col_2
+                            - data_type:
+                                data_type_identifier: STRING
+                            - end_angle_bracket: '>'
+                        end_angle_bracket: '>'
+                      array_literal:
+                      - start_square_bracket: '['
+                      - expression:
+                          bracketed:
+                          - start_bracket: (
+                          - quoted_literal: "'hello'"
+                          - comma: ','
+                          - quoted_literal: "'world'"
+                          - end_bracket: )
+                      - comma: ','
+                      - expression:
+                          bracketed:
+                          - start_bracket: (
+                          - quoted_literal: "'hi'"
+                          - comma: ','
+                          - quoted_literal: "'there'"
+                          - end_bracket: )
+                      - end_square_bracket: ']'
                   end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -199,71 +224,78 @@ file:
       - keyword: SELECT
       - select_clause_element:
           expression:
-            struct_type:
-              keyword: STRUCT
-              start_angle_bracket: <
-              data_type:
-                data_type_identifier: int64
-              end_angle_bracket: '>'
-            bracketed:
-              start_bracket: (
-              expression:
-                numeric_literal: '5'
-              end_bracket: )
+            typed_struct_literal:
+              struct_type:
+                keyword: STRUCT
+                struct_type_schema:
+                  start_angle_bracket: <
+                  data_type:
+                    data_type_identifier: int64
+                  end_angle_bracket: '>'
+              struct_literal:
+                bracketed:
+                  start_bracket: (
+                  numeric_literal: '5'
+                  end_bracket: )
       - comma: ','
       - select_clause_element:
           expression:
-            struct_type:
-              keyword: STRUCT
-              start_angle_bracket: <
-              data_type:
-                data_type_identifier: date
-              end_angle_bracket: '>'
-            bracketed:
-              start_bracket: (
-              expression:
-                quoted_literal: '"2011-05-05"'
-              end_bracket: )
+            typed_struct_literal:
+              struct_type:
+                keyword: STRUCT
+                struct_type_schema:
+                  start_angle_bracket: <
+                  data_type:
+                    data_type_identifier: date
+                  end_angle_bracket: '>'
+              struct_literal:
+                bracketed:
+                  start_bracket: (
+                  quoted_literal: '"2011-05-05"'
+                  end_bracket: )
       - comma: ','
       - select_clause_element:
           expression:
-            struct_type:
-            - keyword: STRUCT
-            - start_angle_bracket: <
-            - parameter: x
-            - data_type:
-                data_type_identifier: int64
-            - comma: ','
-            - parameter: y
-            - data_type:
-                data_type_identifier: string
-            - end_angle_bracket: '>'
-            bracketed:
-            - start_bracket: (
-            - expression:
-                numeric_literal: '1'
-            - comma: ','
-            - expression:
-                column_reference:
-                - naked_identifier: t
-                - dot: .
-                - naked_identifier: str_col
-            - end_bracket: )
+            typed_struct_literal:
+              struct_type:
+                keyword: STRUCT
+                struct_type_schema:
+                - start_angle_bracket: <
+                - parameter: x
+                - data_type:
+                    data_type_identifier: int64
+                - comma: ','
+                - parameter: y
+                - data_type:
+                    data_type_identifier: string
+                - end_angle_bracket: '>'
+              struct_literal:
+                bracketed:
+                  start_bracket: (
+                  numeric_literal: '1'
+                  comma: ','
+                  column_reference:
+                  - naked_identifier: t
+                  - dot: .
+                  - naked_identifier: str_col
+                  end_bracket: )
       - comma: ','
       - select_clause_element:
           expression:
-            struct_type:
-              keyword: STRUCT
-              start_angle_bracket: <
-              data_type:
-                data_type_identifier: int64
-              end_angle_bracket: '>'
-            bracketed:
-              start_bracket: (
-              expression:
-                column_reference:
-                  naked_identifier: int_col
-              end_bracket: )
+            typed_struct_literal:
+              struct_type:
+                keyword: STRUCT
+                struct_type_schema:
+                  start_angle_bracket: <
+                  data_type:
+                    data_type_identifier: int64
+                  end_angle_bracket: '>'
+              struct_literal:
+                bracketed:
+                  start_bracket: (
+                  column_reference:
+                    naked_identifier: int_col
+                  end_bracket: )
 - statement_terminator: ;
 - statement:
     select_statement:
@@ -271,16 +303,18 @@ file:
         keyword: SELECT
         select_clause_element:
           expression:
-            typeless_struct:
-              keyword: STRUCT
-              bracketed:
-              - start_bracket: (
-              - column_reference:
-                  naked_identifier: some_field
-              - comma: ','
-              - column_reference:
-                  naked_identifier: some_other_field
-              - end_bracket: )
+            typed_struct_literal:
+              struct_type:
+                keyword: STRUCT
+              struct_literal:
+                bracketed:
+                - start_bracket: (
+                - column_reference:
+                    naked_identifier: some_field
+                - comma: ','
+                - column_reference:
+                    naked_identifier: some_other_field
+                - end_bracket: )
           alias_expression:
             keyword: AS
             naked_identifier: col
diff --git a/test/fixtures/dialects/bigquery/select_typeless_struct_inside_function.yml b/test/fixtures/dialects/bigquery/select_typeless_struct_inside_function.yml
index 78ce0dc..5f35e3a 100644
--- a/test/fixtures/dialects/bigquery/select_typeless_struct_inside_function.yml
+++ b/test/fixtures/dialects/bigquery/select_typeless_struct_inside_function.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 7c04ee8c9f925904cdc9cb7e006f1bb3fcc6b131a147151e2d9d0c2b74af92ac
+_hash: c95046c9221d495e525f8bdbc766233769c7389747bc4042ae8b8ddf6f8ebeca
 file:
 - statement:
     select_statement:
@@ -11,24 +11,28 @@ file:
         keyword: SELECT
         select_clause_element:
           expression:
-            typeless_struct:
-              keyword: STRUCT
-              bracketed:
-                start_bracket: (
-                expression:
-                  typeless_struct:
-                    keyword: STRUCT
-                    bracketed:
-                      start_bracket: (
-                      numeric_literal: '1'
-                      alias_expression:
-                        keyword: AS
-                        naked_identifier: b
-                      end_bracket: )
-                alias_expression:
-                  keyword: AS
-                  naked_identifier: a
-                end_bracket: )
+            typed_struct_literal:
+              struct_type:
+                keyword: STRUCT
+              struct_literal:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    typed_struct_literal:
+                      struct_type:
+                        keyword: STRUCT
+                      struct_literal:
+                        bracketed:
+                          start_bracket: (
+                          numeric_literal: '1'
+                          alias_expression:
+                            keyword: AS
+                            naked_identifier: b
+                          end_bracket: )
+                  alias_expression:
+                    keyword: AS
+                    naked_identifier: a
+                  end_bracket: )
           alias_expression:
             keyword: AS
             naked_identifier: foo
@@ -44,22 +48,24 @@ file:
             bracketed:
               start_bracket: (
               expression:
-                typeless_struct:
-                  keyword: STRUCT
-                  bracketed:
-                  - start_bracket: (
-                  - column_reference:
-                      naked_identifier: a
-                  - alias_expression:
-                      keyword: AS
-                      naked_identifier: a
-                  - comma: ','
-                  - column_reference:
-                      naked_identifier: b
-                  - alias_expression:
-                      keyword: AS
-                      naked_identifier: b
-                  - end_bracket: )
+                typed_struct_literal:
+                  struct_type:
+                    keyword: STRUCT
+                  struct_literal:
+                    bracketed:
+                    - start_bracket: (
+                    - column_reference:
+                        naked_identifier: a
+                    - alias_expression:
+                        keyword: AS
+                        naked_identifier: a
+                    - comma: ','
+                    - column_reference:
+                        naked_identifier: b
+                    - alias_expression:
+                        keyword: AS
+                        naked_identifier: b
+                    - end_bracket: )
               end_bracket: )
       from_clause:
         keyword: FROM
diff --git a/test/fixtures/dialects/bigquery/select_unpivot.sql b/test/fixtures/dialects/bigquery/select_unpivot.sql
index 6d459c4..fb12411 100644
--- a/test/fixtures/dialects/bigquery/select_unpivot.sql
+++ b/test/fixtures/dialects/bigquery/select_unpivot.sql
@@ -6,6 +6,9 @@ SELECT * FROM Produce;
 SELECT * FROM Produce
 UNPIVOT(sales FOR quarter IN (Q1, Q2, Q3, Q4));
 
+SELECT * FROM Produce
+UNPIVOT(sales FOR quarter IN (Q1 AS 1, Q2 AS 2, Q3 AS 3, Q4 AS 4));
+
 SELECT * FROM Produce
 UNPIVOT INCLUDE NULLS (sales FOR quarter IN (Q1, Q2, Q3, Q4));
 
diff --git a/test/fixtures/dialects/bigquery/select_unpivot.yml b/test/fixtures/dialects/bigquery/select_unpivot.yml
index e5f710c..ba88fd6 100644
--- a/test/fixtures/dialects/bigquery/select_unpivot.yml
+++ b/test/fixtures/dialects/bigquery/select_unpivot.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: f277aef81cf2ed68a5440c23a66868f475d0cbf1af04e44fbf9cd38b923c1d21
+_hash: 7ae3af309e693b3d3a22d339e25a49727a20c9575cebaf26d47657c313dc9bdd
 file:
 - statement:
     with_compound_statement:
@@ -117,6 +117,53 @@ file:
               - end_bracket: )
             - end_bracket: )
 - statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: Produce
+          from_unpivot_expression:
+            keyword: UNPIVOT
+            bracketed:
+            - start_bracket: (
+            - naked_identifier: sales
+            - keyword: FOR
+            - naked_identifier: quarter
+            - keyword: IN
+            - bracketed:
+              - start_bracket: (
+              - naked_identifier: Q1
+              - alias_expression:
+                  keyword: AS
+                  numeric_literal: '1'
+              - comma: ','
+              - naked_identifier: Q2
+              - alias_expression:
+                  keyword: AS
+                  numeric_literal: '2'
+              - comma: ','
+              - naked_identifier: Q3
+              - alias_expression:
+                  keyword: AS
+                  numeric_literal: '3'
+              - comma: ','
+              - naked_identifier: Q4
+              - alias_expression:
+                  keyword: AS
+                  numeric_literal: '4'
+              - end_bracket: )
+            - end_bracket: )
+- statement_terminator: ;
 - statement:
     select_statement:
       select_clause:
diff --git a/test/fixtures/dialects/bigquery/select_with_offset_2.yml b/test/fixtures/dialects/bigquery/select_with_offset_2.yml
index 0b8e56e..985b406 100644
--- a/test/fixtures/dialects/bigquery/select_with_offset_2.yml
+++ b/test/fixtures/dialects/bigquery/select_with_offset_2.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 75e49530984992ff87e54e238cb77e823a9e6484475b723c439faca5747acb2b
+_hash: ece671bded750655c13f51467cb244bc1db9c8c9b97b074605788d583db69f80
 file:
   statement:
     select_statement:
@@ -11,7 +11,7 @@ file:
         keyword: SELECT
         select_clause_element:
           expression:
-            typeless_array:
+            array_expression:
               keyword: ARRAY
               bracketed:
                 start_bracket: (
diff --git a/test/fixtures/dialects/bigquery/select_with_window.sql b/test/fixtures/dialects/bigquery/select_with_window.sql
new file mode 100644
index 0000000..fce13be
--- /dev/null
+++ b/test/fixtures/dialects/bigquery/select_with_window.sql
@@ -0,0 +1,36 @@
+SELECT item, purchases, category, LAST_VALUE(item)
+  OVER (item_window) AS most_popular
+FROM Produce
+WINDOW item_window AS (
+  PARTITION BY category
+  ORDER BY purchases
+  ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING);
+
+SELECT item, purchases, category, LAST_VALUE(item)
+  OVER (d) AS most_popular
+FROM Produce
+WINDOW
+  a AS (PARTITION BY category),
+  b AS (a ORDER BY purchases),
+  c AS (b ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING),
+  d AS (c);
+
+SELECT item, purchases, category, LAST_VALUE(item)
+  OVER (c ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) AS most_popular
+FROM Produce
+WINDOW
+  a AS (PARTITION BY category),
+  b AS (a ORDER BY purchases),
+  c AS b;
+
+select
+    *
+    , max(x) over (window_z) as max_x_over_z
+from raw_data_1
+window window_z as (partition by z)
+union all
+select
+    *
+    , max(x) over (window_z) as max_x_over_z
+from raw_data_2
+window window_z as (partition by z);
diff --git a/test/fixtures/dialects/bigquery/select_with_window.yml b/test/fixtures/dialects/bigquery/select_with_window.yml
new file mode 100644
index 0000000..0633a2e
--- /dev/null
+++ b/test/fixtures/dialects/bigquery/select_with_window.yml
@@ -0,0 +1,370 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 078f423fe0fa8ac12ea0aa72af5d13b5f50c251898e1cb1408e33805226f6a01
+file:
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          column_reference:
+            naked_identifier: item
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: purchases
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: category
+      - comma: ','
+      - select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: LAST_VALUE
+            bracketed:
+              start_bracket: (
+              expression:
+                column_reference:
+                  naked_identifier: item
+              end_bracket: )
+            over_clause:
+              keyword: OVER
+              bracketed:
+                start_bracket: (
+                window_specification:
+                  naked_identifier: item_window
+                end_bracket: )
+          alias_expression:
+            keyword: AS
+            naked_identifier: most_popular
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: Produce
+      named_window:
+        keyword: WINDOW
+        named_window_expression:
+          naked_identifier: item_window
+          keyword: AS
+          bracketed:
+            start_bracket: (
+            window_specification:
+              partitionby_clause:
+              - keyword: PARTITION
+              - keyword: BY
+              - expression:
+                  column_reference:
+                    naked_identifier: category
+              orderby_clause:
+              - keyword: ORDER
+              - keyword: BY
+              - column_reference:
+                  naked_identifier: purchases
+              frame_clause:
+              - keyword: ROWS
+              - keyword: BETWEEN
+              - numeric_literal: '2'
+              - keyword: PRECEDING
+              - keyword: AND
+              - numeric_literal: '2'
+              - keyword: FOLLOWING
+            end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          column_reference:
+            naked_identifier: item
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: purchases
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: category
+      - comma: ','
+      - select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: LAST_VALUE
+            bracketed:
+              start_bracket: (
+              expression:
+                column_reference:
+                  naked_identifier: item
+              end_bracket: )
+            over_clause:
+              keyword: OVER
+              bracketed:
+                start_bracket: (
+                window_specification:
+                  naked_identifier: d
+                end_bracket: )
+          alias_expression:
+            keyword: AS
+            naked_identifier: most_popular
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: Produce
+      named_window:
+      - keyword: WINDOW
+      - named_window_expression:
+          naked_identifier: a
+          keyword: AS
+          bracketed:
+            start_bracket: (
+            window_specification:
+              partitionby_clause:
+              - keyword: PARTITION
+              - keyword: BY
+              - expression:
+                  column_reference:
+                    naked_identifier: category
+            end_bracket: )
+      - comma: ','
+      - named_window_expression:
+          naked_identifier: b
+          keyword: AS
+          bracketed:
+            start_bracket: (
+            window_specification:
+              naked_identifier: a
+              orderby_clause:
+              - keyword: ORDER
+              - keyword: BY
+              - column_reference:
+                  naked_identifier: purchases
+            end_bracket: )
+      - comma: ','
+      - named_window_expression:
+          naked_identifier: c
+          keyword: AS
+          bracketed:
+            start_bracket: (
+            window_specification:
+              naked_identifier: b
+              frame_clause:
+              - keyword: ROWS
+              - keyword: BETWEEN
+              - numeric_literal: '2'
+              - keyword: PRECEDING
+              - keyword: AND
+              - numeric_literal: '2'
+              - keyword: FOLLOWING
+            end_bracket: )
+      - comma: ','
+      - named_window_expression:
+          naked_identifier: d
+          keyword: AS
+          bracketed:
+            start_bracket: (
+            window_specification:
+              naked_identifier: c
+            end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          column_reference:
+            naked_identifier: item
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: purchases
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: category
+      - comma: ','
+      - select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: LAST_VALUE
+            bracketed:
+              start_bracket: (
+              expression:
+                column_reference:
+                  naked_identifier: item
+              end_bracket: )
+            over_clause:
+              keyword: OVER
+              bracketed:
+                start_bracket: (
+                window_specification:
+                  naked_identifier: c
+                  frame_clause:
+                  - keyword: ROWS
+                  - keyword: BETWEEN
+                  - numeric_literal: '2'
+                  - keyword: PRECEDING
+                  - keyword: AND
+                  - numeric_literal: '2'
+                  - keyword: FOLLOWING
+                end_bracket: )
+          alias_expression:
+            keyword: AS
+            naked_identifier: most_popular
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: Produce
+      named_window:
+      - keyword: WINDOW
+      - named_window_expression:
+          naked_identifier: a
+          keyword: AS
+          bracketed:
+            start_bracket: (
+            window_specification:
+              partitionby_clause:
+              - keyword: PARTITION
+              - keyword: BY
+              - expression:
+                  column_reference:
+                    naked_identifier: category
+            end_bracket: )
+      - comma: ','
+      - named_window_expression:
+          naked_identifier: b
+          keyword: AS
+          bracketed:
+            start_bracket: (
+            window_specification:
+              naked_identifier: a
+              orderby_clause:
+              - keyword: ORDER
+              - keyword: BY
+              - column_reference:
+                  naked_identifier: purchases
+            end_bracket: )
+      - comma: ','
+      - named_window_expression:
+        - naked_identifier: c
+        - keyword: AS
+        - naked_identifier: b
+- statement_terminator: ;
+- statement:
+    set_expression:
+    - select_statement:
+        select_clause:
+        - keyword: select
+        - select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        - comma: ','
+        - select_clause_element:
+            function:
+              function_name:
+                function_name_identifier: max
+              bracketed:
+                start_bracket: (
+                expression:
+                  column_reference:
+                    naked_identifier: x
+                end_bracket: )
+              over_clause:
+                keyword: over
+                bracketed:
+                  start_bracket: (
+                  window_specification:
+                    naked_identifier: window_z
+                  end_bracket: )
+            alias_expression:
+              keyword: as
+              naked_identifier: max_x_over_z
+        from_clause:
+          keyword: from
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: raw_data_1
+        named_window:
+          keyword: window
+          named_window_expression:
+            naked_identifier: window_z
+            keyword: as
+            bracketed:
+              start_bracket: (
+              window_specification:
+                partitionby_clause:
+                - keyword: partition
+                - keyword: by
+                - expression:
+                    column_reference:
+                      naked_identifier: z
+              end_bracket: )
+    - set_operator:
+      - keyword: union
+      - keyword: all
+    - select_statement:
+        select_clause:
+        - keyword: select
+        - select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        - comma: ','
+        - select_clause_element:
+            function:
+              function_name:
+                function_name_identifier: max
+              bracketed:
+                start_bracket: (
+                expression:
+                  column_reference:
+                    naked_identifier: x
+                end_bracket: )
+              over_clause:
+                keyword: over
+                bracketed:
+                  start_bracket: (
+                  window_specification:
+                    naked_identifier: window_z
+                  end_bracket: )
+            alias_expression:
+              keyword: as
+              naked_identifier: max_x_over_z
+        from_clause:
+          keyword: from
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: raw_data_2
+        named_window:
+          keyword: window
+          named_window_expression:
+            naked_identifier: window_z
+            keyword: as
+            bracketed:
+              start_bracket: (
+              window_specification:
+                partitionby_clause:
+                - keyword: partition
+                - keyword: by
+                - expression:
+                    column_reference:
+                      naked_identifier: z
+              end_bracket: )
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/bigquery/typeless_array.yml b/test/fixtures/dialects/bigquery/typeless_array.yml
index f094698..f0b73a0 100644
--- a/test/fixtures/dialects/bigquery/typeless_array.yml
+++ b/test/fixtures/dialects/bigquery/typeless_array.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 5070e22ad719d285be0755f97cf110793ba0f45bb27f5bb6cb7f7a4e28127e13
+_hash: 153865d0743d678de44de1f4629c346e170b04476e09b91ae2b671f31bef4d7b
 file:
   statement:
     select_statement:
@@ -11,7 +11,7 @@ file:
         keyword: SELECT
         select_clause_element:
           expression:
-            typeless_array:
+            array_expression:
               keyword: ARRAY
               bracketed:
                 start_bracket: (
diff --git a/test/fixtures/dialects/bigquery/typeless_struct.yml b/test/fixtures/dialects/bigquery/typeless_struct.yml
index 9664339..1f3bc75 100644
--- a/test/fixtures/dialects/bigquery/typeless_struct.yml
+++ b/test/fixtures/dialects/bigquery/typeless_struct.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: c8a4ce0043e8afa1675113c93ec6f9b85a230ecc31e7f5a2bb0fc4dd6ac5e824
+_hash: 6e5ae6640110e593f6849562ffcd1161fe6ccefbadb97693bc80e003c4e935c8
 file:
 - statement:
     select_statement:
@@ -19,36 +19,40 @@ file:
                 boolean_literal: 'TRUE'
             - comma: ','
             - expression:
-                typeless_struct:
-                  keyword: STRUCT
-                  bracketed:
-                  - start_bracket: (
-                  - quoted_literal: "'hello'"
-                  - alias_expression:
-                      keyword: AS
-                      naked_identifier: greeting
-                  - comma: ','
-                  - quoted_literal: "'world'"
-                  - alias_expression:
-                      keyword: AS
-                      naked_identifier: subject
-                  - end_bracket: )
+                typed_struct_literal:
+                  struct_type:
+                    keyword: STRUCT
+                  struct_literal:
+                    bracketed:
+                    - start_bracket: (
+                    - quoted_literal: "'hello'"
+                    - alias_expression:
+                        keyword: AS
+                        naked_identifier: greeting
+                    - comma: ','
+                    - quoted_literal: "'world'"
+                    - alias_expression:
+                        keyword: AS
+                        naked_identifier: subject
+                    - end_bracket: )
             - comma: ','
             - expression:
-                typeless_struct:
-                  keyword: STRUCT
-                  bracketed:
-                  - start_bracket: (
-                  - quoted_literal: "'hi'"
-                  - alias_expression:
-                      keyword: AS
-                      naked_identifier: greeting
-                  - comma: ','
-                  - quoted_literal: "'there'"
-                  - alias_expression:
-                      keyword: AS
-                      naked_identifier: subject
-                  - end_bracket: )
+                typed_struct_literal:
+                  struct_type:
+                    keyword: STRUCT
+                  struct_literal:
+                    bracketed:
+                    - start_bracket: (
+                    - quoted_literal: "'hi'"
+                    - alias_expression:
+                        keyword: AS
+                        naked_identifier: greeting
+                    - comma: ','
+                    - quoted_literal: "'there'"
+                    - alias_expression:
+                        keyword: AS
+                        naked_identifier: subject
+                    - end_bracket: )
             - end_bracket: )
           alias_expression:
             keyword: AS
@@ -91,26 +95,28 @@ file:
                   - naked_identifier: xxx
               - keyword: THEN
               - expression:
-                  typeless_struct:
-                    keyword: STRUCT
-                    bracketed:
-                    - start_bracket: (
-                    - column_reference:
-                      - naked_identifier: a
-                      - dot: .
-                      - naked_identifier: xxx
-                    - alias_expression:
-                        keyword: AS
-                        naked_identifier: M
-                    - comma: ','
-                    - column_reference:
-                      - naked_identifier: b
-                      - dot: .
-                      - naked_identifier: xxx
-                    - alias_expression:
-                        keyword: AS
-                        naked_identifier: N
-                    - end_bracket: )
+                  typed_struct_literal:
+                    struct_type:
+                      keyword: STRUCT
+                    struct_literal:
+                      bracketed:
+                      - start_bracket: (
+                      - column_reference:
+                        - naked_identifier: a
+                        - dot: .
+                        - naked_identifier: xxx
+                      - alias_expression:
+                          keyword: AS
+                          naked_identifier: M
+                      - comma: ','
+                      - column_reference:
+                        - naked_identifier: b
+                        - dot: .
+                        - naked_identifier: xxx
+                      - alias_expression:
+                          keyword: AS
+                          naked_identifier: N
+                      - end_bracket: )
             - keyword: END
           alias_expression:
             keyword: AS
diff --git a/test/fixtures/dialects/clickhouse/create_materialized_view.sql b/test/fixtures/dialects/clickhouse/create_materialized_view.sql
new file mode 100644
index 0000000..641d50b
--- /dev/null
+++ b/test/fixtures/dialects/clickhouse/create_materialized_view.sql
@@ -0,0 +1,50 @@
+CREATE MATERIALIZED VIEW IF NOT EXISTS db.table_mv
+TO db.table
+AS
+    SELECT
+        column1,
+        column2
+    FROM db.table_kafka;
+
+CREATE MATERIALIZED VIEW table_mv
+TO table
+AS
+    SELECT
+        column1,
+        column2
+    FROM table_kafka;
+
+CREATE MATERIALIZED VIEW IF NOT EXISTS db.table_mv
+ON CLUSTER mycluster
+TO db.table
+AS
+    SELECT
+        column1,
+        column2
+    FROM db.table_kafka;
+
+CREATE MATERIALIZED VIEW table_mv
+TO table
+ENGINE = MergeTree()
+AS
+    SELECT
+        column1,
+        column2
+    FROM table_kafka;
+
+CREATE MATERIALIZED VIEW table_mv
+ENGINE = MergeTree()
+AS
+    SELECT
+        column1,
+        column2
+    FROM table_kafka;
+
+CREATE MATERIALIZED VIEW table_mv
+ENGINE = MergeTree()
+POPULATE
+AS
+    SELECT
+        column1,
+        column2
+    FROM table_kafka;
diff --git a/test/fixtures/dialects/clickhouse/create_materialized_view.yml b/test/fixtures/dialects/clickhouse/create_materialized_view.yml
new file mode 100644
index 0000000..4e9ccac
--- /dev/null
+++ b/test/fixtures/dialects/clickhouse/create_materialized_view.yml
@@ -0,0 +1,229 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: a5d6cd3865f0a5a4395112277b201da1a9794712fcf125b9269ef777e366bc21
+file:
+- statement:
+    create_materialized_view_statement:
+    - keyword: CREATE
+    - keyword: MATERIALIZED
+    - keyword: VIEW
+    - keyword: IF
+    - keyword: NOT
+    - keyword: EXISTS
+    - table_reference:
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: table_mv
+    - keyword: TO
+    - table_reference:
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: table
+    - keyword: AS
+    - select_statement:
+        select_clause:
+        - keyword: SELECT
+        - select_clause_element:
+            column_reference:
+              naked_identifier: column1
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: column2
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                - naked_identifier: db
+                - dot: .
+                - naked_identifier: table_kafka
+- statement_terminator: ;
+- statement:
+    create_materialized_view_statement:
+    - keyword: CREATE
+    - keyword: MATERIALIZED
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: table_mv
+    - keyword: TO
+    - table_reference:
+        naked_identifier: table
+    - keyword: AS
+    - select_statement:
+        select_clause:
+        - keyword: SELECT
+        - select_clause_element:
+            column_reference:
+              naked_identifier: column1
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: column2
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: table_kafka
+- statement_terminator: ;
+- statement:
+    create_materialized_view_statement:
+    - keyword: CREATE
+    - keyword: MATERIALIZED
+    - keyword: VIEW
+    - keyword: IF
+    - keyword: NOT
+    - keyword: EXISTS
+    - table_reference:
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: table_mv
+    - keyword: 'ON'
+    - keyword: CLUSTER
+    - expression:
+        column_reference:
+          naked_identifier: mycluster
+    - keyword: TO
+    - table_reference:
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: table
+    - keyword: AS
+    - select_statement:
+        select_clause:
+        - keyword: SELECT
+        - select_clause_element:
+            column_reference:
+              naked_identifier: column1
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: column2
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                - naked_identifier: db
+                - dot: .
+                - naked_identifier: table_kafka
+- statement_terminator: ;
+- statement:
+    create_materialized_view_statement:
+    - keyword: CREATE
+    - keyword: MATERIALIZED
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: table_mv
+    - keyword: TO
+    - table_reference:
+        naked_identifier: table
+    - engine:
+        keyword: ENGINE
+        comparison_operator:
+          raw_comparison_operator: '='
+        engine_function:
+          function_name:
+            function_name_identifier: MergeTree
+          bracketed:
+            start_bracket: (
+            end_bracket: )
+    - keyword: AS
+    - select_statement:
+        select_clause:
+        - keyword: SELECT
+        - select_clause_element:
+            column_reference:
+              naked_identifier: column1
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: column2
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: table_kafka
+- statement_terminator: ;
+- statement:
+    create_materialized_view_statement:
+    - keyword: CREATE
+    - keyword: MATERIALIZED
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: table_mv
+    - engine:
+        keyword: ENGINE
+        comparison_operator:
+          raw_comparison_operator: '='
+        engine_function:
+          function_name:
+            function_name_identifier: MergeTree
+          bracketed:
+            start_bracket: (
+            end_bracket: )
+    - keyword: AS
+    - select_statement:
+        select_clause:
+        - keyword: SELECT
+        - select_clause_element:
+            column_reference:
+              naked_identifier: column1
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: column2
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: table_kafka
+- statement_terminator: ;
+- statement:
+    create_materialized_view_statement:
+    - keyword: CREATE
+    - keyword: MATERIALIZED
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: table_mv
+    - engine:
+        keyword: ENGINE
+        comparison_operator:
+          raw_comparison_operator: '='
+        engine_function:
+          function_name:
+            function_name_identifier: MergeTree
+          bracketed:
+            start_bracket: (
+            end_bracket: )
+    - keyword: POPULATE
+    - keyword: AS
+    - select_statement:
+        select_clause:
+        - keyword: SELECT
+        - select_clause_element:
+            column_reference:
+              naked_identifier: column1
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: column2
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: table_kafka
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/clickhouse/create_table.yml b/test/fixtures/dialects/clickhouse/create_table.yml
index d8dd4f4..27bb9ae 100644
--- a/test/fixtures/dialects/clickhouse/create_table.yml
+++ b/test/fixtures/dialects/clickhouse/create_table.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 8e1c3bbca74a1db024b87f5c5657fb94c22cdc895cd6716d8708f832a3c4e768
+_hash: 70f6acdf665d02008e12fa18131d686ca8a206117c271538b8d1c82b9154e116
 file:
 - statement:
     create_table_statement:
@@ -477,12 +477,11 @@ file:
           naked_identifier: s
           data_type:
             data_type_identifier: Nullable
-            bracketed:
-              start_bracket: (
-              expression:
-                column_reference:
-                  naked_identifier: String
-              end_bracket: )
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
+                data_type_identifier: String
+                end_bracket: )
       - end_bracket: )
     - engine:
       - keyword: ENGINE
@@ -530,20 +529,26 @@ file:
         naked_identifier: all_hits
     - keyword: 'ON'
     - keyword: CLUSTER
-    - cluster_reference:
-        naked_identifier: cluster
-    - bracketed:
-      - start_bracket: (
-      - column_definition:
-          naked_identifier: p
-          data_type:
-            data_type_identifier: Date
-      - comma: ','
-      - column_definition:
-          naked_identifier: i
-          data_type:
-            data_type_identifier: Int32
-      - end_bracket: )
+    - expression:
+        function:
+          function_name:
+            function_name_identifier: cluster
+          bracketed:
+          - start_bracket: (
+          - expression:
+              column_reference:
+                naked_identifier: p
+          - expression:
+              column_reference:
+                naked_identifier: Date
+          - comma: ','
+          - expression:
+              column_reference:
+                naked_identifier: i
+          - expression:
+              column_reference:
+                naked_identifier: Int32
+          - end_bracket: )
     - engine:
         keyword: ENGINE
         comparison_operator:
diff --git a/test/fixtures/dialects/clickhouse/join.sql b/test/fixtures/dialects/clickhouse/join.sql
new file mode 100644
index 0000000..5f6df46
--- /dev/null
+++ b/test/fixtures/dialects/clickhouse/join.sql
@@ -0,0 +1,69 @@
+-- no type join
+SELECT * FROM test1 ALL JOIN test2 ON test2.ty1=test1.ty1;
+SELECT * FROM test1 ANY JOIN test2 ON test2.ty1=test1.ty1;
+SELECT * FROM test1 JOIN test2 ON test2.ty1=test1.ty1;
+-- INNER join
+SELECT * FROM test1 INNER JOIN test2 ON test2.ty1=test1.ty1;
+-- INNER join ...
+SELECT * FROM test1 INNER ALL JOIN test2 ON test2.ty1=test1.ty1;
+SELECT * FROM test1 INNER ANY JOIN test2 ON test2.ty1=test1.ty1;
+-- ... INNER join
+SELECT * FROM test1 ALL INNER JOIN test2 ON test2.ty1=test1.ty1;
+SELECT * FROM test1 ANY INNER JOIN test2 ON test2.ty1=test1.ty1;
+-- LEFT JOIN
+SELECT * FROM test1 LEFT JOIN test2 ON test2.ty1=test1.ty1;
+-- LEFT join ...
+SELECT tbl1.id FROM tbl1 LEFT ANTI join tbl2 on tbl1.id = tbl2.id;
+SELECT * FROM test1 as t1 LEFT SEMI JOIN test2 USING ty1,ty2;
+SELECT * FROM test1 as t1 LEFT ANY JOIN test2 USING ty1,ty2;
+SELECT * FROM test1 as t1 LEFT ALL JOIN test2 USING ty1,ty2;
+SELECT * FROM test1 as t1 LEFT ASOF JOIN test2 USING ty1,ty2;
+-- ... LEFT join
+select tbl1.id from tbl1  ANTI LEFT join tbl2 on tbl1.id = tbl2.id;
+SELECT * FROM test1 as t1 SEMI LEFT JOIN test2 USING ty1,ty2;
+SELECT * FROM test1 as t1 ANY LEFT JOIN test2 USING ty1,ty2;
+SELECT * FROM test1 as t1 ALL LEFT JOIN test2 USING ty1,ty2;
+SELECT * FROM test1 as t1 ASOF LEFT JOIN test2 USING (ty1,ty2);
+-- LEFT join test case OUTER
+SELECT * FROM test1 as t1 LEFT OUTER JOIN test2 USING ty1,ty2;
+SELECT * FROM test1 as t1 LEFT ASOF OUTER JOIN test2 USING ty1,ty2;
+SELECT tbl1.id FROM tbl1 LEFT ANTI OUTER join tbl2 on tbl1.id = tbl2.id;
+SELECT * FROM test1 as t1 LEFT SEMI OUTER JOIN test2 USING ty1,ty2;
+SELECT * FROM test1 as t1 LEFT ANY OUTER JOIN test2 USING ty1,ty2;
+SELECT * FROM test1 as t1 LEFT ALL OUTER JOIN test2 USING ty1,ty2;
+SELECT * FROM test1 as t1 LEFT ASOF OUTER JOIN test2 USING ty1,ty2;
+-- RIGHT JOIN
+SELECT * FROM test1 RIGHT JOIN test2 ON test2.ty1=test1.ty1;
+-- RIGHT join ...
+SELECT tbl1.id FROM tbl1 RIGHT ANTI join tbl2 on tbl1.id = tbl2.id;
+SELECT * FROM test1 as t1 RIGHT SEMI JOIN test2 USING ty1,ty2;
+SELECT * FROM test1 as t1 RIGHT ANY JOIN test2 USING ty1,ty2;
+SELECT * FROM test1 as t1 RIGHT ALL JOIN test2 USING ty1,ty2;
+-- ... RIGHT join
+select tbl1.id from tbl1  ANTI RIGHT join tbl2 on tbl1.id = tbl2.id;
+SELECT * FROM test1 as t1 SEMI RIGHT JOIN test2 USING ty1,ty2;
+SELECT * FROM test1 as t1 ANY RIGHT JOIN test2 USING ty1,ty2;
+SELECT * FROM test1 as t1 ALL RIGHT JOIN test2 USING ty1,ty2;
+-- RIGHT join test case OUTER
+SELECT * FROM test1 as t1 RIGHT OUTER JOIN test2 USING ty1,ty2;
+SELECT * FROM test1 as t1 RIGHT ANTI OUTER JOIN test2 USING ty1,ty2;
+SELECT * FROM test1 as t1 RIGHT SEMI OUTER JOIN test2 USING ty1,ty2;
+SELECT * FROM test1 as t1 RIGHT ANY OUTER JOIN test2 USING ty1,ty2;
+SELECT * FROM test1 as t1 RIGHT ALL OUTER JOIN test2 USING ty1,ty2;
+-- ASOF join
+select tbl1.id from tbl1 ASOF JOIN tbl2 on tbl1.id = tbl2.id;
+-- CROSS join
+SELECT * FROM test1 CROSS JOIN test2;
+-- FULL join
+SELECT * FROM test1 as t1 FULL ALL JOIN test2 USING ty1,ty2;
+SELECT * FROM test1 as t1 FULL JOIN test2 USING ty1,ty2;
+SELECT * FROM test1 as t1 FULL ALL OUTER JOIN test2 USING ty1,ty2;
+-- ARRAY join
+SELECT col FROM (SELECT arr FROM test1) AS t2 ARRAY JOIN arr AS col;
+SELECT col FROM (SELECT [1, 2] AS arr) AS t1 LEFT ARRAY JOIN arr AS col;
+SELECT * FROM (SELECT [1, 2] AS arr) AS t1 ARRAY JOIN arr;
+SELECT * FROM (SELECT [1, 2] AS arr) AS t1 LEFT ARRAY JOIN arr;
+SELECT * FROM (SELECT [1, 2] AS arr, [3, 4] AS arr2) AS t1 ARRAY JOIN arr, arr2;
+SELECT x, y FROM (SELECT [1, 2] AS arr, [3, 4] AS arr2) AS t1 ARRAY JOIN arr AS x, arr2 AS y;
+SELECT *,ch,cg FROM (SELECT 1) ARRAY JOIN ['1','2'] as cg, splitByChar(',','1,2') as ch;
+SELECT * FROM (SELECT [1,2] x) AS t1 ARRAY JOIN t1.*;
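
The new join.sql fixture above exercises the join grammar added for the ClickHouse dialect: the ALL/ANY/ASOF/SEMI/ANTI strictness modifiers on either side of LEFT/RIGHT/INNER/FULL, the optional OUTER keyword, ARRAY JOIN (plain and LEFT), and USING lists both with and without brackets. The join.yml file below records the expected parse tree for each of these statements. A quick round-trip check that a handful of the variants parse cleanly under the new dialect rules; a minimal sketch, assuming sqlfluff 2.x is installed (the statement list is a sample, not the full fixture):

    import sqlfluff

    # A few join variants from the fixture; extend the list as needed.
    statements = [
        "SELECT * FROM test1 ALL JOIN test2 ON test2.ty1 = test1.ty1;",
        "SELECT * FROM test1 AS t1 LEFT ASOF JOIN test2 USING (ty1, ty2);",
        "SELECT * FROM (SELECT [1, 2] AS arr) AS t1 LEFT ARRAY JOIN arr;",
    ]

    for sql in statements:
        try:
            # parse() raises on a parse failure, so reaching print() means
            # the statement is accepted by the clickhouse dialect.
            sqlfluff.parse(sql, dialect="clickhouse")
            print("OK  ", sql)
        except Exception as exc:
            print("FAIL", sql, "->", exc)
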
diff --git a/test/fixtures/dialects/clickhouse/join.yml b/test/fixtures/dialects/clickhouse/join.yml
new file mode 100644
index 0000000..00fe83c
--- /dev/null
+++ b/test/fixtures/dialects/clickhouse/join.yml
@@ -0,0 +1,1881 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py` to generate them after adding or
+# altering SQL files.
+_hash: 691193ed2bf48b17d52586c0663c81b562d7f2c58769fcd4349b4cb2cdb06511
+file:
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+          join_clause:
+          - keyword: ALL
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - join_on_condition:
+              keyword: 'ON'
+              expression:
+              - column_reference:
+                - naked_identifier: test2
+                - dot: .
+                - naked_identifier: ty1
+              - comparison_operator:
+                  raw_comparison_operator: '='
+              - column_reference:
+                - naked_identifier: test1
+                - dot: .
+                - naked_identifier: ty1
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+          join_clause:
+          - keyword: ANY
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - join_on_condition:
+              keyword: 'ON'
+              expression:
+              - column_reference:
+                - naked_identifier: test2
+                - dot: .
+                - naked_identifier: ty1
+              - comparison_operator:
+                  raw_comparison_operator: '='
+              - column_reference:
+                - naked_identifier: test1
+                - dot: .
+                - naked_identifier: ty1
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+          join_clause:
+            keyword: JOIN
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+            join_on_condition:
+              keyword: 'ON'
+              expression:
+              - column_reference:
+                - naked_identifier: test2
+                - dot: .
+                - naked_identifier: ty1
+              - comparison_operator:
+                  raw_comparison_operator: '='
+              - column_reference:
+                - naked_identifier: test1
+                - dot: .
+                - naked_identifier: ty1
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+          join_clause:
+          - keyword: INNER
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - join_on_condition:
+              keyword: 'ON'
+              expression:
+              - column_reference:
+                - naked_identifier: test2
+                - dot: .
+                - naked_identifier: ty1
+              - comparison_operator:
+                  raw_comparison_operator: '='
+              - column_reference:
+                - naked_identifier: test1
+                - dot: .
+                - naked_identifier: ty1
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+          join_clause:
+          - keyword: INNER
+          - keyword: ALL
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - join_on_condition:
+              keyword: 'ON'
+              expression:
+              - column_reference:
+                - naked_identifier: test2
+                - dot: .
+                - naked_identifier: ty1
+              - comparison_operator:
+                  raw_comparison_operator: '='
+              - column_reference:
+                - naked_identifier: test1
+                - dot: .
+                - naked_identifier: ty1
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+          join_clause:
+          - keyword: INNER
+          - keyword: ANY
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - join_on_condition:
+              keyword: 'ON'
+              expression:
+              - column_reference:
+                - naked_identifier: test2
+                - dot: .
+                - naked_identifier: ty1
+              - comparison_operator:
+                  raw_comparison_operator: '='
+              - column_reference:
+                - naked_identifier: test1
+                - dot: .
+                - naked_identifier: ty1
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+          join_clause:
+          - keyword: ALL
+          - keyword: INNER
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - join_on_condition:
+              keyword: 'ON'
+              expression:
+              - column_reference:
+                - naked_identifier: test2
+                - dot: .
+                - naked_identifier: ty1
+              - comparison_operator:
+                  raw_comparison_operator: '='
+              - column_reference:
+                - naked_identifier: test1
+                - dot: .
+                - naked_identifier: ty1
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+          join_clause:
+          - keyword: ANY
+          - keyword: INNER
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - join_on_condition:
+              keyword: 'ON'
+              expression:
+              - column_reference:
+                - naked_identifier: test2
+                - dot: .
+                - naked_identifier: ty1
+              - comparison_operator:
+                  raw_comparison_operator: '='
+              - column_reference:
+                - naked_identifier: test1
+                - dot: .
+                - naked_identifier: ty1
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+          join_clause:
+          - keyword: LEFT
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - join_on_condition:
+              keyword: 'ON'
+              expression:
+              - column_reference:
+                - naked_identifier: test2
+                - dot: .
+                - naked_identifier: ty1
+              - comparison_operator:
+                  raw_comparison_operator: '='
+              - column_reference:
+                - naked_identifier: test1
+                - dot: .
+                - naked_identifier: ty1
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          column_reference:
+          - naked_identifier: tbl1
+          - dot: .
+          - naked_identifier: id
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: tbl1
+          join_clause:
+          - keyword: LEFT
+          - keyword: ANTI
+          - keyword: join
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: tbl2
+          - join_on_condition:
+              keyword: 'on'
+              expression:
+              - column_reference:
+                - naked_identifier: tbl1
+                - dot: .
+                - naked_identifier: id
+              - comparison_operator:
+                  raw_comparison_operator: '='
+              - column_reference:
+                - naked_identifier: tbl2
+                - dot: .
+                - naked_identifier: id
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: LEFT
+          - keyword: SEMI
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: LEFT
+          - keyword: ANY
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: LEFT
+          - keyword: ALL
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: LEFT
+          - keyword: ASOF
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: select
+        select_clause_element:
+          column_reference:
+          - naked_identifier: tbl1
+          - dot: .
+          - naked_identifier: id
+      from_clause:
+        keyword: from
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: tbl1
+          join_clause:
+          - keyword: ANTI
+          - keyword: LEFT
+          - keyword: join
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: tbl2
+          - join_on_condition:
+              keyword: 'on'
+              expression:
+              - column_reference:
+                - naked_identifier: tbl1
+                - dot: .
+                - naked_identifier: id
+              - comparison_operator:
+                  raw_comparison_operator: '='
+              - column_reference:
+                - naked_identifier: tbl2
+                - dot: .
+                - naked_identifier: id
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: SEMI
+          - keyword: LEFT
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: ANY
+          - keyword: LEFT
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: ALL
+          - keyword: LEFT
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: ASOF
+          - keyword: LEFT
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - bracketed:
+            - start_bracket: (
+            - naked_identifier: ty1
+            - comma: ','
+            - naked_identifier: ty2
+            - end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: LEFT
+          - keyword: OUTER
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: LEFT
+          - keyword: ASOF
+          - keyword: OUTER
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          column_reference:
+          - naked_identifier: tbl1
+          - dot: .
+          - naked_identifier: id
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: tbl1
+          join_clause:
+          - keyword: LEFT
+          - keyword: ANTI
+          - keyword: OUTER
+          - keyword: join
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: tbl2
+          - join_on_condition:
+              keyword: 'on'
+              expression:
+              - column_reference:
+                - naked_identifier: tbl1
+                - dot: .
+                - naked_identifier: id
+              - comparison_operator:
+                  raw_comparison_operator: '='
+              - column_reference:
+                - naked_identifier: tbl2
+                - dot: .
+                - naked_identifier: id
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: LEFT
+          - keyword: SEMI
+          - keyword: OUTER
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: LEFT
+          - keyword: ANY
+          - keyword: OUTER
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: LEFT
+          - keyword: ALL
+          - keyword: OUTER
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: LEFT
+          - keyword: ASOF
+          - keyword: OUTER
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+          join_clause:
+          - keyword: RIGHT
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - join_on_condition:
+              keyword: 'ON'
+              expression:
+              - column_reference:
+                - naked_identifier: test2
+                - dot: .
+                - naked_identifier: ty1
+              - comparison_operator:
+                  raw_comparison_operator: '='
+              - column_reference:
+                - naked_identifier: test1
+                - dot: .
+                - naked_identifier: ty1
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          column_reference:
+          - naked_identifier: tbl1
+          - dot: .
+          - naked_identifier: id
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: tbl1
+          join_clause:
+          - keyword: RIGHT
+          - keyword: ANTI
+          - keyword: join
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: tbl2
+          - join_on_condition:
+              keyword: 'on'
+              expression:
+              - column_reference:
+                - naked_identifier: tbl1
+                - dot: .
+                - naked_identifier: id
+              - comparison_operator:
+                  raw_comparison_operator: '='
+              - column_reference:
+                - naked_identifier: tbl2
+                - dot: .
+                - naked_identifier: id
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: RIGHT
+          - keyword: SEMI
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: RIGHT
+          - keyword: ANY
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: RIGHT
+          - keyword: ALL
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: select
+        select_clause_element:
+          column_reference:
+          - naked_identifier: tbl1
+          - dot: .
+          - naked_identifier: id
+      from_clause:
+        keyword: from
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: tbl1
+          join_clause:
+          - keyword: ANTI
+          - keyword: RIGHT
+          - keyword: join
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: tbl2
+          - join_on_condition:
+              keyword: 'on'
+              expression:
+              - column_reference:
+                - naked_identifier: tbl1
+                - dot: .
+                - naked_identifier: id
+              - comparison_operator:
+                  raw_comparison_operator: '='
+              - column_reference:
+                - naked_identifier: tbl2
+                - dot: .
+                - naked_identifier: id
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: SEMI
+          - keyword: RIGHT
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: ANY
+          - keyword: RIGHT
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: ALL
+          - keyword: RIGHT
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: RIGHT
+          - keyword: OUTER
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: RIGHT
+          - keyword: ANTI
+          - keyword: OUTER
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: RIGHT
+          - keyword: SEMI
+          - keyword: OUTER
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: RIGHT
+          - keyword: ANY
+          - keyword: OUTER
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: RIGHT
+          - keyword: ALL
+          - keyword: OUTER
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: select
+        select_clause_element:
+          column_reference:
+          - naked_identifier: tbl1
+          - dot: .
+          - naked_identifier: id
+      from_clause:
+        keyword: from
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: tbl1
+            alias_expression:
+              naked_identifier: ASOF
+          join_clause:
+            keyword: JOIN
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: tbl2
+            join_on_condition:
+              keyword: 'on'
+              expression:
+              - column_reference:
+                - naked_identifier: tbl1
+                - dot: .
+                - naked_identifier: id
+              - comparison_operator:
+                  raw_comparison_operator: '='
+              - column_reference:
+                - naked_identifier: tbl2
+                - dot: .
+                - naked_identifier: id
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+          join_clause:
+          - keyword: CROSS
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: FULL
+          - keyword: ALL
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: FULL
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test1
+            alias_expression:
+              keyword: as
+              naked_identifier: t1
+          join_clause:
+          - keyword: FULL
+          - keyword: ALL
+          - keyword: OUTER
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: test2
+          - keyword: USING
+          - naked_identifier: ty1
+          - comma: ','
+          - naked_identifier: ty2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          column_reference:
+            naked_identifier: col
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              bracketed:
+                start_bracket: (
+                select_statement:
+                  select_clause:
+                    keyword: SELECT
+                    select_clause_element:
+                      column_reference:
+                        naked_identifier: arr
+                  from_clause:
+                    keyword: FROM
+                    from_expression:
+                      from_expression_element:
+                        table_expression:
+                          table_reference:
+                            naked_identifier: test1
+                end_bracket: )
+            alias_expression:
+              keyword: AS
+              naked_identifier: t2
+          array_join_clause:
+          - keyword: ARRAY
+          - keyword: JOIN
+          - select_clause_element:
+              column_reference:
+                naked_identifier: arr
+              alias_expression:
+                keyword: AS
+                naked_identifier: col
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          column_reference:
+            naked_identifier: col
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              bracketed:
+                start_bracket: (
+                select_statement:
+                  select_clause:
+                    keyword: SELECT
+                    select_clause_element:
+                      array_literal:
+                      - start_square_bracket: '['
+                      - numeric_literal: '1'
+                      - comma: ','
+                      - numeric_literal: '2'
+                      - end_square_bracket: ']'
+                      alias_expression:
+                        keyword: AS
+                        naked_identifier: arr
+                end_bracket: )
+            alias_expression:
+              keyword: AS
+              naked_identifier: t1
+          array_join_clause:
+          - keyword: LEFT
+          - keyword: ARRAY
+          - keyword: JOIN
+          - select_clause_element:
+              column_reference:
+                naked_identifier: arr
+              alias_expression:
+                keyword: AS
+                naked_identifier: col
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              bracketed:
+                start_bracket: (
+                select_statement:
+                  select_clause:
+                    keyword: SELECT
+                    select_clause_element:
+                      array_literal:
+                      - start_square_bracket: '['
+                      - numeric_literal: '1'
+                      - comma: ','
+                      - numeric_literal: '2'
+                      - end_square_bracket: ']'
+                      alias_expression:
+                        keyword: AS
+                        naked_identifier: arr
+                end_bracket: )
+            alias_expression:
+              keyword: AS
+              naked_identifier: t1
+          array_join_clause:
+          - keyword: ARRAY
+          - keyword: JOIN
+          - select_clause_element:
+              column_reference:
+                naked_identifier: arr
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              bracketed:
+                start_bracket: (
+                select_statement:
+                  select_clause:
+                    keyword: SELECT
+                    select_clause_element:
+                      array_literal:
+                      - start_square_bracket: '['
+                      - numeric_literal: '1'
+                      - comma: ','
+                      - numeric_literal: '2'
+                      - end_square_bracket: ']'
+                      alias_expression:
+                        keyword: AS
+                        naked_identifier: arr
+                end_bracket: )
+            alias_expression:
+              keyword: AS
+              naked_identifier: t1
+          array_join_clause:
+          - keyword: LEFT
+          - keyword: ARRAY
+          - keyword: JOIN
+          - select_clause_element:
+              column_reference:
+                naked_identifier: arr
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              bracketed:
+                start_bracket: (
+                select_statement:
+                  select_clause:
+                  - keyword: SELECT
+                  - select_clause_element:
+                      array_literal:
+                      - start_square_bracket: '['
+                      - numeric_literal: '1'
+                      - comma: ','
+                      - numeric_literal: '2'
+                      - end_square_bracket: ']'
+                      alias_expression:
+                        keyword: AS
+                        naked_identifier: arr
+                  - comma: ','
+                  - select_clause_element:
+                      array_literal:
+                      - start_square_bracket: '['
+                      - numeric_literal: '3'
+                      - comma: ','
+                      - numeric_literal: '4'
+                      - end_square_bracket: ']'
+                      alias_expression:
+                        keyword: AS
+                        naked_identifier: arr2
+                end_bracket: )
+            alias_expression:
+              keyword: AS
+              naked_identifier: t1
+          array_join_clause:
+          - keyword: ARRAY
+          - keyword: JOIN
+          - select_clause_element:
+              column_reference:
+                naked_identifier: arr
+          - comma: ','
+          - select_clause_element:
+              column_reference:
+                naked_identifier: arr2
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          column_reference:
+            naked_identifier: x
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: y
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              bracketed:
+                start_bracket: (
+                select_statement:
+                  select_clause:
+                  - keyword: SELECT
+                  - select_clause_element:
+                      array_literal:
+                      - start_square_bracket: '['
+                      - numeric_literal: '1'
+                      - comma: ','
+                      - numeric_literal: '2'
+                      - end_square_bracket: ']'
+                      alias_expression:
+                        keyword: AS
+                        naked_identifier: arr
+                  - comma: ','
+                  - select_clause_element:
+                      array_literal:
+                      - start_square_bracket: '['
+                      - numeric_literal: '3'
+                      - comma: ','
+                      - numeric_literal: '4'
+                      - end_square_bracket: ']'
+                      alias_expression:
+                        keyword: AS
+                        naked_identifier: arr2
+                end_bracket: )
+            alias_expression:
+              keyword: AS
+              naked_identifier: t1
+          array_join_clause:
+          - keyword: ARRAY
+          - keyword: JOIN
+          - select_clause_element:
+              column_reference:
+                naked_identifier: arr
+              alias_expression:
+                keyword: AS
+                naked_identifier: x
+          - comma: ','
+          - select_clause_element:
+              column_reference:
+                naked_identifier: arr2
+              alias_expression:
+                keyword: AS
+                naked_identifier: y
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: ch
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: cg
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              bracketed:
+                start_bracket: (
+                select_statement:
+                  select_clause:
+                    keyword: SELECT
+                    select_clause_element:
+                      numeric_literal: '1'
+                end_bracket: )
+          array_join_clause:
+          - keyword: ARRAY
+          - keyword: JOIN
+          - select_clause_element:
+              array_literal:
+              - start_square_bracket: '['
+              - quoted_literal: "'1'"
+              - comma: ','
+              - quoted_literal: "'2'"
+              - end_square_bracket: ']'
+              alias_expression:
+                keyword: as
+                naked_identifier: cg
+          - comma: ','
+          - select_clause_element:
+              function:
+                function_name:
+                  function_name_identifier: splitByChar
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    quoted_literal: "','"
+                - comma: ','
+                - expression:
+                    quoted_literal: "'1,2'"
+                - end_bracket: )
+              alias_expression:
+                keyword: as
+                naked_identifier: ch
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              bracketed:
+                start_bracket: (
+                select_statement:
+                  select_clause:
+                    keyword: SELECT
+                    select_clause_element:
+                      array_literal:
+                      - start_square_bracket: '['
+                      - numeric_literal: '1'
+                      - comma: ','
+                      - numeric_literal: '2'
+                      - end_square_bracket: ']'
+                      alias_expression:
+                        naked_identifier: x
+                end_bracket: )
+            alias_expression:
+              keyword: AS
+              naked_identifier: t1
+          array_join_clause:
+          - keyword: ARRAY
+          - keyword: JOIN
+          - select_clause_element:
+              wildcard_expression:
+                wildcard_identifier:
+                  naked_identifier: t1
+                  dot: .
+                  star: '*'
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/clickhouse/lambda_function.sql b/test/fixtures/dialects/clickhouse/lambda_function.sql
new file mode 100644
index 0000000..cd152f8
--- /dev/null
+++ b/test/fixtures/dialects/clickhouse/lambda_function.sql
@@ -0,0 +1,3 @@
+SELECT arrayFirst(x -> x = 2, [1, 1, 2, 2]);
+
+SELECT arrayFirst(x, y -> x != y, [1, 1, 2, 2], [1, 2, 2, 3]);
diff --git a/test/fixtures/dialects/clickhouse/lambda_function.yml b/test/fixtures/dialects/clickhouse/lambda_function.yml
new file mode 100644
index 0000000..da02e9c
--- /dev/null
+++ b/test/fixtures/dialects/clickhouse/lambda_function.yml
@@ -0,0 +1,91 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: a6c721a05f4a158ae4714ce75d9126a350791a238eaaf6e33beae82779e27d3d
+file:
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: arrayFirst
+            bracketed:
+            - start_bracket: (
+            - expression:
+              - column_reference:
+                  naked_identifier: x
+              - lambda: ->
+              - column_reference:
+                  naked_identifier: x
+              - comparison_operator:
+                  raw_comparison_operator: '='
+              - numeric_literal: '2'
+            - comma: ','
+            - expression:
+                array_literal:
+                - start_square_bracket: '['
+                - numeric_literal: '1'
+                - comma: ','
+                - numeric_literal: '1'
+                - comma: ','
+                - numeric_literal: '2'
+                - comma: ','
+                - numeric_literal: '2'
+                - end_square_bracket: ']'
+            - end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: arrayFirst
+            bracketed:
+            - start_bracket: (
+            - expression:
+                column_reference:
+                  naked_identifier: x
+            - comma: ','
+            - expression:
+              - column_reference:
+                  naked_identifier: y
+              - lambda: ->
+              - column_reference:
+                  naked_identifier: x
+              - comparison_operator:
+                - raw_comparison_operator: '!'
+                - raw_comparison_operator: '='
+              - column_reference:
+                  naked_identifier: y
+            - comma: ','
+            - expression:
+                array_literal:
+                - start_square_bracket: '['
+                - numeric_literal: '1'
+                - comma: ','
+                - numeric_literal: '1'
+                - comma: ','
+                - numeric_literal: '2'
+                - comma: ','
+                - numeric_literal: '2'
+                - end_square_bracket: ']'
+            - comma: ','
+            - expression:
+                array_literal:
+                - start_square_bracket: '['
+                - numeric_literal: '1'
+                - comma: ','
+                - numeric_literal: '2'
+                - comma: ','
+                - numeric_literal: '2'
+                - comma: ','
+                - numeric_literal: '3'
+                - end_square_bracket: ']'
+            - end_bracket: )
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/databricks/.sqlfluff b/test/fixtures/dialects/databricks/.sqlfluff
new file mode 100644
index 0000000..5aae42e
--- /dev/null
+++ b/test/fixtures/dialects/databricks/.sqlfluff
@@ -0,0 +1,2 @@
+[sqlfluff]
+dialect = databricks
diff --git a/test/fixtures/dialects/databricks/alter_catalog.sql b/test/fixtures/dialects/databricks/alter_catalog.sql
new file mode 100644
index 0000000..ac28a8e
--- /dev/null
+++ b/test/fixtures/dialects/databricks/alter_catalog.sql
@@ -0,0 +1,5 @@
+-- Transfer ownership of the catalog to another user
+ALTER CATALOG some_cat OWNER TO `alf@melmak.et`;
+
+-- SET is allowed as an optional keyword
+ALTER CATALOG some_cat SET OWNER TO `alf@melmak.et`;
diff --git a/test/fixtures/dialects/databricks/alter_catalog.yml b/test/fixtures/dialects/databricks/alter_catalog.yml
new file mode 100644
index 0000000..41e8eae
--- /dev/null
+++ b/test/fixtures/dialects/databricks/alter_catalog.yml
@@ -0,0 +1,28 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 40b364e358e1643fde75134acd974436a1bb53e904e64284bbf1a163b6490b40
+file:
+- statement:
+    alter_catalog_statement:
+    - keyword: ALTER
+    - keyword: CATALOG
+    - catalog_reference:
+        naked_identifier: some_cat
+    - keyword: OWNER
+    - keyword: TO
+    - quoted_identifier: '`alf@melmak.et`'
+- statement_terminator: ;
+- statement:
+    alter_catalog_statement:
+    - keyword: ALTER
+    - keyword: CATALOG
+    - catalog_reference:
+        naked_identifier: some_cat
+    - keyword: SET
+    - keyword: OWNER
+    - keyword: TO
+    - quoted_identifier: '`alf@melmak.et`'
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/databricks/create_catalog.sql b/test/fixtures/dialects/databricks/create_catalog.sql
new file mode 100644
index 0000000..04803bb
--- /dev/null
+++ b/test/fixtures/dialects/databricks/create_catalog.sql
@@ -0,0 +1,9 @@
+-- Create catalog `customer_cat`.
+-- This throws exception if catalog with name customer_cat already exists.
+CREATE CATALOG customer_cat;
+
+-- Create catalog `customer_cat` only if catalog with same name doesn't exist.
+CREATE CATALOG IF NOT EXISTS customer_cat;
+
+-- Create catalog `customer_cat` only if catalog with same name doesn't exist, with a comment.
+CREATE CATALOG IF NOT EXISTS customer_cat COMMENT 'This is customer catalog';
diff --git a/test/fixtures/dialects/databricks/create_catalog.yml b/test/fixtures/dialects/databricks/create_catalog.yml
new file mode 100644
index 0000000..17589a9
--- /dev/null
+++ b/test/fixtures/dialects/databricks/create_catalog.yml
@@ -0,0 +1,36 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: a2d4e1bf9fa351097aede205a0244d067dc5a05213354eff1fdc94160fc227db
+file:
+- statement:
+    create_catalog_statement:
+    - keyword: CREATE
+    - keyword: CATALOG
+    - catalog_reference:
+        naked_identifier: customer_cat
+- statement_terminator: ;
+- statement:
+    create_catalog_statement:
+    - keyword: CREATE
+    - keyword: CATALOG
+    - keyword: IF
+    - keyword: NOT
+    - keyword: EXISTS
+    - catalog_reference:
+        naked_identifier: customer_cat
+- statement_terminator: ;
+- statement:
+    create_catalog_statement:
+    - keyword: CREATE
+    - keyword: CATALOG
+    - keyword: IF
+    - keyword: NOT
+    - keyword: EXISTS
+    - catalog_reference:
+        naked_identifier: customer_cat
+    - keyword: COMMENT
+    - quoted_literal: "'This is customer catalog'"
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/databricks/databricks_keywords.sql b/test/fixtures/dialects/databricks/databricks_keywords.sql
new file mode 100644
index 0000000..81914e8
--- /dev/null
+++ b/test/fixtures/dialects/databricks/databricks_keywords.sql
@@ -0,0 +1,3 @@
+select *
+from shopify_cz.order
+;
diff --git a/test/fixtures/dialects/databricks/databricks_keywords.yml b/test/fixtures/dialects/databricks/databricks_keywords.yml
new file mode 100644
index 0000000..62c8739
--- /dev/null
+++ b/test/fixtures/dialects/databricks/databricks_keywords.yml
@@ -0,0 +1,25 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: bf986346fed8101687984158446015a57adda6a314601c4bd98977bd5a5c3a8b
+file:
+  statement:
+    select_statement:
+      select_clause:
+        keyword: select
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: from
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+              - naked_identifier: shopify_cz
+              - dot: .
+              - naked_identifier: order
+  statement_terminator: ;
diff --git a/test/fixtures/dialects/databricks/drop_catalog.sql b/test/fixtures/dialects/databricks/drop_catalog.sql
new file mode 100644
index 0000000..a93a3fe
--- /dev/null
+++ b/test/fixtures/dialects/databricks/drop_catalog.sql
@@ -0,0 +1,5 @@
+-- Drop the catalog and its schemas
+DROP CATALOG vaccine CASCADE;
+
+-- Drop the catalog using IF EXISTS and only if it is empty.
+DROP CATALOG IF EXISTS vaccine RESTRICT;
diff --git a/test/fixtures/dialects/databricks/drop_catalog.yml b/test/fixtures/dialects/databricks/drop_catalog.yml
new file mode 100644
index 0000000..557bd08
--- /dev/null
+++ b/test/fixtures/dialects/databricks/drop_catalog.yml
@@ -0,0 +1,25 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 7ea0dd1142a5697ad4fedde41ded6e228ee8f187261cbe617e7e9f231fe5a7e6
+file:
+- statement:
+    drop_catalog_statement:
+    - keyword: DROP
+    - keyword: CATALOG
+    - catalog_reference:
+        naked_identifier: vaccine
+    - keyword: CASCADE
+- statement_terminator: ;
+- statement:
+    drop_catalog_statement:
+    - keyword: DROP
+    - keyword: CATALOG
+    - keyword: IF
+    - keyword: EXISTS
+    - catalog_reference:
+        naked_identifier: vaccine
+    - keyword: RESTRICT
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/databricks/use_catalog.sql b/test/fixtures/dialects/databricks/use_catalog.sql
new file mode 100644
index 0000000..39b5afc
--- /dev/null
+++ b/test/fixtures/dialects/databricks/use_catalog.sql
@@ -0,0 +1,11 @@
+USE CATALOG catalog_name;
+
+-- Use the 'hive_metastore' .
+USE CATALOG hive_metastore;
+
+USE CATALOG 'hive_metastore';
+
+-- Use the 'some_catalog'
+USE CATALOG `some_catalog`;
+
+USE CATALOG some_cat;
diff --git a/test/fixtures/dialects/databricks/use_catalog.yml b/test/fixtures/dialects/databricks/use_catalog.yml
new file mode 100644
index 0000000..14eefc5
--- /dev/null
+++ b/test/fixtures/dialects/databricks/use_catalog.yml
@@ -0,0 +1,42 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: ff36608a5d372437c1110254e09a2af934a263c0e5b4c0e9a6611df9ccc4e9a7
+file:
+- statement:
+    use_catalog_statement:
+    - keyword: USE
+    - keyword: CATALOG
+    - catalog_reference:
+        naked_identifier: catalog_name
+- statement_terminator: ;
+- statement:
+    use_catalog_statement:
+    - keyword: USE
+    - keyword: CATALOG
+    - catalog_reference:
+        naked_identifier: hive_metastore
+- statement_terminator: ;
+- statement:
+    use_catalog_statement:
+    - keyword: USE
+    - keyword: CATALOG
+    - catalog_reference:
+        quoted_identifier: "'hive_metastore'"
+- statement_terminator: ;
+- statement:
+    use_catalog_statement:
+    - keyword: USE
+    - keyword: CATALOG
+    - catalog_reference:
+        quoted_identifier: '`some_catalog`'
+- statement_terminator: ;
+- statement:
+    use_catalog_statement:
+    - keyword: USE
+    - keyword: CATALOG
+    - catalog_reference:
+        naked_identifier: some_cat
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/databricks/use_database.sql b/test/fixtures/dialects/databricks/use_database.sql
new file mode 100644
index 0000000..61b5a86
--- /dev/null
+++ b/test/fixtures/dialects/databricks/use_database.sql
@@ -0,0 +1,12 @@
+USE database_name;
+
+-- Use the 'userdb'
+USE userdb;
+
+-- Use the 'userdb1'
+USE userdb1;
+
+-- Keywords SCHEMA and DATABASE are interchangeable.
+USE DATABASE database_name;
+
+USE SCHEMA database_name;
diff --git a/test/fixtures/dialects/databricks/use_database.yml b/test/fixtures/dialects/databricks/use_database.yml
new file mode 100644
index 0000000..3ed7bc9
--- /dev/null
+++ b/test/fixtures/dialects/databricks/use_database.yml
@@ -0,0 +1,39 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: e0a4c00d164b502c2bde84b7825b98a56478c8eed83c13cd9dc9dcd6f0e2bbb2
+file:
+- statement:
+    use_statement:
+      keyword: USE
+      database_reference:
+        naked_identifier: database_name
+- statement_terminator: ;
+- statement:
+    use_statement:
+      keyword: USE
+      database_reference:
+        naked_identifier: userdb
+- statement_terminator: ;
+- statement:
+    use_statement:
+      keyword: USE
+      database_reference:
+        naked_identifier: userdb1
+- statement_terminator: ;
+- statement:
+    use_database_statement:
+    - keyword: USE
+    - keyword: DATABASE
+    - database_reference:
+        naked_identifier: database_name
+- statement_terminator: ;
+- statement:
+    use_database_statement:
+    - keyword: USE
+    - keyword: SCHEMA
+    - database_reference:
+        naked_identifier: database_name
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/db2/case.sql b/test/fixtures/dialects/db2/case.sql
new file mode 100644
index 0000000..d2f1b78
--- /dev/null
+++ b/test/fixtures/dialects/db2/case.sql
@@ -0,0 +1,6 @@
+SELECT CASE
+    WHEN ROLL = 1 THEN DAG
+    WHEN ROLL > 1 THEN DAG_MOD - 1 DAYS
+END
+FROM
+    MY_TABLE;
diff --git a/test/fixtures/dialects/db2/case.yml b/test/fixtures/dialects/db2/case.yml
new file mode 100644
index 0000000..0ccb582
--- /dev/null
+++ b/test/fixtures/dialects/db2/case.yml
@@ -0,0 +1,51 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: e3d61bb2d36284751454c0b350bb2ff492fec31530e7c3672c64f1caac175325
+file:
+  statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+            case_expression:
+            - keyword: CASE
+            - when_clause:
+              - keyword: WHEN
+              - expression:
+                  column_reference:
+                    naked_identifier: ROLL
+                  comparison_operator:
+                    raw_comparison_operator: '='
+                  numeric_literal: '1'
+              - keyword: THEN
+              - expression:
+                  column_reference:
+                    naked_identifier: DAG
+            - when_clause:
+              - keyword: WHEN
+              - expression:
+                  column_reference:
+                    naked_identifier: ROLL
+                  comparison_operator:
+                    raw_comparison_operator: '>'
+                  numeric_literal: '1'
+              - keyword: THEN
+              - expression:
+                  column_reference:
+                    naked_identifier: DAG_MOD
+                  binary_operator: '-'
+                  numeric_literal: '1'
+                  keyword: DAYS
+            - keyword: END
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: MY_TABLE
+  statement_terminator: ;
diff --git a/test/fixtures/dialects/db2/create_table_field_name_with_pound_sign.yml b/test/fixtures/dialects/db2/create_table_field_name_with_pound_sign.yml
index fac9209..857d28a 100644
--- a/test/fixtures/dialects/db2/create_table_field_name_with_pound_sign.yml
+++ b/test/fixtures/dialects/db2/create_table_field_name_with_pound_sign.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 10f3b4f58d751b37ee4aa9a901fdea77a7ec0175e149f9288a0168cf1462fdf5
+_hash: 13105ac0dbc90718b62d6e22bace4ccb56603dbc9629cffda3add9014a2307a1
 file:
   statement:
     create_table_statement:
@@ -17,39 +17,36 @@ file:
           naked_identifier: my_field_1#
           data_type:
             data_type_identifier: decimal
-            bracketed:
-            - start_bracket: (
-            - expression:
-                numeric_literal: '2'
-            - comma: ','
-            - expression:
-                numeric_literal: '0'
-            - end_bracket: )
+            bracketed_arguments:
+              bracketed:
+              - start_bracket: (
+              - numeric_literal: '2'
+              - comma: ','
+              - numeric_literal: '0'
+              - end_bracket: )
       - comma: ','
       - column_definition:
           naked_identifier: '#my_field_1'
           data_type:
             data_type_identifier: decimal
-            bracketed:
-            - start_bracket: (
-            - expression:
-                numeric_literal: '2'
-            - comma: ','
-            - expression:
-                numeric_literal: '0'
-            - end_bracket: )
+            bracketed_arguments:
+              bracketed:
+              - start_bracket: (
+              - numeric_literal: '2'
+              - comma: ','
+              - numeric_literal: '0'
+              - end_bracket: )
       - comma: ','
       - column_definition:
           naked_identifier: '#'
           data_type:
             data_type_identifier: decimal
-            bracketed:
-            - start_bracket: (
-            - expression:
-                numeric_literal: '2'
-            - comma: ','
-            - expression:
-                numeric_literal: '0'
-            - end_bracket: )
+            bracketed_arguments:
+              bracketed:
+              - start_bracket: (
+              - numeric_literal: '2'
+              - comma: ','
+              - numeric_literal: '0'
+              - end_bracket: )
       - end_bracket: )
   statement_terminator: ;
diff --git a/test/fixtures/dialects/db2/day_unit.sql b/test/fixtures/dialects/db2/day_unit.sql
new file mode 100644
index 0000000..3c2cade
--- /dev/null
+++ b/test/fixtures/dialects/db2/day_unit.sql
@@ -0,0 +1,6 @@
+SELECT CASE
+    WHEN ROLL = 1 THEN DAG
+    WHEN ROLL > 1 THEN DAG_MOD - 1 DAY
+END
+FROM
+    MY_TABLE;
diff --git a/test/fixtures/dialects/db2/day_unit.yml b/test/fixtures/dialects/db2/day_unit.yml
new file mode 100644
index 0000000..af1d3e1
--- /dev/null
+++ b/test/fixtures/dialects/db2/day_unit.yml
@@ -0,0 +1,51 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: ac1d01c1f619dd06023730cbe95c7e1485af36e400fb5488abe29aaf84f65736
+file:
+  statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+            case_expression:
+            - keyword: CASE
+            - when_clause:
+              - keyword: WHEN
+              - expression:
+                  column_reference:
+                    naked_identifier: ROLL
+                  comparison_operator:
+                    raw_comparison_operator: '='
+                  numeric_literal: '1'
+              - keyword: THEN
+              - expression:
+                  column_reference:
+                    naked_identifier: DAG
+            - when_clause:
+              - keyword: WHEN
+              - expression:
+                  column_reference:
+                    naked_identifier: ROLL
+                  comparison_operator:
+                    raw_comparison_operator: '>'
+                  numeric_literal: '1'
+              - keyword: THEN
+              - expression:
+                  column_reference:
+                    naked_identifier: DAG_MOD
+                  binary_operator: '-'
+                  numeric_literal: '1'
+                  keyword: DAY
+            - keyword: END
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: MY_TABLE
+  statement_terminator: ;
diff --git a/test/fixtures/dialects/db2/function_within_group.sql b/test/fixtures/dialects/db2/function_within_group.sql
new file mode 100644
index 0000000..445abc3
--- /dev/null
+++ b/test/fixtures/dialects/db2/function_within_group.sql
@@ -0,0 +1,4 @@
+SELECT
+    LISTAGG(A_COLUMN_NAME, 'X') WITHIN GROUP(ORDER BY A_COLUMN_NAME) AS MY_COLUMN
+FROM
+    A_TABLE
diff --git a/test/fixtures/dialects/db2/function_within_group.yml b/test/fixtures/dialects/db2/function_within_group.yml
new file mode 100644
index 0000000..85e69c2
--- /dev/null
+++ b/test/fixtures/dialects/db2/function_within_group.yml
@@ -0,0 +1,45 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: e52ad2a7bebefe270132e4f297ad537f3a8b1ef1f76c0e5a39757f83c170eeca
+file:
+  statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: LISTAGG
+            bracketed:
+            - start_bracket: (
+            - expression:
+                column_reference:
+                  naked_identifier: A_COLUMN_NAME
+            - comma: ','
+            - expression:
+                quoted_literal: "'X'"
+            - end_bracket: )
+            withingroup_clause:
+            - keyword: WITHIN
+            - keyword: GROUP
+            - bracketed:
+                start_bracket: (
+                orderby_clause:
+                - keyword: ORDER
+                - keyword: BY
+                - column_reference:
+                    naked_identifier: A_COLUMN_NAME
+                end_bracket: )
+          alias_expression:
+            keyword: AS
+            naked_identifier: MY_COLUMN
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: A_TABLE
diff --git a/test/fixtures/dialects/db2/over.sql b/test/fixtures/dialects/db2/over.sql
new file mode 100644
index 0000000..29626ee
--- /dev/null
+++ b/test/fixtures/dialects/db2/over.sql
@@ -0,0 +1,3 @@
+SELECT RANK() OVER (PARTITION BY ABCD ORDER BY EFGH DESC) AS A_RANK
+FROM
+    A_TABLE;
diff --git a/test/fixtures/dialects/db2/over.yml b/test/fixtures/dialects/db2/over.yml
new file mode 100644
index 0000000..533642b
--- /dev/null
+++ b/test/fixtures/dialects/db2/over.yml
@@ -0,0 +1,47 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 787b10f868c97797f33bd2fba027084919e3ab3beaa5f94f526a7bdd71d37f29
+file:
+  statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: RANK
+            bracketed:
+              start_bracket: (
+              end_bracket: )
+            over_clause:
+              keyword: OVER
+              bracketed:
+                start_bracket: (
+                window_specification:
+                  partitionby_clause:
+                  - keyword: PARTITION
+                  - keyword: BY
+                  - expression:
+                      column_reference:
+                        naked_identifier: ABCD
+                  orderby_clause:
+                  - keyword: ORDER
+                  - keyword: BY
+                  - column_reference:
+                      naked_identifier: EFGH
+                  - keyword: DESC
+                end_bracket: )
+          alias_expression:
+            keyword: AS
+            naked_identifier: A_RANK
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: A_TABLE
+  statement_terminator: ;
diff --git a/test/fixtures/dialects/duckdb/.sqlfluff b/test/fixtures/dialects/duckdb/.sqlfluff
new file mode 100644
index 0000000..b326f22
--- /dev/null
+++ b/test/fixtures/dialects/duckdb/.sqlfluff
@@ -0,0 +1,2 @@
+[sqlfluff]
+dialect = duckdb
diff --git a/test/fixtures/dialects/duckdb/group_order_by_all.sql b/test/fixtures/dialects/duckdb/group_order_by_all.sql
new file mode 100644
index 0000000..71db6fa
--- /dev/null
+++ b/test/fixtures/dialects/duckdb/group_order_by_all.sql
@@ -0,0 +1,34 @@
+SELECT
+    systems,
+    planets,
+    cities,
+    cantinas,
+    SUM(scum + villainy) as total_scum_and_villainy
+FROM star_wars_locations
+GROUP BY ALL
+;
+
+SELECT
+    * EXCLUDE (cantinas, booths, scum, villainy),
+    SUM(scum + villainy) as total_scum_and_villainy
+FROM star_wars_locations
+GROUP BY ALL
+;
+
+SELECT
+    age,
+    sum(civility) as total_civility
+FROM star_wars_universe
+GROUP BY ALL
+ORDER BY ALL
+;
+
+SELECT
+    x_wing,
+    proton_torpedoes,
+    --targeting_computer
+FROM luke_whats_wrong
+GROUP BY
+    x_wing,
+    proton_torpedoes,
+;
diff --git a/test/fixtures/dialects/duckdb/group_order_by_all.yml b/test/fixtures/dialects/duckdb/group_order_by_all.yml
new file mode 100644
index 0000000..691152b
--- /dev/null
+++ b/test/fixtures/dialects/duckdb/group_order_by_all.yml
@@ -0,0 +1,173 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 2669ec22912c0564cc026fb2472887b407e941b4790df8a2407f45fc0611f10a
+file:
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          column_reference:
+            naked_identifier: systems
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: planets
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: cities
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: cantinas
+      - comma: ','
+      - select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: SUM
+            bracketed:
+              start_bracket: (
+              expression:
+              - column_reference:
+                  naked_identifier: scum
+              - binary_operator: +
+              - column_reference:
+                  naked_identifier: villainy
+              end_bracket: )
+          alias_expression:
+            keyword: as
+            naked_identifier: total_scum_and_villainy
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: star_wars_locations
+      groupby_clause:
+      - keyword: GROUP
+      - keyword: BY
+      - keyword: ALL
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+          keyword: EXCLUDE
+          bracketed:
+          - start_bracket: (
+          - column_reference:
+              naked_identifier: cantinas
+          - comma: ','
+          - column_reference:
+              naked_identifier: booths
+          - comma: ','
+          - column_reference:
+              naked_identifier: scum
+          - comma: ','
+          - column_reference:
+              naked_identifier: villainy
+          - end_bracket: )
+      - comma: ','
+      - select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: SUM
+            bracketed:
+              start_bracket: (
+              expression:
+              - column_reference:
+                  naked_identifier: scum
+              - binary_operator: +
+              - column_reference:
+                  naked_identifier: villainy
+              end_bracket: )
+          alias_expression:
+            keyword: as
+            naked_identifier: total_scum_and_villainy
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: star_wars_locations
+      groupby_clause:
+      - keyword: GROUP
+      - keyword: BY
+      - keyword: ALL
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          column_reference:
+            naked_identifier: age
+      - comma: ','
+      - select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: sum
+            bracketed:
+              start_bracket: (
+              expression:
+                column_reference:
+                  naked_identifier: civility
+              end_bracket: )
+          alias_expression:
+            keyword: as
+            naked_identifier: total_civility
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: star_wars_universe
+      groupby_clause:
+      - keyword: GROUP
+      - keyword: BY
+      - keyword: ALL
+      orderby_clause:
+      - keyword: ORDER
+      - keyword: BY
+      - keyword: ALL
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          column_reference:
+            naked_identifier: x_wing
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: proton_torpedoes
+      - comma: ','
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: luke_whats_wrong
+      groupby_clause:
+      - keyword: GROUP
+      - keyword: BY
+      - column_reference:
+          naked_identifier: x_wing
+      - comma: ','
+      - column_reference:
+          naked_identifier: proton_torpedoes
+      - comma: ','
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/duckdb/list_struct.sql b/test/fixtures/dialects/duckdb/list_struct.sql
new file mode 100644
index 0000000..c062469
--- /dev/null
+++ b/test/fixtures/dialects/duckdb/list_struct.sql
@@ -0,0 +1,17 @@
+SELECT
+    ['A-Wing', 'B-Wing', 'X-Wing', 'Y-Wing'] as starfighter_list,
+    {name: 'Star Destroyer', common_misconceptions: 'Can''t in fact destroy a star'} as star_destroyer_facts
+;
+
+SELECT
+    starfighter_list[2:2] as dont_forget_the_b_wing
+FROM (SELECT ['A-Wing', 'B-Wing', 'X-Wing', 'Y-Wing'] as starfighter_list);
+
+SELECT 'I love you! I know'[:-3] as nearly_soloed;
+
+SELECT
+    planet.name,
+    planet."Amount of sand"
+FROM (SELECT {name: 'Tatooine', 'Amount of sand': 'High'} as planet)
+;
+
diff --git a/test/fixtures/dialects/duckdb/list_struct.yml b/test/fixtures/dialects/duckdb/list_struct.yml
new file mode 100644
index 0000000..7ad60b7
--- /dev/null
+++ b/test/fixtures/dialects/duckdb/list_struct.yml
@@ -0,0 +1,147 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: b65a1a966a8dd1bcc2bd1e9db40e00d2103e47e8ba47c4f5af5e3c957007a140
+file:
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          array_literal:
+          - start_square_bracket: '['
+          - quoted_literal: "'A-Wing'"
+          - comma: ','
+          - quoted_literal: "'B-Wing'"
+          - comma: ','
+          - quoted_literal: "'X-Wing'"
+          - comma: ','
+          - quoted_literal: "'Y-Wing'"
+          - end_square_bracket: ']'
+          alias_expression:
+            keyword: as
+            naked_identifier: starfighter_list
+      - comma: ','
+      - select_clause_element:
+          object_literal:
+          - start_curly_bracket: '{'
+          - object_literal_element:
+              naked_identifier: name
+              colon: ':'
+              quoted_literal: "'Star Destroyer'"
+          - comma: ','
+          - object_literal_element:
+              naked_identifier: common_misconceptions
+              colon: ':'
+              quoted_literal: "'Can''t in fact destroy a star'"
+          - end_curly_bracket: '}'
+          alias_expression:
+            keyword: as
+            naked_identifier: star_destroyer_facts
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+            column_reference:
+              naked_identifier: starfighter_list
+            array_accessor:
+            - start_square_bracket: '['
+            - numeric_literal: '2'
+            - slice: ':'
+            - numeric_literal: '2'
+            - end_square_bracket: ']'
+          alias_expression:
+            keyword: as
+            naked_identifier: dont_forget_the_b_wing
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              bracketed:
+                start_bracket: (
+                select_statement:
+                  select_clause:
+                    keyword: SELECT
+                    select_clause_element:
+                      array_literal:
+                      - start_square_bracket: '['
+                      - quoted_literal: "'A-Wing'"
+                      - comma: ','
+                      - quoted_literal: "'B-Wing'"
+                      - comma: ','
+                      - quoted_literal: "'X-Wing'"
+                      - comma: ','
+                      - quoted_literal: "'Y-Wing'"
+                      - end_square_bracket: ']'
+                      alias_expression:
+                        keyword: as
+                        naked_identifier: starfighter_list
+                end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+            quoted_literal: "'I love you! I know'"
+            array_accessor:
+              start_square_bracket: '['
+              slice: ':'
+              numeric_literal:
+                sign_indicator: '-'
+                numeric_literal: '3'
+              end_square_bracket: ']'
+          alias_expression:
+            keyword: as
+            naked_identifier: nearly_soloed
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          column_reference:
+          - naked_identifier: planet
+          - dot: .
+          - naked_identifier: name
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: planet
+            dot: .
+            quoted_identifier: '"Amount of sand"'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              bracketed:
+                start_bracket: (
+                select_statement:
+                  select_clause:
+                    keyword: SELECT
+                    select_clause_element:
+                      object_literal:
+                      - start_curly_bracket: '{'
+                      - object_literal_element:
+                          naked_identifier: name
+                          colon: ':'
+                          quoted_literal: "'Tatooine'"
+                      - comma: ','
+                      - object_literal_element:
+                        - quoted_literal: "'Amount of sand'"
+                        - colon: ':'
+                        - quoted_literal: "'High'"
+                      - end_curly_bracket: '}'
+                      alias_expression:
+                        keyword: as
+                        naked_identifier: planet
+                end_bracket: )
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/duckdb/select_exclude.sql b/test/fixtures/dialects/duckdb/select_exclude.sql
new file mode 100644
index 0000000..1279f6a
--- /dev/null
+++ b/test/fixtures/dialects/duckdb/select_exclude.sql
@@ -0,0 +1,9 @@
+SELECT * EXCLUDE (jar_jar_binks, midichlorians) FROM star_wars;
+
+SELECT
+    sw.* EXCLUDE (jar_jar_binks, midichlorians),
+    ff.* EXCLUDE cancellation
+FROM star_wars sw, firefly ff
+;
+
+SELECT * FROM star_wars;
diff --git a/test/fixtures/dialects/duckdb/select_exclude.yml b/test/fixtures/dialects/duckdb/select_exclude.yml
new file mode 100644
index 0000000..87a9b50
--- /dev/null
+++ b/test/fixtures/dialects/duckdb/select_exclude.yml
@@ -0,0 +1,95 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: a1d2d32acf940f030e8c387c2b2f487c10e99d359a2bfad44face163eb61b380
+file:
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+          keyword: EXCLUDE
+          bracketed:
+          - start_bracket: (
+          - column_reference:
+              naked_identifier: jar_jar_binks
+          - comma: ','
+          - column_reference:
+              naked_identifier: midichlorians
+          - end_bracket: )
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: star_wars
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              naked_identifier: sw
+              dot: .
+              star: '*'
+          keyword: EXCLUDE
+          bracketed:
+          - start_bracket: (
+          - column_reference:
+              naked_identifier: jar_jar_binks
+          - comma: ','
+          - column_reference:
+              naked_identifier: midichlorians
+          - end_bracket: )
+      - comma: ','
+      - select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              naked_identifier: ff
+              dot: .
+              star: '*'
+          keyword: EXCLUDE
+          column_reference:
+            naked_identifier: cancellation
+      from_clause:
+      - keyword: FROM
+      - from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: star_wars
+            alias_expression:
+              naked_identifier: sw
+      - comma: ','
+      - from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: firefly
+            alias_expression:
+              naked_identifier: ff
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: star_wars
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/duckdb/select_quoted.sql b/test/fixtures/dialects/duckdb/select_quoted.sql
new file mode 100644
index 0000000..4ca49d9
--- /dev/null
+++ b/test/fixtures/dialects/duckdb/select_quoted.sql
@@ -0,0 +1 @@
+SELECT count(*) FROM 'https://shell.duckdb.org/data/tpch/0_01/parquet/lineitem.parquet';
diff --git a/test/fixtures/dialects/duckdb/select_quoted.yml b/test/fixtures/dialects/duckdb/select_quoted.yml
new file mode 100644
index 0000000..98a0559
--- /dev/null
+++ b/test/fixtures/dialects/duckdb/select_quoted.yml
@@ -0,0 +1,27 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 41a79f9b8c4f36ac63b71783587e096a124c2d673dbc6a62081db7f6e06589a1
+file:
+  statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: count
+            bracketed:
+              start_bracket: (
+              star: '*'
+              end_bracket: )
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                quoted_identifier: "'https://shell.duckdb.org/data/tpch/0_01/parquet/lineitem.parquet'"
+  statement_terminator: ;
diff --git a/test/fixtures/dialects/duckdb/select_replace.sql b/test/fixtures/dialects/duckdb/select_replace.sql
new file mode 100644
index 0000000..a420805
--- /dev/null
+++ b/test/fixtures/dialects/duckdb/select_replace.sql
@@ -0,0 +1,4 @@
+SELECT
+    * REPLACE (movie_count+3 as movie_count, show_count*1000 as show_count)
+FROM star_wars_owned_by_disney
+;
diff --git a/test/fixtures/dialects/duckdb/select_replace.yml b/test/fixtures/dialects/duckdb/select_replace.yml
new file mode 100644
index 0000000..567d690
--- /dev/null
+++ b/test/fixtures/dialects/duckdb/select_replace.yml
@@ -0,0 +1,44 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: ec9e5ef917d8d10cde31c8c09c7bfa140ec0982fbbfd96e6da705eced0c579c0
+file:
+  statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+          keyword: REPLACE
+          bracketed:
+          - start_bracket: (
+          - expression:
+              column_reference:
+                naked_identifier: movie_count
+              binary_operator: +
+              numeric_literal: '3'
+          - alias_expression:
+              keyword: as
+              naked_identifier: movie_count
+          - comma: ','
+          - expression:
+              column_reference:
+                naked_identifier: show_count
+              binary_operator: '*'
+              numeric_literal: '1000'
+          - alias_expression:
+              keyword: as
+              naked_identifier: show_count
+          - end_bracket: )
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: star_wars_owned_by_disney
+  statement_terminator: ;
diff --git a/test/fixtures/dialects/exasol/AlterTableColumn.yml b/test/fixtures/dialects/exasol/AlterTableColumn.yml
index 895a66d..268cc7b 100644
--- a/test/fixtures/dialects/exasol/AlterTableColumn.yml
+++ b/test/fixtures/dialects/exasol/AlterTableColumn.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 830de392062d8919d6406b638f6a147ab37532248dd51fec82eaf3a04c3689c9
+_hash: 2b5949c81506c6ef3ba0649f65e32fd812eb7682e6080c9e181520ba389c9635
 file:
 - statement:
     alter_table_statement:
@@ -23,12 +23,13 @@ file:
               naked_identifier: new_dec
               data_type:
                 keyword: DECIMAL
-                bracketed:
-                - start_bracket: (
-                - numeric_literal: '18'
-                - comma: ','
-                - numeric_literal: '0'
-                - end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                  - start_bracket: (
+                  - numeric_literal: '18'
+                  - comma: ','
+                  - numeric_literal: '0'
+                  - end_bracket: )
 - statement_terminator: ;
 - statement:
     alter_table_statement:
@@ -46,10 +47,11 @@ file:
                 naked_identifier: new_char
                 data_type:
                   keyword: CHAR
-                  bracketed:
-                    start_bracket: (
-                    numeric_literal: '10'
-                    end_bracket: )
+                  bracketed_arguments:
+                    bracketed:
+                      start_bracket: (
+                      numeric_literal: '10'
+                      end_bracket: )
               column_constraint_segment:
                 keyword: DEFAULT
                 quoted_literal: "'some text'"
@@ -94,12 +96,13 @@ file:
             naked_identifier: i
             data_type:
               keyword: DECIMAL
-              bracketed:
-              - start_bracket: (
-              - numeric_literal: '10'
-              - comma: ','
-              - numeric_literal: '2'
-              - end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - numeric_literal: '10'
+                - comma: ','
+                - numeric_literal: '2'
+                - end_bracket: )
             end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -116,10 +119,11 @@ file:
             naked_identifier: j
             data_type:
               keyword: VARCHAR
-              bracketed:
-                start_bracket: (
-                numeric_literal: '5'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  numeric_literal: '5'
+                  end_bracket: )
             column_constraint_segment:
               keyword: DEFAULT
               quoted_literal: "'text'"
diff --git a/test/fixtures/dialects/exasol/AlterTableDistributePartition.yml b/test/fixtures/dialects/exasol/AlterTableDistributePartition.yml
index 0db26df..acd431c 100644
--- a/test/fixtures/dialects/exasol/AlterTableDistributePartition.yml
+++ b/test/fixtures/dialects/exasol/AlterTableDistributePartition.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 45da3e37ea8947db3458800b70ff413f4f91f92fc0b62d6df43b7f9611fc1d9e
+_hash: 143f272ccdb4017c7171fe3e38109963c67a2871f50313aad6815ea0c0c4a379
 file:
 - statement:
     alter_table_statement:
@@ -62,10 +62,10 @@ file:
         - comma: ','
         - column_reference:
             naked_identifier: DISTRIBUTE
-        - raw: BY
-        - raw: shop_id
+        - code: BY
+        - code: shop_id
         - comma: ','
-        - raw: branch_no
+        - code: branch_no
 - statement_terminator: ;
 - statement:
     alter_table_statement:
diff --git a/test/fixtures/dialects/exasol/CreateAdapterScriptStatement.yml b/test/fixtures/dialects/exasol/CreateAdapterScriptStatement.yml
index b68f595..9fa2ebe 100644
--- a/test/fixtures/dialects/exasol/CreateAdapterScriptStatement.yml
+++ b/test/fixtures/dialects/exasol/CreateAdapterScriptStatement.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: a05ded87cb8d9924b2553b00150ec636801eedd3ee900f8302035e191f68027d
+_hash: 21c681be1b651e921ff550b0fda29a63c79586371144328c69c414204140cd26
 file:
 - statement:
     create_adapter_script:
@@ -16,10 +16,10 @@ file:
     - keyword: AS
     - script_content:
       - raw: '%'
-      - raw: jar
-      - raw: hive_jdbc_adapter
+      - code: jar
+      - code: hive_jdbc_adapter
       - raw: .
-      - raw: jar
+      - code: jar
       - raw: ;
 - function_script_terminator: /
 - statement:
@@ -36,14 +36,14 @@ file:
       - naked_identifier: adapter_dummy
     - keyword: AS
     - script_content:
-      - raw: def
-      - raw: adapter_call
+      - code: def
+      - code: adapter_call
       - bracketed:
           start_bracket: (
-          raw: in_json
+          code: in_json
           end_bracket: )
       - raw: ':'
-      - raw: return
+      - code: return
       - double_quote: '"BLABLA"'
 - function_script_terminator: /
 - statement:
@@ -60,13 +60,13 @@ file:
       - naked_identifier: adapter_dummy
     - keyword: AS
     - script_content:
-      - raw: function
-      - raw: adapter_call
+      - code: function
+      - code: adapter_call
       - bracketed:
           start_bracket: (
-          raw: in_json
+          code: in_json
           end_bracket: )
       - raw: ':'
-      - raw: return
+      - code: return
       - single_quote: "'BLABLA'"
 - function_script_terminator: /
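This file shows the second recurring change: word-like tokens inside `script_content` (the Lua/Python/Java UDF bodies, which SQLFluff intentionally leaves unparsed) are now serialised as `code` rather than `raw`, while punctuation such as `.`, `:` and quotes keep their old types. A rough way to see the token types the lexer assigns, assuming `Lexer` is importable from `sqlfluff.core` with a `dialect` keyword as in the library's Python API:

    from sqlfluff.core import Lexer

    # Lex a fragment like the script bodies above. On 2.x, word tokens
    # such as `return` should report type "code" (earlier releases
    # labelled them "raw"); the quoted literal stays "single_quote".
    tokens, _violations = Lexer(dialect="exasol").lex("return 'BLABLA'")
    print([(seg.get_type(), seg.raw) for seg in tokens])
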
diff --git a/test/fixtures/dialects/exasol/CreateFunctionStatement.yml b/test/fixtures/dialects/exasol/CreateFunctionStatement.yml
index db416e1..4f48116 100644
--- a/test/fixtures/dialects/exasol/CreateFunctionStatement.yml
+++ b/test/fixtures/dialects/exasol/CreateFunctionStatement.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 2e5f4282870203bb218cfe9656c1475a83f328312317f4a6c17f56643b3e2a27
+_hash: f33f127aa0ec656ffa63587a2620ba4bdcbca0870d610d867b8b656a567b6784
 file:
 - statement:
     create_function_statement:
@@ -26,10 +26,11 @@ file:
     - keyword: RETURN
     - data_type:
         keyword: VARCHAR
-        bracketed:
-          start_bracket: (
-          numeric_literal: '10'
-          end_bracket: )
+        bracketed_arguments:
+          bracketed:
+            start_bracket: (
+            numeric_literal: '10'
+            end_bracket: )
     - keyword: AS
     - variable: res
     - data_type:
@@ -79,10 +80,11 @@ file:
     - keyword: RETURN
     - data_type:
         keyword: VARCHAR
-        bracketed:
-          start_bracket: (
-          numeric_literal: '10'
-          end_bracket: )
+        bracketed_arguments:
+          bracketed:
+            start_bracket: (
+            numeric_literal: '10'
+            end_bracket: )
     - keyword: AS
     - variable: res
     - data_type:
@@ -126,10 +128,11 @@ file:
     - keyword: RETURN
     - data_type:
         keyword: VARCHAR
-        bracketed:
-          start_bracket: (
-          numeric_literal: '10'
-          end_bracket: )
+        bracketed_arguments:
+          bracketed:
+            start_bracket: (
+            numeric_literal: '10'
+            end_bracket: )
     - keyword: AS
     - variable: res
     - data_type:
@@ -183,10 +186,11 @@ file:
     - keyword: RETURN
     - data_type:
         keyword: VARCHAR
-        bracketed:
-          start_bracket: (
-          numeric_literal: '10'
-          end_bracket: )
+        bracketed_arguments:
+          bracketed:
+            start_bracket: (
+            numeric_literal: '10'
+            end_bracket: )
     - keyword: AS
     - variable: res
     - data_type:
@@ -221,10 +225,11 @@ file:
     - keyword: RETURN
     - data_type:
         keyword: VARCHAR
-        bracketed:
-          start_bracket: (
-          numeric_literal: '10'
-          end_bracket: )
+        bracketed_arguments:
+          bracketed:
+            start_bracket: (
+            numeric_literal: '10'
+            end_bracket: )
     - keyword: AS
     - variable: res
     - data_type:
@@ -307,10 +312,11 @@ file:
     - keyword: RETURN
     - data_type:
         keyword: VARCHAR
-        bracketed:
-          start_bracket: (
-          numeric_literal: '10'
-          end_bracket: )
+        bracketed_arguments:
+          bracketed:
+            start_bracket: (
+            numeric_literal: '10'
+            end_bracket: )
     - keyword: AS
     - variable: res
     - data_type:
@@ -364,10 +370,11 @@ file:
     - keyword: RETURN
     - data_type:
         keyword: VARCHAR
-        bracketed:
-          start_bracket: (
-          numeric_literal: '10'
-          end_bracket: )
+        bracketed_arguments:
+          bracketed:
+            start_bracket: (
+            numeric_literal: '10'
+            end_bracket: )
     - keyword: AS
     - variable: res
     - data_type:
@@ -420,10 +427,11 @@ file:
     - keyword: RETURN
     - data_type:
         keyword: VARCHAR
-        bracketed:
-          start_bracket: (
-          numeric_literal: '10'
-          end_bracket: )
+        bracketed_arguments:
+          bracketed:
+            start_bracket: (
+            numeric_literal: '10'
+            end_bracket: )
     - keyword: AS
     - variable: res
     - data_type:
@@ -488,34 +496,38 @@ file:
       - naked_identifier: p1
       - data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '6'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '6'
+              end_bracket: )
       - comma: ','
       - naked_identifier: p2
       - data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '10'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '10'
+              end_bracket: )
       - end_bracket: )
     - keyword: RETURN
     - data_type:
         keyword: VARCHAR
-        bracketed:
-          start_bracket: (
-          numeric_literal: '20'
-          end_bracket: )
+        bracketed_arguments:
+          bracketed:
+            start_bracket: (
+            numeric_literal: '20'
+            end_bracket: )
     - keyword: IS
     - variable: res
     - data_type:
         keyword: VARCHAR
-        bracketed:
-          start_bracket: (
-          numeric_literal: '20'
-          end_bracket: )
+        bracketed_arguments:
+          bracketed:
+            start_bracket: (
+            numeric_literal: '20'
+            end_bracket: )
     - statement_terminator: ;
     - keyword: BEGIN
     - function_body:
diff --git a/test/fixtures/dialects/exasol/CreateLuaScriptBracket.yml b/test/fixtures/dialects/exasol/CreateLuaScriptBracket.yml
index e261816..998e2a4 100644
--- a/test/fixtures/dialects/exasol/CreateLuaScriptBracket.yml
+++ b/test/fixtures/dialects/exasol/CreateLuaScriptBracket.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 8fba3a9ad0d049673ca3be918a13aaf02bd6e1cfde411d727ec673f140da6859
+_hash: 5849678931aab62be6c402719b44bf6c15d605858133f49b435e6cd5c6a69f57
 file:
   statement:
     create_scripting_lua_script:
@@ -20,23 +20,23 @@ file:
     - keyword: ROWCOUNT
     - keyword: AS
     - script_content:
-      - raw: local
-      - raw: _stmt
+      - code: local
+      - code: _stmt
       - raw: '='
       - raw: '[[SOME ASSIGNMENT WITH OPEN BRACKET ( ]]'
-      - raw: x
+      - code: x
       - raw: '='
       - numeric_literal: '1'
-      - raw: local
-      - raw: _stmt
+      - code: local
+      - code: _stmt
       - raw: '='
-      - raw: _stmt
+      - code: _stmt
       - range_operator: ..
       - raw: '[[ ) ]]'
-      - raw: local
-      - raw: _nsted
+      - code: local
+      - code: _nsted
       - raw: '='
       - raw: '[=[one ([[two]] one]=]'
-      - raw: return
+      - code: return
       - numeric_literal: '1'
   function_script_terminator: /
diff --git a/test/fixtures/dialects/exasol/CreatePythonScalarScript.yml b/test/fixtures/dialects/exasol/CreatePythonScalarScript.yml
index e048b7b..c65e053 100644
--- a/test/fixtures/dialects/exasol/CreatePythonScalarScript.yml
+++ b/test/fixtures/dialects/exasol/CreatePythonScalarScript.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: bc371af29b8f949369fa476a81b009cf89d025b40ba61cda52e3b086c09e6c10
+_hash: 70cadc1549c0161178ee90989a721583de3c3d84a038e36ab0c299c2fbb0a25a
 file:
   statement:
     create_udf_script:
@@ -23,28 +23,31 @@ file:
           naked_identifier: JSON_STR
           data_type:
             keyword: VARCHAR
-            bracketed:
-              start_bracket: (
-              numeric_literal: '2000000'
-              end_bracket: )
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
+                numeric_literal: '2000000'
+                end_bracket: )
       - comma: ','
       - column_datatype_definition:
           naked_identifier: LANGUAGE_KEY
           data_type:
             keyword: VARCHAR
-            bracketed:
-              start_bracket: (
-              numeric_literal: '50'
-              end_bracket: )
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
+                numeric_literal: '50'
+                end_bracket: )
       - comma: ','
       - column_datatype_definition:
           naked_identifier: TXT_KEY
           data_type:
             keyword: VARCHAR
-            bracketed:
-              start_bracket: (
-              numeric_literal: '50'
-              end_bracket: )
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
+                numeric_literal: '50'
+                end_bracket: )
       - end_bracket: )
     - emits_segment:
         keyword: EMITS
@@ -54,10 +57,11 @@ file:
             naked_identifier: X
             data_type:
               keyword: VARCHAR
-              bracketed:
-                start_bracket: (
-                numeric_literal: '2000000'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  numeric_literal: '2000000'
+                  end_bracket: )
           end_bracket: )
     - keyword: AS
     - script_content:
@@ -65,32 +69,32 @@ file:
           \    e.g.:\n    SELECT MYSCHEMA.MYPYTHONSCRIPT(\n            '[{\""
       - raw: '@lang'
       - double_quote: '":"'
-      - raw: de
+      - code: de
       - raw: '-'
-      - raw: DE
+      - code: DE
       - double_quote: '","'
       - raw: $
       - double_quote: '":"'
-      - raw: Krztxt
+      - code: Krztxt
       - double_quote: '"}, {"'
       - raw: '@lang'
       - double_quote: '":"'
-      - raw: en
+      - code: en
       - raw: '-'
-      - raw: GB
+      - code: GB
       - double_quote: '","'
       - raw: $
       - double_quote: '":"'
-      - raw: Shrttxt
+      - code: Shrttxt
       - double_quote: "\"}]',\n            '@lang',\n            '$'\n        );\n\
           \ ====================================================================*/\n\
           \"\"\""
-      - raw: def
-      - raw: run
+      - code: def
+      - code: run
       - bracketed:
           start_bracket: (
-          raw: ctx
+          code: ctx
           end_bracket: )
       - raw: ':'
-      - raw: pass
+      - code: pass
   function_script_terminator: /
diff --git a/test/fixtures/dialects/exasol/CreateScriptingLuaScriptStatement1.yml b/test/fixtures/dialects/exasol/CreateScriptingLuaScriptStatement1.yml
index ba6aff3..91b3b09 100644
--- a/test/fixtures/dialects/exasol/CreateScriptingLuaScriptStatement1.yml
+++ b/test/fixtures/dialects/exasol/CreateScriptingLuaScriptStatement1.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 4dc08e0d689650d94babb10a98c0f26362fbb1b310d599db7af0ad5b134df2f3
+_hash: c6b3b602f42644ffabb641a6b69638725223a8bdc883138162c62d81270aa72f
 file:
 - statement:
     create_scripting_lua_script:
@@ -18,7 +18,7 @@ file:
       - naked_identifier: hello
     - keyword: AS
     - script_content:
-        raw: return
+        code: return
         single_quote: "'HELLO'"
 - function_script_terminator: /
 - statement:
@@ -34,6 +34,6 @@ file:
       - naked_identifier: world
     - keyword: AS
     - script_content:
-        raw: return
+        code: return
         single_quote: "'WORLD'"
 - function_script_terminator: /
diff --git a/test/fixtures/dialects/exasol/CreateScriptingLuaScriptStatement2.yml b/test/fixtures/dialects/exasol/CreateScriptingLuaScriptStatement2.yml
index 0d392fe..4da1ffa 100644
--- a/test/fixtures/dialects/exasol/CreateScriptingLuaScriptStatement2.yml
+++ b/test/fixtures/dialects/exasol/CreateScriptingLuaScriptStatement2.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 812f438105868e17820fc25d3e4b129bd7c83a3f0079fb30e5dea880498a1d56
+_hash: ac623c18e014b92b3b4fe3853fefff9fab178547864151bd583341e180cceb3b
 file:
   statement:
     create_scripting_lua_script:
@@ -21,6 +21,6 @@ file:
         end_bracket: )
     - keyword: AS
     - script_content:
-        raw: return
+        code: return
         single_quote: "'HELLO'"
   function_script_terminator: /
diff --git a/test/fixtures/dialects/exasol/CreateScriptingLuaScriptStatement3.yml b/test/fixtures/dialects/exasol/CreateScriptingLuaScriptStatement3.yml
index 3908e0c..b5f2776 100644
--- a/test/fixtures/dialects/exasol/CreateScriptingLuaScriptStatement3.yml
+++ b/test/fixtures/dialects/exasol/CreateScriptingLuaScriptStatement3.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 187c7880e639ffcad489e0a8a61fe7b67afdfed0aec79426ca1666ff4e0d039d
+_hash: 661aa876a8c58d47fd402f71ddf9df1cacc9b4c1b942a198feaa54eb880592f6
 file:
   statement:
     create_scripting_lua_script:
@@ -21,39 +21,39 @@ file:
       - end_bracket: )
     - keyword: AS
     - script_content:
-      - raw: import
+      - code: import
       - bracketed:
           start_bracket: (
           single_quote: "'function_lib'"
           end_bracket: )
-      - raw: lowest
+      - code: lowest
       - comma: ','
-      - raw: highest
+      - code: highest
       - raw: '='
-      - raw: function_lib
+      - code: function_lib
       - raw: .
-      - raw: min_max
+      - code: min_max
       - bracketed:
         - start_bracket: (
-        - raw: param1
+        - code: param1
         - comma: ','
-        - raw: param2
+        - code: param2
         - comma: ','
-        - raw: param3
+        - code: param3
         - end_bracket: )
-      - raw: query
+      - code: query
       - bracketed:
         - start_bracket: (
         - raw: '[[INSERT INTO t VALUES (:x, :y)]]'
         - comma: ','
         - start_curly_bracket: '{'
-        - raw: x
+        - code: x
         - raw: '='
-        - raw: lowest
+        - code: lowest
         - comma: ','
-        - raw: y
+        - code: y
         - raw: '='
-        - raw: highest
+        - code: highest
         - end_curly_bracket: '}'
         - end_bracket: )
   function_script_terminator: /
diff --git a/test/fixtures/dialects/exasol/CreateTableStatement.yml b/test/fixtures/dialects/exasol/CreateTableStatement.yml
index 124d206..b7eccbb 100644
--- a/test/fixtures/dialects/exasol/CreateTableStatement.yml
+++ b/test/fixtures/dialects/exasol/CreateTableStatement.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 0e42fd85a50adbe93a29b3dd9037595c4f09a3a7a5c67c84eebd2daa2369f7cd
+_hash: 899769991a0703ecd90c18a7162dc7f624521b1cebf46d87f2d18368c7461560
 file:
 - statement:
     create_table_statement:
@@ -21,10 +21,11 @@ file:
               naked_identifier: a
               data_type:
               - keyword: VARCHAR
-              - bracketed:
-                  start_bracket: (
-                  numeric_literal: '20'
-                  end_bracket: )
+              - bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '20'
+                    end_bracket: )
               - keyword: UTF8
       - comma: ','
       - table_content_definition:
@@ -33,12 +34,13 @@ file:
               naked_identifier: b
               data_type:
                 keyword: DECIMAL
-                bracketed:
-                - start_bracket: (
-                - numeric_literal: '24'
-                - comma: ','
-                - numeric_literal: '4'
-                - end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                  - start_bracket: (
+                  - numeric_literal: '24'
+                  - comma: ','
+                  - numeric_literal: '4'
+                  - end_bracket: )
             column_constraint_segment:
               table_constraint_definition:
               - keyword: NOT
@@ -288,10 +290,11 @@ file:
               naked_identifier: country
               data_type:
                 keyword: VARCHAR
-                bracketed:
-                  start_bracket: (
-                  numeric_literal: '40'
-                  end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '40'
+                    end_bracket: )
       - comma: ','
       - table_content_definition:
           table_constraint_definition:
@@ -360,10 +363,11 @@ file:
               naked_identifier: b
               data_type:
                 keyword: VARCHAR
-                bracketed:
-                  start_bracket: (
-                  numeric_literal: '20'
-                  end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '20'
+                    end_bracket: )
       - end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -385,12 +389,13 @@ file:
               naked_identifier: ID
               data_type:
                 keyword: DECIMAL
-                bracketed:
-                - start_bracket: (
-                - numeric_literal: '18'
-                - comma: ','
-                - numeric_literal: '0'
-                - end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                  - start_bracket: (
+                  - numeric_literal: '18'
+                  - comma: ','
+                  - numeric_literal: '0'
+                  - end_bracket: )
             column_constraint_segment:
               keyword: IDENTITY
               table_constraint_definition:
@@ -424,12 +429,13 @@ file:
               naked_identifier: ID
               data_type:
                 keyword: DECIMAL
-                bracketed:
-                - start_bracket: (
-                - numeric_literal: '18'
-                - comma: ','
-                - numeric_literal: '0'
-                - end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                  - start_bracket: (
+                  - numeric_literal: '18'
+                  - comma: ','
+                  - numeric_literal: '0'
+                  - end_bracket: )
       - comma: ','
       - table_content_definition:
           column_definition:
@@ -437,10 +443,11 @@ file:
               naked_identifier: C1
               data_type:
                 keyword: CHAR
-                bracketed:
-                  start_bracket: (
-                  numeric_literal: '1'
-                  end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '1'
+                    end_bracket: )
       - comma: ','
       - table_content_definition:
           table_constraint_definition:
@@ -470,12 +477,13 @@ file:
               naked_identifier: ID
               data_type:
                 keyword: DECIMAL
-                bracketed:
-                - start_bracket: (
-                - numeric_literal: '18'
-                - comma: ','
-                - numeric_literal: '0'
-                - end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                  - start_bracket: (
+                  - numeric_literal: '18'
+                  - comma: ','
+                  - numeric_literal: '0'
+                  - end_bracket: )
       - comma: ','
       - table_content_definition:
           column_definition:
@@ -483,10 +491,11 @@ file:
               naked_identifier: C1
               data_type:
                 keyword: CHAR
-                bracketed:
-                  start_bracket: (
-                  numeric_literal: '1'
-                  end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '1'
+                    end_bracket: )
       - comma: ','
       - table_content_definition:
           table_constraint_definition:
@@ -517,10 +526,11 @@ file:
               naked_identifier: C1
               data_type:
                 keyword: CHAR
-                bracketed:
-                  start_bracket: (
-                  numeric_literal: '1'
-                  end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '1'
+                    end_bracket: )
             column_constraint_segment:
               table_constraint_definition:
               - keyword: CONSTRAINT
diff --git a/test/fixtures/dialects/exasol/CreateUDFScriptDotSyntax.yml b/test/fixtures/dialects/exasol/CreateUDFScriptDotSyntax.yml
index 1f25e0d..ab940e7 100644
--- a/test/fixtures/dialects/exasol/CreateUDFScriptDotSyntax.yml
+++ b/test/fixtures/dialects/exasol/CreateUDFScriptDotSyntax.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: fe6a0de1e7a84e6391aa1e748d0ba80606bb3b51c01ec820b0527d508f4a258e
+_hash: 2328598c88243090b7270d3bdf4abfc8ec62e8968f0c29868df6e1a25dd96655
 file:
   statement:
     create_udf_script:
@@ -25,20 +25,20 @@ file:
           end_bracket: )
     - keyword: AS
     - script_content:
-      - raw: def
-      - raw: run
+      - code: def
+      - code: run
       - bracketed:
           start_bracket: (
-          raw: ctx
+          code: ctx
           end_bracket: )
       - raw: ':'
-      - raw: ctx
+      - code: ctx
       - raw: .
-      - raw: emit
+      - code: emit
       - bracketed:
         - start_bracket: (
-        - raw: 'True'
+        - code: 'True'
         - comma: ','
-        - raw: 'False'
+        - code: 'False'
         - end_bracket: )
   function_script_terminator: /
diff --git a/test/fixtures/dialects/exasol/CreateUDFScriptStatement1.yml b/test/fixtures/dialects/exasol/CreateUDFScriptStatement1.yml
index b16fb1a..6f8991e 100644
--- a/test/fixtures/dialects/exasol/CreateUDFScriptStatement1.yml
+++ b/test/fixtures/dialects/exasol/CreateUDFScriptStatement1.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 46272d132a2fa57493194874d7f88c52519771bec51ef821e1c31bbb33d4afb4
+_hash: a1731e61d1110ed23395aab516655bdfc035804b79aa6e109dc032043c38700a
 file:
 - statement:
     create_udf_script:
@@ -35,44 +35,44 @@ file:
         keyword: DOUBLE
     - keyword: AS
     - script_content:
-      - raw: function
-      - raw: run
+      - code: function
+      - code: run
       - bracketed:
           start_bracket: (
-          raw: ctx
+          code: ctx
           end_bracket: )
-      - raw: if
-      - raw: ctx
+      - code: if
+      - code: ctx
       - raw: .
-      - raw: a
+      - code: a
       - raw: '='
       - raw: '='
-      - raw: nil
-      - raw: or
-      - raw: ctx
+      - code: nil
+      - code: or
+      - code: ctx
       - raw: .
-      - raw: b
+      - code: b
       - raw: '='
       - raw: '='
-      - raw: nil
-      - raw: then
-      - raw: return
-      - raw: 'NULL'
-      - raw: end
-      - raw: return
+      - code: nil
+      - code: then
+      - code: return
+      - code: 'NULL'
+      - code: end
+      - code: return
       - bracketed:
         - start_bracket: (
-        - raw: ctx
+        - code: ctx
         - raw: .
-        - raw: a
+        - code: a
         - raw: +
-        - raw: ctx
+        - code: ctx
         - raw: .
-        - raw: b
+        - code: b
         - end_bracket: )
       - raw: /
       - numeric_literal: '2'
-      - raw: end
+      - code: end
 - function_script_terminator: /
 - statement:
     create_udf_script:
@@ -104,47 +104,47 @@ file:
         keyword: DOUBLE
     - keyword: AS
     - script_content:
-      - raw: function
-      - raw: run
+      - code: function
+      - code: run
       - bracketed:
           start_bracket: (
-          raw: ctx
+          code: ctx
           end_bracket: )
-      - raw: if
-      - raw: ctx
+      - code: if
+      - code: ctx
       - raw: .
-      - raw: a
+      - code: a
       - raw: '='
       - raw: '='
-      - raw: nil
-      - raw: or
-      - raw: ctx
+      - code: nil
+      - code: or
+      - code: ctx
       - raw: .
-      - raw: b
+      - code: b
       - raw: '='
       - raw: '='
-      - raw: nil
-      - raw: then
-      - raw: return
-      - raw: 'NULL'
-      - raw: end
-      - raw: x
+      - code: nil
+      - code: then
+      - code: return
+      - code: 'NULL'
+      - code: end
+      - code: x
       - raw: '='
       - numeric_literal: '10'
       - raw: /
       - numeric_literal: '2'
-      - raw: return
+      - code: return
       - bracketed:
         - start_bracket: (
-        - raw: ctx
+        - code: ctx
         - raw: .
-        - raw: a
+        - code: a
         - raw: +
-        - raw: ctx
+        - code: ctx
         - raw: .
-        - raw: b
+        - code: b
         - end_bracket: )
       - raw: /
       - numeric_literal: '2'
-      - raw: end
+      - code: end
 - function_script_terminator: /
diff --git a/test/fixtures/dialects/exasol/CreateUDFScriptStatement2.yml b/test/fixtures/dialects/exasol/CreateUDFScriptStatement2.yml
index 3df2534..269f729 100644
--- a/test/fixtures/dialects/exasol/CreateUDFScriptStatement2.yml
+++ b/test/fixtures/dialects/exasol/CreateUDFScriptStatement2.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 86052bc2315a44a9fcd00b91f5e2755f4fc19b6d90681003caecde2fc5efaa43
+_hash: 817327c1c70904715f4d2304fc74a2cfc8a95e8e279203b30f08bc4e08337296
 file:
   statement:
     create_udf_script:
@@ -19,10 +19,11 @@ file:
           naked_identifier: w
           data_type:
             keyword: varchar
-            bracketed:
-              start_bracket: (
-              numeric_literal: '10000'
-              end_bracket: )
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
+                numeric_literal: '10000'
+                end_bracket: )
         end_bracket: )
     - emits_segment:
         keyword: EMITS
@@ -32,57 +33,58 @@ file:
             naked_identifier: words
             data_type:
               keyword: varchar
-              bracketed:
-                start_bracket: (
-                numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  numeric_literal: '100'
+                  end_bracket: )
           end_bracket: )
     - keyword: AS
     - script_content:
-      - raw: function
-      - raw: run
+      - code: function
+      - code: run
       - bracketed:
           start_bracket: (
-          raw: ctx
+          code: ctx
           end_bracket: )
-      - raw: local
-      - raw: word
+      - code: local
+      - code: word
       - raw: '='
-      - raw: ctx
+      - code: ctx
       - raw: .
-      - raw: w
-      - raw: if
+      - code: w
+      - code: if
       - bracketed:
         - start_bracket: (
-        - raw: word
+        - code: word
         - like_operator: '~'
         - raw: '='
-        - raw: 'null'
+        - code: 'null'
         - end_bracket: )
-      - raw: then
-      - raw: for
-      - raw: i
-      - raw: in
-      - raw: unicode
+      - code: then
+      - code: for
+      - code: i
+      - code: in
+      - code: unicode
       - raw: .
-      - raw: utf8
+      - code: utf8
       - raw: .
-      - raw: gmatch
+      - code: gmatch
       - bracketed:
           start_bracket: (
-          raw: word
+          code: word
           comma: ','
           single_quote: "'([%w%p]+)'"
           end_bracket: )
-      - raw: do
-      - raw: ctx
+      - code: do
+      - code: ctx
       - raw: .
-      - raw: emit
+      - code: emit
       - bracketed:
           start_bracket: (
-          raw: i
+          code: i
           end_bracket: )
-      - raw: end
-      - raw: end
-      - raw: end
+      - code: end
+      - code: end
+      - code: end
   function_script_terminator: /
diff --git a/test/fixtures/dialects/exasol/CreateUDFScriptStatement3.yml b/test/fixtures/dialects/exasol/CreateUDFScriptStatement3.yml
index 3b7ed98..76b948f 100644
--- a/test/fixtures/dialects/exasol/CreateUDFScriptStatement3.yml
+++ b/test/fixtures/dialects/exasol/CreateUDFScriptStatement3.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: b7b362ba5c75e1cbb8085d85603dd790a88a88556769e7657142f83a9827c91e
+_hash: 30ca3566a9d1f58588b8995fd797b318f9ed602597266f8c1ca27ddcf6316191
 file:
   statement:
     create_udf_script:
@@ -25,12 +25,12 @@ file:
         keyword: INT
     - keyword: AS
     - script_content:
-      - raw: def
-      - raw: helloWorld
+      - code: def
+      - code: helloWorld
       - bracketed:
           start_bracket: (
           end_bracket: )
       - raw: ':'
-      - raw: return
+      - code: return
       - double_quote: '"Hello Python3 World!"'
   function_script_terminator: /
diff --git a/test/fixtures/dialects/exasol/CreateUDFScriptStatement4.yml b/test/fixtures/dialects/exasol/CreateUDFScriptStatement4.yml
index c54a71e..1559e4b 100644
--- a/test/fixtures/dialects/exasol/CreateUDFScriptStatement4.yml
+++ b/test/fixtures/dialects/exasol/CreateUDFScriptStatement4.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 4052e3a94919da01eaac14af07687ab6822cc1c2429e9c9adaae17eaa401b7b7
+_hash: 43864218bc65bd6350e9e855ee29ffa86b6e79868b89368ac9081f07e2c942f4
 file:
   statement:
     create_udf_script:
@@ -23,32 +23,33 @@ file:
     - keyword: RETURNS
     - data_type:
         keyword: VARCHAR
-        bracketed:
-          start_bracket: (
-          numeric_literal: '2000'
-          end_bracket: )
+        bracketed_arguments:
+          bracketed:
+            start_bracket: (
+            numeric_literal: '2000'
+            end_bracket: )
     - keyword: AS
     - script_content:
-      - raw: l
+      - code: l
       - raw: '='
-      - raw: exa
+      - code: exa
       - raw: .
-      - raw: import_script
+      - code: import_script
       - bracketed:
           start_bracket: (
           single_quote: "'LIB.MYLIB'"
           end_bracket: )
-      - raw: def
-      - raw: run
+      - code: def
+      - code: run
       - bracketed:
           start_bracket: (
-          raw: ctx
+          code: ctx
           end_bracket: )
       - raw: ':'
-      - raw: return
-      - raw: l
+      - code: return
+      - code: l
       - raw: .
-      - raw: helloWorld
+      - code: helloWorld
       - bracketed:
           start_bracket: (
           end_bracket: )
diff --git a/test/fixtures/dialects/exasol/CreateUDFScriptStatement5.yml b/test/fixtures/dialects/exasol/CreateUDFScriptStatement5.yml
index 08c151e..0a898c1 100644
--- a/test/fixtures/dialects/exasol/CreateUDFScriptStatement5.yml
+++ b/test/fixtures/dialects/exasol/CreateUDFScriptStatement5.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 7ee18c4b7ed0dcd5468b9d8f25df98314ea7d2afa636921faadab1f8f650f7f4
+_hash: 516b99c05cfd78c9be40a0abc907b247f8acd0357f0512e5ac520936284a06eb
 file:
   statement:
     create_udf_script:
@@ -23,23 +23,24 @@ file:
     - keyword: RETURNS
     - data_type:
         keyword: VARCHAR
-        bracketed:
-          start_bracket: (
-          numeric_literal: '2000'
-          end_bracket: )
+        bracketed_arguments:
+          bracketed:
+            start_bracket: (
+            numeric_literal: '2000'
+            end_bracket: )
     - keyword: AS
     - script_content:
-      - raw: class
-      - raw: MYLIB
+      - code: class
+      - code: MYLIB
       - start_curly_bracket: '{'
-      - raw: static
-      - raw: String
-      - raw: helloWorld
+      - code: static
+      - code: String
+      - code: helloWorld
       - bracketed:
           start_bracket: (
           end_bracket: )
       - start_curly_bracket: '{'
-      - raw: return
+      - code: return
       - double_quote: '"Hello Java World!"'
       - raw: ;
       - end_curly_bracket: '}'
diff --git a/test/fixtures/dialects/exasol/DataTypeTest.yml b/test/fixtures/dialects/exasol/DataTypeTest.yml
index 90ee519..bb8fa09 100644
--- a/test/fixtures/dialects/exasol/DataTypeTest.yml
+++ b/test/fixtures/dialects/exasol/DataTypeTest.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: f3784e1b3ce39b615798a7d2d7404069755635626ee4eaa2404fc106f68475a1
+_hash: 1145859a4e145cf9c986fec7cf06740385f18279926d2013bca49b17377c8f26
 file:
 - statement:
     create_table_statement:
@@ -35,10 +35,11 @@ file:
               naked_identifier: c1
               data_type:
                 keyword: DECIMAL
-                bracketed:
-                  start_bracket: (
-                  numeric_literal: '10'
-                  end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '10'
+                    end_bracket: )
         end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -55,12 +56,13 @@ file:
               naked_identifier: c1
               data_type:
                 keyword: DECIMAL
-                bracketed:
-                - start_bracket: (
-                - numeric_literal: '10'
-                - comma: ','
-                - numeric_literal: '2'
-                - end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                  - start_bracket: (
+                  - numeric_literal: '10'
+                  - comma: ','
+                  - numeric_literal: '2'
+                  - end_bracket: )
         end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -77,12 +79,13 @@ file:
               naked_identifier: c1
               data_type:
                 keyword: DEC
-                bracketed:
-                - start_bracket: (
-                - numeric_literal: '10'
-                - comma: ','
-                - numeric_literal: '2'
-                - end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                  - start_bracket: (
+                  - numeric_literal: '10'
+                  - comma: ','
+                  - numeric_literal: '2'
+                  - end_bracket: )
         end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -99,10 +102,11 @@ file:
               naked_identifier: c1
               data_type:
                 keyword: NUMERIC
-                bracketed:
-                  start_bracket: (
-                  numeric_literal: '10'
-                  end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '10'
+                    end_bracket: )
         end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -119,12 +123,13 @@ file:
               naked_identifier: c1
               data_type:
                 keyword: NUMBER
-                bracketed:
-                - start_bracket: (
-                - numeric_literal: '10'
-                - comma: ','
-                - numeric_literal: '2'
-                - end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                  - start_bracket: (
+                  - numeric_literal: '10'
+                  - comma: ','
+                  - numeric_literal: '2'
+                  - end_bracket: )
         end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -387,10 +392,11 @@ file:
               data_type:
               - keyword: INTERVAL
               - keyword: YEAR
-              - bracketed:
-                  start_bracket: (
-                  numeric_literal: '1'
-                  end_bracket: )
+              - bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '1'
+                    end_bracket: )
               - keyword: TO
               - keyword: MONTH
         end_bracket: )
@@ -410,16 +416,18 @@ file:
               data_type:
               - keyword: INTERVAL
               - keyword: DAY
-              - bracketed:
-                  start_bracket: (
-                  numeric_literal: '2'
-                  end_bracket: )
+              - bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '2'
+                    end_bracket: )
               - keyword: TO
               - keyword: SECOND
-              - bracketed:
-                  start_bracket: (
-                  numeric_literal: '1'
-                  end_bracket: )
+              - bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '1'
+                    end_bracket: )
         end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -436,10 +444,11 @@ file:
               naked_identifier: c1
               data_type:
                 keyword: GEOMETRY
-                bracketed:
-                  start_bracket: (
-                  numeric_literal: '1000'
-                  end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '1000'
+                    end_bracket: )
         end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -472,11 +481,12 @@ file:
               naked_identifier: c1
               data_type:
                 keyword: HASHTYPE
-                bracketed:
-                  start_bracket: (
-                  numeric_literal: '8'
-                  keyword: BYTE
-                  end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '8'
+                    keyword: BYTE
+                    end_bracket: )
         end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -493,11 +503,12 @@ file:
               naked_identifier: c1
               data_type:
                 keyword: HASHTYPE
-                bracketed:
-                  start_bracket: (
-                  numeric_literal: '8'
-                  keyword: BIT
-                  end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '8'
+                    keyword: BIT
+                    end_bracket: )
         end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -514,10 +525,11 @@ file:
               naked_identifier: c1
               data_type:
                 keyword: CHAR
-                bracketed:
-                  start_bracket: (
-                  numeric_literal: '1'
-                  end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '1'
+                    end_bracket: )
         end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -535,10 +547,11 @@ file:
               data_type:
               - keyword: CHAR
               - keyword: VARYING
-              - bracketed:
-                  start_bracket: (
-                  numeric_literal: '1'
-                  end_bracket: )
+              - bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '1'
+                    end_bracket: )
         end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -555,11 +568,12 @@ file:
               naked_identifier: c1
               data_type:
                 keyword: VARCHAR
-                bracketed:
-                  start_bracket: (
-                  numeric_literal: '2000'
-                  keyword: CHAR
-                  end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '2000'
+                    keyword: CHAR
+                    end_bracket: )
         end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -576,10 +590,11 @@ file:
               naked_identifier: c1
               data_type:
                 keyword: VARCHAR2
-                bracketed:
-                  start_bracket: (
-                  numeric_literal: '2000'
-                  end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '2000'
+                    end_bracket: )
         end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -596,11 +611,12 @@ file:
               naked_identifier: c1
               data_type:
                 keyword: VARCHAR
-                bracketed:
-                  start_bracket: (
-                  numeric_literal: '2000'
-                  keyword: BYTE
-                  end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '2000'
+                    keyword: BYTE
+                    end_bracket: )
         end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -636,10 +652,11 @@ file:
               - keyword: CHARACTER
               - keyword: LARGE
               - keyword: OBJECT
-              - bracketed:
-                  start_bracket: (
-                  numeric_literal: '1000'
-                  end_bracket: )
+              - bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '1000'
+                    end_bracket: )
         end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -657,10 +674,11 @@ file:
               data_type:
               - keyword: CHARACTER
               - keyword: VARYING
-              - bracketed:
-                  start_bracket: (
-                  numeric_literal: '1000'
-                  end_bracket: )
+              - bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '1000'
+                    end_bracket: )
         end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -677,10 +695,11 @@ file:
               naked_identifier: c1
               data_type:
                 keyword: CLOB
-                bracketed:
-                  start_bracket: (
-                  numeric_literal: '2000'
-                  end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '2000'
+                    end_bracket: )
         end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -697,10 +716,11 @@ file:
               naked_identifier: c1
               data_type:
               - keyword: CLOB
-              - bracketed:
-                  start_bracket: (
-                  numeric_literal: '2000'
-                  end_bracket: )
+              - bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '2000'
+                    end_bracket: )
               - keyword: ASCII
         end_bracket: )
 - statement_terminator: ;
@@ -718,11 +738,12 @@ file:
               naked_identifier: c1
               data_type:
               - keyword: VARCHAR
-              - bracketed:
-                  start_bracket: (
-                  numeric_literal: '2000'
-                  keyword: CHAR
-                  end_bracket: )
+              - bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '2000'
+                    keyword: CHAR
+                    end_bracket: )
               - keyword: UTF8
         end_bracket: )
 - statement_terminator: ;
diff --git a/test/fixtures/dialects/exasol/MergeStatement.yml b/test/fixtures/dialects/exasol/MergeStatement.yml
index f5396ca..4009e49 100644
--- a/test/fixtures/dialects/exasol/MergeStatement.yml
+++ b/test/fixtures/dialects/exasol/MergeStatement.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: cf69f6048a644afca395a0752a97e0e6780fb56107cf14f96e1ff0e2fe09cd20
+_hash: 2d927806faa8c9349e523b0471fd1654957ff55219f34e5aafa17bb1588c8352
 file:
 - statement:
     merge_statement:
@@ -81,25 +81,26 @@ file:
         - keyword: MATCHED
         - keyword: THEN
         - merge_insert_clause:
-          - keyword: INSERT
-          - keyword: VALUES
-          - bracketed:
-            - start_bracket: (
-            - expression:
-                column_reference:
-                - naked_identifier: U
-                - dot: .
-                - naked_identifier: name
-            - comma: ','
-            - expression:
-                column_reference:
-                - naked_identifier: U
-                - dot: .
-                - naked_identifier: salary
-            - comma: ','
-            - expression:
-                bare_function: CURRENT_DATE
-            - end_bracket: )
+            keyword: INSERT
+            values_clause:
+              keyword: VALUES
+              bracketed:
+              - start_bracket: (
+              - expression:
+                  column_reference:
+                  - naked_identifier: U
+                  - dot: .
+                  - naked_identifier: name
+              - comma: ','
+              - expression:
+                  column_reference:
+                  - naked_identifier: U
+                  - dot: .
+                  - naked_identifier: salary
+              - comma: ','
+              - expression:
+                  bare_function: CURRENT_DATE
+              - end_bracket: )
 - statement_terminator: ;
 - statement:
     merge_statement:
@@ -196,19 +197,20 @@ file:
         - keyword: MATCHED
         - keyword: THEN
         - merge_insert_clause:
-          - keyword: INSERT
-          - keyword: VALUES
-          - bracketed:
-            - start_bracket: (
-            - expression:
-                numeric_literal: '1'
-            - comma: ','
-            - expression:
-                numeric_literal: '2'
-            - comma: ','
-            - expression:
-                numeric_literal: '3'
-            - end_bracket: )
+            keyword: INSERT
+            values_clause:
+              keyword: VALUES
+              bracketed:
+              - start_bracket: (
+              - expression:
+                  numeric_literal: '1'
+              - comma: ','
+              - expression:
+                  numeric_literal: '2'
+              - comma: ','
+              - expression:
+                  numeric_literal: '3'
+              - end_bracket: )
         merge_when_matched_clause:
         - keyword: WHEN
         - keyword: MATCHED
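The MergeStatement fixtures record a structural fix rather than a relabel: in a `WHEN NOT MATCHED THEN INSERT VALUES (...)` branch, 2.0 parses `VALUES (...)` as a proper `values_clause` nested inside `merge_insert_clause`, where 1.4.x flattened it into sibling keywords and a `bracketed` list. A sketch to inspect it, using an illustrative statement that is assumed (not confirmed here) to be valid exasol MERGE syntax:

    import sqlfluff

    sql = (
        "MERGE INTO t USING u ON t.id = u.id "
        "WHEN NOT MATCHED THEN INSERT VALUES (1, 2, 3);"
    )
    # On 2.x the VALUES keyword and its bracketed expressions should
    # appear under merge_insert_clause -> values_clause in the record.
    print(sqlfluff.parse(sql, dialect="exasol"))
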
diff --git a/test/fixtures/dialects/exasol/SelectStatement.yml b/test/fixtures/dialects/exasol/SelectStatement.yml
index a226a51..fc7a036 100644
--- a/test/fixtures/dialects/exasol/SelectStatement.yml
+++ b/test/fixtures/dialects/exasol/SelectStatement.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 413a3e631dda3ac322923173545eb45268a3051bb70d6e94ed812fefef1b3c04
+_hash: 1d9c8d0de467df4cb21269fda3e771ddfd90848dde0ec3b60bb6dfd7782f1438
 file:
 - statement:
     select_statement:
@@ -323,10 +323,11 @@ file:
                         naked_identifier: v
                         data_type:
                           keyword: VARCHAR
-                          bracketed:
-                            start_bracket: (
-                            numeric_literal: '1'
-                            end_bracket: )
+                          bracketed_arguments:
+                            bracketed:
+                              start_bracket: (
+                              numeric_literal: '1'
+                              end_bracket: )
                     end_bracket: )
                 - import_from_clause:
                     keyword: FROM
@@ -980,19 +981,21 @@ file:
                   naked_identifier: id
                   data_type:
                     keyword: VARCHAR
-                    bracketed:
-                      start_bracket: (
-                      numeric_literal: '2000'
-                      end_bracket: )
+                    bracketed_arguments:
+                      bracketed:
+                        start_bracket: (
+                        numeric_literal: '2000'
+                        end_bracket: )
               - comma: ','
               - column_datatype_definition:
                   naked_identifier: error_column
                   data_type:
                     keyword: VARCHAR
-                    bracketed:
-                      start_bracket: (
-                      numeric_literal: '2000000'
-                      end_bracket: )
+                    bracketed_arguments:
+                      bracketed:
+                        start_bracket: (
+                        numeric_literal: '2000000'
+                        end_bracket: )
               - end_bracket: )
       from_clause:
         keyword: FROM
diff --git a/test/fixtures/dialects/greenplum/.sqlfluff b/test/fixtures/dialects/greenplum/.sqlfluff
new file mode 100644
index 0000000..366b4f8
--- /dev/null
+++ b/test/fixtures/dialects/greenplum/.sqlfluff
@@ -0,0 +1,2 @@
+[sqlfluff]
+dialect = greenplum
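This two-line `.sqlfluff` is how the dialect test harness knows what to parse the new fixture directory with: SQLFluff resolves the nearest config file on the path and reads its `dialect` value, so the SQL files beside it need no per-file annotation. A hedged sketch of that lookup, assuming `FluffConfig.from_path` behaves as in sqlfluff 2.x:

    from sqlfluff.core import FluffConfig

    # Resolve the effective config for the fixture directory; the nested
    # .sqlfluff should make this print "greenplum".
    cfg = FluffConfig.from_path("test/fixtures/dialects/greenplum")
    print(cfg.get("dialect"))
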
diff --git a/test/fixtures/dialects/greenplum/create_table.sql b/test/fixtures/dialects/greenplum/create_table.sql
new file mode 100644
index 0000000..ef3b2b9
--- /dev/null
+++ b/test/fixtures/dialects/greenplum/create_table.sql
@@ -0,0 +1,40 @@
+CREATE TABLE measurement (
+city_id int NOT NULL,
+logdate date NOT NULL,
+peaktemp int,
+unitsales int
+) WITH (appendoptimized=true, compresslevel=5)
+DISTRIBUTED BY (txn_id);
+
+
+CREATE TABLE measurement (
+city_id int NOT NULL,
+logdate date NOT NULL,
+peaktemp int,
+unitsales int
+) WITH (appendoptimized=true)
+DISTRIBUTED BY (txn_id);
+
+
+CREATE TABLE test (
+test_id int NOT NULL,
+logdate date NOT NULL,
+test_text int
+)
+DISTRIBUTED BY (txn_id);
+
+
+CREATE TABLE test_randomly (
+test_id int NOT NULL,
+logdate date NOT NULL,
+test_text int
+)
+DISTRIBUTED RANDOMLY;
+
+CREATE TABLE test_replicated (
+test_id int NOT NULL,
+logdate date NOT NULL,
+test_text int
+)
+DISTRIBUTED REPLICATED;
+
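The new SQL fixture exercises the Greenplum-specific storage and distribution clauses (`WITH (appendoptimized=...)`, `DISTRIBUTED BY`, `DISTRIBUTED RANDOMLY`, `DISTRIBUTED REPLICATED`); the YAML that follows is then regenerated with `python test/generate_parse_fixture_yml.py`, as the fixture headers instruct. A quick local check that the new dialect accepts one of the clauses (simple API, illustrative statement):

    import sqlfluff

    # Should parse cleanly under the greenplum dialect added in 2.0; the
    # record ends with keyword: DISTRIBUTED / keyword: RANDOMLY, matching
    # the generated YAML below.
    sql = "CREATE TABLE t (a int) DISTRIBUTED RANDOMLY;"
    print(sqlfluff.parse(sql, dialect="greenplum"))
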
diff --git a/test/fixtures/dialects/greenplum/create_table.yml b/test/fixtures/dialects/greenplum/create_table.yml
new file mode 100644
index 0000000..9923e2d
--- /dev/null
+++ b/test/fixtures/dialects/greenplum/create_table.yml
@@ -0,0 +1,218 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: ecca9dc51e314232f376fe5511822c602c567bd05498720e65030ff59788b7f3
+file:
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: measurement
+    - bracketed:
+      - start_bracket: (
+      - column_reference:
+          naked_identifier: city_id
+      - data_type:
+          keyword: int
+      - column_constraint_segment:
+        - keyword: NOT
+        - keyword: 'NULL'
+      - comma: ','
+      - column_reference:
+          naked_identifier: logdate
+      - data_type:
+          datetime_type_identifier:
+            keyword: date
+      - column_constraint_segment:
+        - keyword: NOT
+        - keyword: 'NULL'
+      - comma: ','
+      - column_reference:
+          naked_identifier: peaktemp
+      - data_type:
+          keyword: int
+      - comma: ','
+      - column_reference:
+          naked_identifier: unitsales
+      - data_type:
+          keyword: int
+      - end_bracket: )
+    - keyword: WITH
+    - bracketed:
+      - start_bracket: (
+      - parameter: appendoptimized
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - boolean_literal: 'true'
+      - comma: ','
+      - parameter: compresslevel
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - numeric_literal: '5'
+      - end_bracket: )
+    - keyword: DISTRIBUTED
+    - keyword: BY
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: txn_id
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: measurement
+    - bracketed:
+      - start_bracket: (
+      - column_reference:
+          naked_identifier: city_id
+      - data_type:
+          keyword: int
+      - column_constraint_segment:
+        - keyword: NOT
+        - keyword: 'NULL'
+      - comma: ','
+      - column_reference:
+          naked_identifier: logdate
+      - data_type:
+          datetime_type_identifier:
+            keyword: date
+      - column_constraint_segment:
+        - keyword: NOT
+        - keyword: 'NULL'
+      - comma: ','
+      - column_reference:
+          naked_identifier: peaktemp
+      - data_type:
+          keyword: int
+      - comma: ','
+      - column_reference:
+          naked_identifier: unitsales
+      - data_type:
+          keyword: int
+      - end_bracket: )
+    - keyword: WITH
+    - bracketed:
+        start_bracket: (
+        parameter: appendoptimized
+        comparison_operator:
+          raw_comparison_operator: '='
+        boolean_literal: 'true'
+        end_bracket: )
+    - keyword: DISTRIBUTED
+    - keyword: BY
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: txn_id
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: test
+    - bracketed:
+      - start_bracket: (
+      - column_reference:
+          naked_identifier: test_id
+      - data_type:
+          keyword: int
+      - column_constraint_segment:
+        - keyword: NOT
+        - keyword: 'NULL'
+      - comma: ','
+      - column_reference:
+          naked_identifier: logdate
+      - data_type:
+          datetime_type_identifier:
+            keyword: date
+      - column_constraint_segment:
+        - keyword: NOT
+        - keyword: 'NULL'
+      - comma: ','
+      - column_reference:
+          naked_identifier: test_text
+      - data_type:
+          keyword: int
+      - end_bracket: )
+    - keyword: DISTRIBUTED
+    - keyword: BY
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: txn_id
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: test_randomly
+    - bracketed:
+      - start_bracket: (
+      - column_reference:
+          naked_identifier: test_id
+      - data_type:
+          keyword: int
+      - column_constraint_segment:
+        - keyword: NOT
+        - keyword: 'NULL'
+      - comma: ','
+      - column_reference:
+          naked_identifier: logdate
+      - data_type:
+          datetime_type_identifier:
+            keyword: date
+      - column_constraint_segment:
+        - keyword: NOT
+        - keyword: 'NULL'
+      - comma: ','
+      - column_reference:
+          naked_identifier: test_text
+      - data_type:
+          keyword: int
+      - end_bracket: )
+    - keyword: DISTRIBUTED
+    - keyword: RANDOMLY
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: test_replicated
+    - bracketed:
+      - start_bracket: (
+      - column_reference:
+          naked_identifier: test_id
+      - data_type:
+          keyword: int
+      - column_constraint_segment:
+        - keyword: NOT
+        - keyword: 'NULL'
+      - comma: ','
+      - column_reference:
+          naked_identifier: logdate
+      - data_type:
+          datetime_type_identifier:
+            keyword: date
+      - column_constraint_segment:
+        - keyword: NOT
+        - keyword: 'NULL'
+      - comma: ','
+      - column_reference:
+          naked_identifier: test_text
+      - data_type:
+          keyword: int
+      - end_bracket: )
+    - keyword: DISTRIBUTED
+    - keyword: REPLICATED
+- statement_terminator: ;
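
The Greenplum trees above decode to CREATE TABLE statements exercising the storage-parameter and distribution clauses now parsed by the 2.0 dialect. A hedged reconstruction of the SQL behind the `measurement` tree (all names taken from the tree itself):

    CREATE TABLE measurement (
        city_id   int NOT NULL,
        logdate   date NOT NULL,
        peaktemp  int,
        unitsales int
    )
    WITH (appendoptimized = true)
    DISTRIBUTED BY (txn_id);

The sibling fixtures cover the same shape with WITH (appendoptimized = true, compresslevel = 5), and with DISTRIBUTED RANDOMLY and DISTRIBUTED REPLICATED in place of DISTRIBUTED BY (column).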
diff --git a/test/fixtures/dialects/hive/array_types.yml b/test/fixtures/dialects/hive/array_types.yml
index 4df22b7..cdc466e 100644
--- a/test/fixtures/dialects/hive/array_types.yml
+++ b/test/fixtures/dialects/hive/array_types.yml
@@ -3,25 +3,27 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: ae7c029c61edcc47c841a14b2575a6025ddafe6b24eb425b83a25f5241c2b741
+_hash: 703561cc4a19243823ae4bb0768e333f8a24d41beb5f3f9cb4406b66ac180ced
 file:
 - statement:
     select_statement:
       select_clause:
         keyword: select
         select_clause_element:
-          array_literal:
-          - keyword: array
-          - start_square_bracket: '['
-          - column_reference:
-              naked_identifier: a
-          - comma: ','
-          - column_reference:
-              naked_identifier: b
-          - comma: ','
-          - column_reference:
-              naked_identifier: c
-          - end_square_bracket: ']'
+          typed_array_literal:
+            array_type:
+              keyword: array
+            array_literal:
+            - start_square_bracket: '['
+            - column_reference:
+                naked_identifier: a
+            - comma: ','
+            - column_reference:
+                naked_identifier: b
+            - comma: ','
+            - column_reference:
+                naked_identifier: c
+            - end_square_bracket: ']'
           alias_expression:
             keyword: as
             naked_identifier: arr
@@ -55,17 +57,19 @@ file:
                 bracketed:
                   start_bracket: (
                   expression:
-                    array_literal:
-                    - keyword: array
-                    - start_square_bracket: '['
-                    - numeric_literal: '1'
-                    - comma: ','
-                    - numeric_literal: '3'
-                    - comma: ','
-                    - numeric_literal: '6'
-                    - comma: ','
-                    - numeric_literal: '12'
-                    - end_square_bracket: ']'
+                    typed_array_literal:
+                      array_type:
+                        keyword: array
+                      array_literal:
+                      - start_square_bracket: '['
+                      - numeric_literal: '1'
+                      - comma: ','
+                      - numeric_literal: '3'
+                      - comma: ','
+                      - numeric_literal: '6'
+                      - comma: ','
+                      - numeric_literal: '12'
+                      - end_square_bracket: ']'
                   end_bracket: )
             alias_expression:
               keyword: as
@@ -87,35 +91,37 @@ file:
             bracketed:
               start_bracket: (
               expression:
-                array_literal:
-                - keyword: array
-                - start_square_bracket: '['
-                - function:
-                    function_name:
-                      function_name_identifier: row
-                    bracketed:
-                    - start_bracket: (
-                    - expression:
-                        quoted_literal: "'pending.freebet'"
-                    - comma: ','
-                    - expression:
-                        column_reference:
-                          naked_identifier: pending_fb
-                    - end_bracket: )
-                - comma: ','
-                - function:
-                    function_name:
-                      function_name_identifier: row
-                    bracketed:
-                    - start_bracket: (
-                    - expression:
-                        quoted_literal: "'bonus.balance'"
-                    - comma: ','
-                    - expression:
-                        column_reference:
-                          naked_identifier: bonus
-                    - end_bracket: )
-                - end_square_bracket: ']'
+                typed_array_literal:
+                  array_type:
+                    keyword: array
+                  array_literal:
+                  - start_square_bracket: '['
+                  - function:
+                      function_name:
+                        function_name_identifier: row
+                      bracketed:
+                      - start_bracket: (
+                      - expression:
+                          quoted_literal: "'pending.freebet'"
+                      - comma: ','
+                      - expression:
+                          column_reference:
+                            naked_identifier: pending_fb
+                      - end_bracket: )
+                  - comma: ','
+                  - function:
+                      function_name:
+                        function_name_identifier: row
+                      bracketed:
+                      - start_bracket: (
+                      - expression:
+                          quoted_literal: "'bonus.balance'"
+                      - comma: ','
+                      - expression:
+                          column_reference:
+                            naked_identifier: bonus
+                      - end_bracket: )
+                  - end_square_bracket: ']'
               end_bracket: )
       from_clause:
         keyword: from
@@ -132,15 +138,17 @@ file:
       select_clause:
         keyword: select
         select_clause_element:
-          array_literal:
-          - keyword: array
-          - start_square_bracket: '['
-          - quoted_literal: "'a'"
-          - comma: ','
-          - quoted_literal: "'b'"
-          - comma: ','
-          - quoted_literal: "'c'"
-          - end_square_bracket: ']'
+          typed_array_literal:
+            array_type:
+              keyword: array
+            array_literal:
+            - start_square_bracket: '['
+            - quoted_literal: "'a'"
+            - comma: ','
+            - quoted_literal: "'b'"
+            - comma: ','
+            - quoted_literal: "'c'"
+            - end_square_bracket: ']'
           alias_expression:
             keyword: as
             naked_identifier: arr
@@ -159,13 +167,15 @@ file:
       select_clause:
         keyword: select
         select_clause_element:
-          array_literal:
-            keyword: array
-            start_square_bracket: '['
-            quoted_literal: "'a'"
-            comma: ','
-            null_literal: 'null'
-            end_square_bracket: ']'
+          typed_array_literal:
+            array_type:
+              keyword: array
+            array_literal:
+              start_square_bracket: '['
+              quoted_literal: "'a'"
+              comma: ','
+              null_literal: 'null'
+              end_square_bracket: ']'
           alias_expression:
             keyword: as
             naked_identifier: arr
@@ -184,10 +194,12 @@ file:
       select_clause:
         keyword: select
         select_clause_element:
-          array_literal:
-            keyword: array
-            start_square_bracket: '['
-            end_square_bracket: ']'
+          typed_array_literal:
+            array_type:
+              keyword: array
+            array_literal:
+              start_square_bracket: '['
+              end_square_bracket: ']'
           alias_expression:
             keyword: as
             naked_identifier: arr
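
The change in this file is structural: the Hive `array[...]` literal, previously a flat `array_literal` that swallowed the `array` keyword, is now a `typed_array_literal` holding an `array_type` (the keyword) plus the inner `array_literal` (the bracketed elements). The statements exercised are of this shape (a sketch; the element values and the `arr` alias are taken from the trees):

    select array['a', 'b', 'c'] as arr;
    select array[] as arr;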
diff --git a/test/fixtures/dialects/hive/create_table_constraints.sql b/test/fixtures/dialects/hive/create_table_constraints.sql
index 7bb8e93..5a25702 100644
--- a/test/fixtures/dialects/hive/create_table_constraints.sql
+++ b/test/fixtures/dialects/hive/create_table_constraints.sql
@@ -7,3 +7,31 @@ CREATE TABLE foo(
 )
 COMMENT 'This is a test table'
 STORED AS ORC;
+
+CREATE TABLE product
+  (
+     product_id        INTEGER,
+     product_vendor_id INTEGER,
+     PRIMARY KEY (product_id)  DISABLE NOVALIDATE,
+     CONSTRAINT product_fk_1 FOREIGN KEY (product_vendor_id) REFERENCES vendor(vendor_id)  DISABLE NOVALIDATE
+  );
+
+CREATE TABLE vendor
+  (
+     vendor_id INTEGER,
+     PRIMARY KEY (vendor_id)  DISABLE NOVALIDATE RELY
+  );
+
+CREATE TABLE product
+  (
+     product_id        INTEGER,
+     product_vendor_id INTEGER,
+     PRIMARY KEY (product_id)  DISABLE NOVALIDATE,
+     CONSTRAINT product_fk_1 FOREIGN KEY (product_vendor_id) REFERENCES vendor(vendor_id)  DISABLE NOVALIDATE
+  );
+
+CREATE TABLE vendor
+  (
+     vendor_id INTEGER,
+     PRIMARY KEY (vendor_id)  DISABLE NOVALIDATE NORELY
+  );
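
RELY and NORELY are the new pieces here: Hive constraints are informational rather than enforced, DISABLE NOVALIDATE marks them as such, and RELY/NORELY additionally tells the optimizer whether it may assume the constraint holds. The parse trees below pick these up as plain keywords on the table_constraint segment.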
diff --git a/test/fixtures/dialects/hive/create_table_constraints.yml b/test/fixtures/dialects/hive/create_table_constraints.yml
index 32ae0c1..181dc81 100644
--- a/test/fixtures/dialects/hive/create_table_constraints.yml
+++ b/test/fixtures/dialects/hive/create_table_constraints.yml
@@ -3,9 +3,9 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 06b564929ba88492d7ffef906cbb94b2c4bc6d4ab8b85168216445715d867c98
+_hash: 33362b518a68ba15d8346bcee14783b8dc1efcf5370199f667d53e4095821211
 file:
-  statement:
+- statement:
     create_table_statement:
     - keyword: CREATE
     - keyword: TABLE
@@ -61,4 +61,166 @@ file:
     - keyword: STORED
     - keyword: AS
     - keyword: ORC
-  statement_terminator: ;
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: product
+    - bracketed:
+      - start_bracket: (
+      - column_definition:
+          naked_identifier: product_id
+          data_type:
+            primitive_type:
+              keyword: INTEGER
+      - comma: ','
+      - column_definition:
+          naked_identifier: product_vendor_id
+          data_type:
+            primitive_type:
+              keyword: INTEGER
+      - comma: ','
+      - table_constraint:
+        - keyword: PRIMARY
+        - keyword: KEY
+        - bracketed:
+            start_bracket: (
+            column_reference:
+              naked_identifier: product_id
+            end_bracket: )
+        - keyword: DISABLE
+        - keyword: NOVALIDATE
+      - comma: ','
+      - table_constraint:
+        - keyword: CONSTRAINT
+        - object_reference:
+            naked_identifier: product_fk_1
+        - keyword: FOREIGN
+        - keyword: KEY
+        - bracketed:
+            start_bracket: (
+            column_reference:
+              naked_identifier: product_vendor_id
+            end_bracket: )
+        - keyword: REFERENCES
+        - table_reference:
+            naked_identifier: vendor
+        - bracketed:
+            start_bracket: (
+            column_reference:
+              naked_identifier: vendor_id
+            end_bracket: )
+        - keyword: DISABLE
+        - keyword: NOVALIDATE
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: vendor
+    - bracketed:
+        start_bracket: (
+        column_definition:
+          naked_identifier: vendor_id
+          data_type:
+            primitive_type:
+              keyword: INTEGER
+        comma: ','
+        table_constraint:
+        - keyword: PRIMARY
+        - keyword: KEY
+        - bracketed:
+            start_bracket: (
+            column_reference:
+              naked_identifier: vendor_id
+            end_bracket: )
+        - keyword: DISABLE
+        - keyword: NOVALIDATE
+        - keyword: RELY
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: product
+    - bracketed:
+      - start_bracket: (
+      - column_definition:
+          naked_identifier: product_id
+          data_type:
+            primitive_type:
+              keyword: INTEGER
+      - comma: ','
+      - column_definition:
+          naked_identifier: product_vendor_id
+          data_type:
+            primitive_type:
+              keyword: INTEGER
+      - comma: ','
+      - table_constraint:
+        - keyword: PRIMARY
+        - keyword: KEY
+        - bracketed:
+            start_bracket: (
+            column_reference:
+              naked_identifier: product_id
+            end_bracket: )
+        - keyword: DISABLE
+        - keyword: NOVALIDATE
+      - comma: ','
+      - table_constraint:
+        - keyword: CONSTRAINT
+        - object_reference:
+            naked_identifier: product_fk_1
+        - keyword: FOREIGN
+        - keyword: KEY
+        - bracketed:
+            start_bracket: (
+            column_reference:
+              naked_identifier: product_vendor_id
+            end_bracket: )
+        - keyword: REFERENCES
+        - table_reference:
+            naked_identifier: vendor
+        - bracketed:
+            start_bracket: (
+            column_reference:
+              naked_identifier: vendor_id
+            end_bracket: )
+        - keyword: DISABLE
+        - keyword: NOVALIDATE
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: vendor
+    - bracketed:
+        start_bracket: (
+        column_definition:
+          naked_identifier: vendor_id
+          data_type:
+            primitive_type:
+              keyword: INTEGER
+        comma: ','
+        table_constraint:
+        - keyword: PRIMARY
+        - keyword: KEY
+        - bracketed:
+            start_bracket: (
+            column_reference:
+              naked_identifier: vendor_id
+            end_bracket: )
+        - keyword: DISABLE
+        - keyword: NOVALIDATE
+        - keyword: NORELY
+        end_bracket: )
+- statement_terminator: ;
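
One side effect of extending a single-statement fixture: with more than one statement in the file, the top-level `file:` node switches from a mapping (`statement:` / `statement_terminator:`) to a sequence (`- statement:` / `- statement_terminator:`), which is why the first hunk above touches the original statement as well.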
diff --git a/test/fixtures/dialects/hive/create_table_datatypes.yml b/test/fixtures/dialects/hive/create_table_datatypes.yml
index d1ee77c..112be08 100644
--- a/test/fixtures/dialects/hive/create_table_datatypes.yml
+++ b/test/fixtures/dialects/hive/create_table_datatypes.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 049d7b4751c32d8c8387eda398d38964105340e733dfe259657f8fe1aea38c4f
+_hash: 224d06e8d35e55927c093e397084485a36d7cdf2f67f972db87effd738945841
 file:
   statement:
     create_table_statement:
@@ -38,22 +38,24 @@ file:
           data_type:
             primitive_type:
               keyword: decimal
-              bracketed:
-              - start_bracket: (
-              - numeric_literal: '10'
-              - comma: ','
-              - numeric_literal: '2'
-              - end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - numeric_literal: '10'
+                - comma: ','
+                - numeric_literal: '2'
+                - end_bracket: )
       - comma: ','
       - column_definition:
           naked_identifier: col5
           data_type:
-            keyword: ARRAY
-            start_angle_bracket: <
-            data_type:
-              primitive_type:
-                keyword: double
-            end_angle_bracket: '>'
+            array_type:
+              keyword: ARRAY
+              start_angle_bracket: <
+              data_type:
+                primitive_type:
+                  keyword: double
+              end_angle_bracket: '>'
       - comma: ','
       - column_definition:
           naked_identifier: col6
@@ -71,45 +73,49 @@ file:
       - column_definition:
           naked_identifier: col7
           data_type:
-          - keyword: STRUCT
-          - start_angle_bracket: <
-          - naked_identifier: field1
-          - colon: ':'
-          - data_type:
-              primitive_type:
-                keyword: boolean
-          - comma: ','
-          - naked_identifier: field2
-          - colon: ':'
-          - data_type:
-              keyword: ARRAY
-              start_angle_bracket: <
-              data_type:
-                primitive_type:
-                - keyword: double
-                - keyword: precision
-              end_angle_bracket: '>'
-          - comma: ','
-          - naked_identifier: field3
-          - colon: ':'
-          - data_type:
-            - keyword: UNIONTYPE
-            - start_angle_bracket: <
-            - data_type:
-                primitive_type:
-                  keyword: string
-            - comma: ','
-            - data_type:
-                primitive_type:
-                  keyword: decimal
-                  bracketed:
-                  - start_bracket: (
-                  - numeric_literal: '10'
-                  - comma: ','
-                  - numeric_literal: '2'
-                  - end_bracket: )
-            - end_angle_bracket: '>'
-          - end_angle_bracket: '>'
+            struct_type:
+              keyword: STRUCT
+              struct_type_schema:
+              - start_angle_bracket: <
+              - naked_identifier: field1
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: boolean
+              - comma: ','
+              - naked_identifier: field2
+              - colon: ':'
+              - data_type:
+                  array_type:
+                    keyword: ARRAY
+                    start_angle_bracket: <
+                    data_type:
+                      primitive_type:
+                      - keyword: double
+                      - keyword: precision
+                    end_angle_bracket: '>'
+              - comma: ','
+              - naked_identifier: field3
+              - colon: ':'
+              - data_type:
+                - keyword: UNIONTYPE
+                - start_angle_bracket: <
+                - data_type:
+                    primitive_type:
+                      keyword: string
+                - comma: ','
+                - data_type:
+                    primitive_type:
+                      keyword: decimal
+                      bracketed_arguments:
+                        bracketed:
+                        - start_bracket: (
+                        - numeric_literal: '10'
+                        - comma: ','
+                        - numeric_literal: '2'
+                        - end_bracket: )
+                - end_angle_bracket: '>'
+              - end_angle_bracket: '>'
       - comma: ','
       - column_definition:
           naked_identifier: col8
@@ -121,12 +127,13 @@ file:
                 keyword: string
           - comma: ','
           - data_type:
-              keyword: ARRAY
-              start_angle_bracket: <
-              data_type:
-                primitive_type:
-                  keyword: char
-              end_angle_bracket: '>'
+              array_type:
+                keyword: ARRAY
+                start_angle_bracket: <
+                data_type:
+                  primitive_type:
+                    keyword: char
+                end_angle_bracket: '>'
           - end_angle_bracket: '>'
       - end_bracket: )
   statement_terminator: ;
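
As with the array literals, complex Hive types now get dedicated wrapper nodes: `array_type` for ARRAY<...>, `struct_type` with a `struct_type_schema` for STRUCT<...>, and `bracketed_arguments` around the precision/scale of decimal. Read back as SQL, the reworked columns look roughly like this (a sketch from the trees; the fixture defines further columns not shown in the hunks, and the table name here is illustrative):

    CREATE TABLE datatype_sketch (
        col4 decimal(10, 2),
        col5 ARRAY<double>,
        col7 STRUCT<
            field1: boolean,
            field2: ARRAY<double precision>,
            field3: UNIONTYPE<string, decimal(10, 2)>
        >
    );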
diff --git a/test/fixtures/dialects/hive/select_cast.yml b/test/fixtures/dialects/hive/select_cast.yml
index fbfb026..16ff86a 100644
--- a/test/fixtures/dialects/hive/select_cast.yml
+++ b/test/fixtures/dialects/hive/select_cast.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: b17a6a2dcf84705928031ae19004613bbe8fcc1fd62159c5c3460ad46461121e
+_hash: 5ffab9520bca3181f57fc2f9cd06f404e2fcf543b9939c8e11725b69357e14c6
 file:
 - statement:
     select_statement:
@@ -39,12 +39,13 @@ file:
                   - data_type:
                       primitive_type:
                         keyword: decimal
-                        bracketed:
-                        - start_bracket: (
-                        - numeric_literal: '23'
-                        - comma: ','
-                        - numeric_literal: '2'
-                        - end_bracket: )
+                        bracketed_arguments:
+                          bracketed:
+                          - start_bracket: (
+                          - numeric_literal: '23'
+                          - comma: ','
+                          - numeric_literal: '2'
+                          - end_bracket: )
                   - end_bracket: )
               end_bracket: )
       from_clause:
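
The same `bracketed_arguments` wrapper also appears wherever a parameterised type occurs inside an expression, as in a cast (column and table names here are illustrative; only the decimal(23, 2) argument structure is taken from the fixture):

    select cast(col1 as decimal(23, 2)) from some_table;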
diff --git a/test/fixtures/dialects/materialize/materialize_alter_statements.yml b/test/fixtures/dialects/materialize/materialize_alter_statements.yml
index 8efa4c4..623e22c 100644
--- a/test/fixtures/dialects/materialize/materialize_alter_statements.yml
+++ b/test/fixtures/dialects/materialize/materialize_alter_statements.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: b1dc0a14d9236681cd3e36e7e939d0338c339431e55e9c385df9b7982d895cd5
+_hash: e7697299c2d39a580ae55e20643bb8dc6b0d59571152394e3e5dfddb58f4a29c
 file:
 - statement:
     alter_connection_rotate_keys:
@@ -121,7 +121,7 @@ file:
     - object_reference:
         naked_identifier: name
     - keyword: AS
-    - raw: value
+    - code: value
 - statement_terminator: ;
 - statement:
     alter_secret_statement:
@@ -130,7 +130,7 @@ file:
     - object_reference:
         naked_identifier: name
     - keyword: AS
-    - raw: value
+    - code: value
 - statement_terminator: ;
 - statement:
     alter_source_sink_size_statement:
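
From here to the end of the Materialize fixtures, the changes are one mechanical rename: tokens inside the grammar-opaque option regions that 1.x recorded as `raw:` are recorded as `code:` by the 2.0 lexer, while pure punctuation such as `=` and `.` stays `raw:`. For example, in

    ALTER SECRET name AS value;

the trailing `value` is now `code: value` instead of `raw: value`. The same substitution repeats through the copy, cluster-replica, connection, index, sink, source, view, explain and subscribe/fetch fixtures below; beyond that, only the `_hash` lines and an occasional mapping-vs-sequence reshuffle of `bracketed` children differ.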
diff --git a/test/fixtures/dialects/materialize/materialize_copy_to_from_statements.yml b/test/fixtures/dialects/materialize/materialize_copy_to_from_statements.yml
index 7f5c6b0..ca9a7d5 100644
--- a/test/fixtures/dialects/materialize/materialize_copy_to_from_statements.yml
+++ b/test/fixtures/dialects/materialize/materialize_copy_to_from_statements.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: a34c4368d0453fcd8c9df9799107c972653582fd0b00ea2e230a4a7e47f48183
+_hash: 14a224ca214ea00272992458a67532a8f2524fa05cd154bdb5a18024bf39402f
 file:
 - statement:
     copy_to_statement:
@@ -69,8 +69,8 @@ file:
     - keyword: WITH
     - bracketed:
       - start_bracket: (
-      - raw: FORMAT
-      - raw: binary
+      - code: FORMAT
+      - code: binary
       - end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -99,8 +99,8 @@ file:
     - keyword: WITH
     - bracketed:
       - start_bracket: (
-      - raw: FORMAT
-      - raw: binary
+      - code: FORMAT
+      - code: binary
       - end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -134,11 +134,11 @@ file:
           start_bracket: (
           numeric_literal: '6'
           comma: ','
-          raw: 'NULL'
+          code: 'NULL'
           end_bracket: )
-      - raw: ORDER
-      - raw: BY
-      - raw: column1
+      - code: ORDER
+      - code: BY
+      - code: column1
       - end_bracket: )
     - keyword: TO
     - keyword: STDOUT
@@ -161,13 +161,13 @@ file:
     - keyword: WITH
     - bracketed:
       - start_bracket: (
-      - raw: FORMAT
-      - raw: CSV
+      - code: FORMAT
+      - code: CSV
       - comma: ','
-      - raw: DELIMITER
+      - code: DELIMITER
       - single_quote: "'!'"
       - comma: ','
-      - raw: QUOTE
+      - code: QUOTE
       - single_quote: "'!'"
       - end_bracket: )
 - statement_terminator: ;
@@ -181,7 +181,7 @@ file:
     - keyword: WITH
     - bracketed:
         start_bracket: (
-        raw: DELIMITER
+        code: DELIMITER
         single_quote: "'|'"
         end_bracket: )
 - statement_terminator: ;
@@ -194,8 +194,8 @@ file:
     - keyword: STDIN
     - bracketed:
       - start_bracket: (
-      - raw: FORMAT
-      - raw: CSV
+      - code: FORMAT
+      - code: CSV
       - end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -207,7 +207,7 @@ file:
     - keyword: STDIN
     - bracketed:
         start_bracket: (
-        raw: DELIMITER
+        code: DELIMITER
         single_quote: "'|'"
         end_bracket: )
 - statement_terminator: ;
diff --git a/test/fixtures/dialects/materialize/materialize_create_cluster_replica_statements.yml b/test/fixtures/dialects/materialize/materialize_create_cluster_replica_statements.yml
index c260a41..9e9746a 100644
--- a/test/fixtures/dialects/materialize/materialize_create_cluster_replica_statements.yml
+++ b/test/fixtures/dialects/materialize/materialize_create_cluster_replica_statements.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: ba593bbf1fa0ccc9eefa05a2a5384a80c662fc635660cb22a13c748688521334
+_hash: 463be35f1c6e42bf30ab2665e71ea802e4f444715047607ee668e52eb30b6136
 file:
 - statement:
     create_cluster_statement:
@@ -14,10 +14,10 @@ file:
     - keyword: REPLICAS
     - bracketed:
         start_bracket: (
-        raw: r1
+        code: r1
         bracketed:
           start_bracket: (
-          raw: size
+          code: size
           single_quote: "'1'"
           end_bracket: )
         end_bracket: )
@@ -31,17 +31,17 @@ file:
     - keyword: REPLICAS
     - bracketed:
       - start_bracket: (
-      - raw: r1
+      - code: r1
       - bracketed:
           start_bracket: (
-          raw: size
+          code: size
           single_quote: "'1'"
           end_bracket: )
       - comma: ','
-      - raw: r2
+      - code: r2
       - bracketed:
           start_bracket: (
-          raw: size
+          code: size
           single_quote: "'1'"
           end_bracket: )
       - end_bracket: )
@@ -55,7 +55,7 @@ file:
       - naked_identifier: default
       - dot: .
       - naked_identifier: size_1
-    - raw: SIZE
+    - code: SIZE
     - single_quote: "'large'"
 - statement_terminator: ;
 - statement:
@@ -67,7 +67,7 @@ file:
       - naked_identifier: c1
       - dot: .
       - naked_identifier: r1
-    - raw: SIZE
+    - code: SIZE
     - raw: '='
     - single_quote: "'medium'"
 - statement_terminator: ;
@@ -80,11 +80,11 @@ file:
       - naked_identifier: default
       - dot: .
       - naked_identifier: replica
-    - raw: AVAILABILITY
-    - raw: ZONE
+    - code: AVAILABILITY
+    - code: ZONE
     - single_quote: "'a'"
     - comma: ','
-    - raw: AVAILABILITY
-    - raw: ZONE
+    - code: AVAILABILITY
+    - code: ZONE
     - single_quote: "'b'"
 - statement_terminator: ;
diff --git a/test/fixtures/dialects/materialize/materialize_create_connection_statement.yml b/test/fixtures/dialects/materialize/materialize_create_connection_statement.yml
index 9abf8c6..9a5cb73 100644
--- a/test/fixtures/dialects/materialize/materialize_create_connection_statement.yml
+++ b/test/fixtures/dialects/materialize/materialize_create_connection_statement.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: d48e57736a0884fb917150c2ddfa097d374b193e09c5eab9729c262fcdcd22f7
+_hash: 5f3d1f3a9c4277dabd7d090163feecaad34554d319d420be8d2c11cbd671a1b6
 file:
 - statement:
     create_secret_statement:
@@ -15,7 +15,7 @@ file:
     - object_reference:
         naked_identifier: name
     - keyword: AS
-    - raw: value
+    - code: value
 - statement_terminator: ;
 - statement:
     create_secret_statement:
@@ -24,7 +24,7 @@ file:
     - object_reference:
         naked_identifier: name
     - keyword: AS
-    - raw: value
+    - code: value
 - statement_terminator: ;
 - statement:
     create_connection_statement:
@@ -37,12 +37,12 @@ file:
     - keyword: PRIVATELINK
     - bracketed:
       - start_bracket: (
-      - raw: SERVICE
-      - raw: NAME
+      - code: SERVICE
+      - code: NAME
       - single_quote: "'com.amazonaws.vpce.us-east-1.vpce-svc-0e123abc123198abc'"
       - comma: ','
-      - raw: AVAILABILITY
-      - raw: ZONES
+      - code: AVAILABILITY
+      - code: ZONES
       - bracketed:
         - start_bracket: (
         - single_quote: "'use1-az1'"
@@ -63,29 +63,29 @@ file:
     - keyword: REGISTRY
     - bracketed:
       - start_bracket: (
-      - raw: URL
+      - code: URL
       - single_quote: "'https://rp-f00000bar.data.vectorized.cloud:30993'"
       - comma: ','
-      - raw: SSL
-      - raw: KEY
+      - code: SSL
+      - code: KEY
       - raw: '='
-      - raw: SECRET
-      - raw: csr_ssl_key
+      - code: SECRET
+      - code: csr_ssl_key
       - comma: ','
-      - raw: SSL
-      - raw: CERTIFICATE
+      - code: SSL
+      - code: CERTIFICATE
       - raw: '='
-      - raw: SECRET
-      - raw: csr_ssl_crt
+      - code: SECRET
+      - code: csr_ssl_crt
       - comma: ','
-      - raw: USERNAME
+      - code: USERNAME
       - raw: '='
       - single_quote: "'foo'"
       - comma: ','
-      - raw: PASSWORD
+      - code: PASSWORD
       - raw: '='
-      - raw: SECRET
-      - raw: csr_password
+      - code: SECRET
+      - code: csr_password
       - end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -99,12 +99,12 @@ file:
     - keyword: PRIVATELINK
     - bracketed:
       - start_bracket: (
-      - raw: SERVICE
-      - raw: NAME
+      - code: SERVICE
+      - code: NAME
       - single_quote: "'com.amazonaws.vpce.us-east-1.vpce-svc-0e123abc123198abc'"
       - comma: ','
-      - raw: AVAILABILITY
-      - raw: ZONES
+      - code: AVAILABILITY
+      - code: ZONES
       - bracketed:
         - start_bracket: (
         - single_quote: "'use1-az1'"
@@ -125,12 +125,12 @@ file:
     - keyword: REGISTRY
     - bracketed:
       - start_bracket: (
-      - raw: URL
+      - code: URL
       - single_quote: "'http://my-confluent-schema-registry:8081'"
       - comma: ','
-      - raw: AWS
-      - raw: PRIVATELINK
-      - raw: privatelink_svc
+      - code: AWS
+      - code: PRIVATELINK
+      - code: privatelink_svc
       - end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -143,20 +143,20 @@ file:
     - keyword: KAFKA
     - bracketed:
       - start_bracket: (
-      - raw: BROKER
+      - code: BROKER
       - single_quote: "'rp-f00000bar.data.vectorized.cloud:30365'"
       - comma: ','
-      - raw: SSL
-      - raw: KEY
+      - code: SSL
+      - code: KEY
       - raw: '='
-      - raw: SECRET
-      - raw: kafka_ssl_key
+      - code: SECRET
+      - code: kafka_ssl_key
       - comma: ','
-      - raw: SSL
-      - raw: CERTIFICATE
+      - code: SSL
+      - code: CERTIFICATE
       - raw: '='
-      - raw: SECRET
-      - raw: kafka_ssl_crt
+      - code: SECRET
+      - code: kafka_ssl_crt
       - end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -169,7 +169,7 @@ file:
     - keyword: KAFKA
     - bracketed:
         start_bracket: (
-        raw: BROKERS
+        code: BROKERS
         bracketed:
         - start_bracket: (
         - single_quote: "'broker1:9092'"
@@ -188,24 +188,24 @@ file:
     - keyword: POSTGRES
     - bracketed:
       - start_bracket: (
-      - raw: HOST
+      - code: HOST
       - single_quote: "'instance.foo000.us-west-1.rds.amazonaws.com'"
       - comma: ','
-      - raw: PORT
+      - code: PORT
       - numeric_literal: '5432'
       - comma: ','
-      - raw: USER
+      - code: USER
       - single_quote: "'postgres'"
       - comma: ','
-      - raw: PASSWORD
-      - raw: SECRET
-      - raw: pgpass
+      - code: PASSWORD
+      - code: SECRET
+      - code: pgpass
       - comma: ','
-      - raw: SSL
-      - raw: MODE
+      - code: SSL
+      - code: MODE
       - single_quote: "'require'"
       - comma: ','
-      - raw: DATABASE
+      - code: DATABASE
       - single_quote: "'postgres'"
       - end_bracket: )
 - statement_terminator: ;
@@ -220,13 +220,13 @@ file:
     - keyword: TUNNEL
     - bracketed:
       - start_bracket: (
-      - raw: HOST
+      - code: HOST
       - single_quote: "'bastion-host'"
       - comma: ','
-      - raw: PORT
+      - code: PORT
       - numeric_literal: '22'
       - comma: ','
-      - raw: USER
+      - code: USER
       - single_quote: "'materialize'"
       - comma: ','
       - end_bracket: )
@@ -241,17 +241,17 @@ file:
     - keyword: POSTGRES
     - bracketed:
       - start_bracket: (
-      - raw: HOST
+      - code: HOST
       - single_quote: "'instance.foo000.us-west-1.rds.amazonaws.com'"
       - comma: ','
-      - raw: PORT
+      - code: PORT
       - numeric_literal: '5432'
       - comma: ','
-      - raw: SSH
-      - raw: TUNNEL
-      - raw: tunnel
+      - code: SSH
+      - code: TUNNEL
+      - code: tunnel
       - comma: ','
-      - raw: DATABASE
+      - code: DATABASE
       - single_quote: "'postgres'"
       - end_bracket: )
 - statement_terminator: ;
diff --git a/test/fixtures/dialects/materialize/materialize_create_index.yml b/test/fixtures/dialects/materialize/materialize_create_index.yml
index e20f791..1b918ee 100644
--- a/test/fixtures/dialects/materialize/materialize_create_index.yml
+++ b/test/fixtures/dialects/materialize/materialize_create_index.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 8eaa2c6e51a20fe64b1770caebd8acca5c4c40d22ad42be2b3919d1b829bc399
+_hash: 45c60649e71f15fc61e09c36070046f876af3cb2e9a407e1ddd2f63489875b81
 file:
 - statement:
     create_index_statement:
@@ -16,7 +16,7 @@ file:
         naked_identifier: active_customers
     - bracketed:
         start_bracket: (
-        raw: geo_id
+        code: geo_id
         end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -30,10 +30,10 @@ file:
         naked_identifier: active_customers
     - bracketed:
         start_bracket: (
-        raw: upper
+        code: upper
         bracketed:
           start_bracket: (
-          raw: guid
+          code: guid
           end_bracket: )
         end_bracket: )
 - statement_terminator: ;
@@ -52,6 +52,6 @@ file:
         naked_identifier: t1
     - bracketed:
         start_bracket: (
-        raw: f1
+        code: f1
         end_bracket: )
 - statement_terminator: ;
diff --git a/test/fixtures/dialects/materialize/materialize_create_sink_statements.yml b/test/fixtures/dialects/materialize/materialize_create_sink_statements.yml
index 2f86bfe..b8ca24a 100644
--- a/test/fixtures/dialects/materialize/materialize_create_sink_statements.yml
+++ b/test/fixtures/dialects/materialize/materialize_create_sink_statements.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 3c8ddc5e2d29bc641f89d24a14b14a3fdf1d6e695ceda407358e2d61b6321526
+_hash: 1fc428ea414b4ccb08c4c9649d3feed58070a02a1494226d32475fbd080cef7f
 file:
 - statement:
     create_sink_kafka_statement:
@@ -15,25 +15,25 @@ file:
     - object_reference:
         naked_identifier: quotes
     - keyword: INTO
-    - raw: KAFKA
-    - raw: CONNECTION
-    - raw: kafka_connection
+    - code: KAFKA
+    - code: CONNECTION
+    - code: kafka_connection
     - bracketed:
         start_bracket: (
-        raw: TOPIC
+        code: TOPIC
         single_quote: "'quotes-sink'"
         end_bracket: )
-    - raw: FORMAT
-    - raw: JSON
-    - raw: ENVELOPE
-    - raw: DEBEZIUM
-    - raw: WITH
+    - code: FORMAT
+    - code: JSON
+    - code: ENVELOPE
+    - code: DEBEZIUM
+    - code: WITH
     - bracketed:
-      - start_bracket: (
-      - raw: SIZE
-      - raw: '='
-      - single_quote: "'3xsmall'"
-      - end_bracket: )
+        start_bracket: (
+        code: SIZE
+        raw: '='
+        single_quote: "'3xsmall'"
+        end_bracket: )
 - statement_terminator: ;
 - statement:
     create_sink_kafka_statement:
@@ -45,23 +45,23 @@ file:
     - object_reference:
         naked_identifier: frank_quotes
     - keyword: INTO
-    - raw: KAFKA
-    - raw: CONNECTION
-    - raw: kafka_connection
+    - code: KAFKA
+    - code: CONNECTION
+    - code: kafka_connection
     - bracketed:
         start_bracket: (
-        raw: TOPIC
+        code: TOPIC
         single_quote: "'frank-quotes-sink'"
         end_bracket: )
-    - raw: FORMAT
-    - raw: JSON
-    - raw: ENVELOPE
-    - raw: DEBEZIUM
-    - raw: WITH
+    - code: FORMAT
+    - code: JSON
+    - code: ENVELOPE
+    - code: DEBEZIUM
+    - code: WITH
     - bracketed:
-      - start_bracket: (
-      - raw: SIZE
-      - raw: '='
-      - single_quote: "'3xsmall'"
-      - end_bracket: )
+        start_bracket: (
+        code: SIZE
+        raw: '='
+        single_quote: "'3xsmall'"
+        end_bracket: )
 - statement_terminator: ;
diff --git a/test/fixtures/dialects/materialize/materialize_create_source_statements.yml b/test/fixtures/dialects/materialize/materialize_create_source_statements.yml
index 5e7a353..83dfb57 100644
--- a/test/fixtures/dialects/materialize/materialize_create_source_statements.yml
+++ b/test/fixtures/dialects/materialize/materialize_create_source_statements.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: a3d984f3cf9c375c5ada88ba1c0222d93beeb3efd0f472dad53eadcc7eb54eeb
+_hash: e4637e97ea11a573f43a5e1d0a19f101f3e569164fa4f0886387efd47fb8053a
 file:
 - statement:
     create_source_kafka_statement:
@@ -18,24 +18,24 @@ file:
         naked_identifier: kafka_connection
     - bracketed:
         start_bracket: (
-        raw: TOPIC
+        code: TOPIC
         single_quote: "'test_topic'"
         end_bracket: )
     - keyword: FORMAT
-    - raw: AVRO
-    - raw: USING
-    - raw: CONFLUENT
-    - raw: SCHEMA
-    - raw: REGISTRY
-    - raw: CONNECTION
-    - raw: csr_connection
-    - raw: WITH
+    - code: AVRO
+    - code: USING
+    - code: CONFLUENT
+    - code: SCHEMA
+    - code: REGISTRY
+    - code: CONNECTION
+    - code: csr_connection
+    - code: WITH
     - bracketed:
-      - start_bracket: (
-      - raw: SIZE
-      - raw: '='
-      - single_quote: "'3xsmall'"
-      - end_bracket: )
+        start_bracket: (
+        code: SIZE
+        raw: '='
+        single_quote: "'3xsmall'"
+        end_bracket: )
 - statement_terminator: ;
 - statement:
     create_view_statement:
@@ -129,24 +129,24 @@ file:
         naked_identifier: kafka_connection
     - bracketed:
         start_bracket: (
-        raw: TOPIC
+        code: TOPIC
         single_quote: "'test_topic'"
         end_bracket: )
     - keyword: FORMAT
-    - raw: PROTOBUF
-    - raw: USING
-    - raw: CONFLUENT
-    - raw: SCHEMA
-    - raw: REGISTRY
-    - raw: CONNECTION
-    - raw: csr_connection
-    - raw: WITH
+    - code: PROTOBUF
+    - code: USING
+    - code: CONFLUENT
+    - code: SCHEMA
+    - code: REGISTRY
+    - code: CONNECTION
+    - code: csr_connection
+    - code: WITH
     - bracketed:
-      - start_bracket: (
-      - raw: SIZE
-      - raw: '='
-      - single_quote: "'3xsmall'"
-      - end_bracket: )
+        start_bracket: (
+        code: SIZE
+        raw: '='
+        single_quote: "'3xsmall'"
+        end_bracket: )
 - statement_terminator: ;
 - statement:
     create_source_kafka_statement:
@@ -161,20 +161,20 @@ file:
         naked_identifier: kafka_connection
     - bracketed:
         start_bracket: (
-        raw: TOPIC
+        code: TOPIC
         single_quote: "'test_topic'"
         end_bracket: )
     - keyword: FORMAT
-    - raw: TEXT
-    - raw: ENVELOPE
-    - raw: UPSERT
-    - raw: WITH
+    - code: TEXT
+    - code: ENVELOPE
+    - code: UPSERT
+    - code: WITH
     - bracketed:
-      - start_bracket: (
-      - raw: SIZE
-      - raw: '='
-      - single_quote: "'3xsmall'"
-      - end_bracket: )
+        start_bracket: (
+        code: SIZE
+        raw: '='
+        single_quote: "'3xsmall'"
+        end_bracket: )
 - statement_terminator: ;
 - statement:
     create_source_kafka_statement:
@@ -200,21 +200,21 @@ file:
         naked_identifier: kafka_connection
     - bracketed:
         start_bracket: (
-        raw: TOPIC
+        code: TOPIC
         single_quote: "'test_topic'"
         end_bracket: )
     - keyword: FORMAT
-    - raw: CSV
-    - raw: WITH
+    - code: CSV
+    - code: WITH
     - numeric_literal: '3'
-    - raw: COLUMNS
-    - raw: WITH
+    - code: COLUMNS
+    - code: WITH
     - bracketed:
-      - start_bracket: (
-      - raw: SIZE
-      - raw: '='
-      - single_quote: "'3xsmall'"
-      - end_bracket: )
+        start_bracket: (
+        code: SIZE
+        raw: '='
+        single_quote: "'3xsmall'"
+        end_bracket: )
 - statement_terminator: ;
 - statement:
     create_source_load_generator_statement:
@@ -231,11 +231,11 @@ file:
     - keyword: TABLES
     - keyword: WITH
     - bracketed:
-      - start_bracket: (
-      - raw: SIZE
-      - raw: '='
-      - single_quote: "'3xsmall'"
-      - end_bracket: )
+        start_bracket: (
+        code: SIZE
+        raw: '='
+        single_quote: "'3xsmall'"
+        end_bracket: )
 - statement_terminator: ;
 - statement:
     create_source_load_generator_statement:
@@ -249,8 +249,8 @@ file:
     - keyword: TPCH
     - bracketed:
       - start_bracket: (
-      - raw: SCALE
-      - raw: FACTOR
+      - code: SCALE
+      - code: FACTOR
       - numeric_literal: '1'
       - end_bracket: )
     - keyword: FOR
@@ -258,11 +258,11 @@ file:
     - keyword: TABLES
     - keyword: WITH
     - bracketed:
-      - start_bracket: (
-      - raw: SIZE
-      - raw: '='
-      - single_quote: "'3xsmall'"
-      - end_bracket: )
+        start_bracket: (
+        code: SIZE
+        raw: '='
+        single_quote: "'3xsmall'"
+        end_bracket: )
 - statement_terminator: ;
 - statement:
     create_source_load_generator_statement:
@@ -276,11 +276,11 @@ file:
     - keyword: COUNTER
     - keyword: WITH
     - bracketed:
-      - start_bracket: (
-      - raw: SIZE
-      - raw: '='
-      - single_quote: "'3xsmall'"
-      - end_bracket: )
+        start_bracket: (
+        code: SIZE
+        raw: '='
+        single_quote: "'3xsmall'"
+        end_bracket: )
 - statement_terminator: ;
 - statement:
     create_source_postgres_statement:
@@ -295,7 +295,7 @@ file:
         naked_identifier: pg_connection
     - bracketed:
         start_bracket: (
-        raw: PUBLICATION
+        code: PUBLICATION
         single_quote: "'mz_source'"
         end_bracket: )
     - keyword: FOR
@@ -303,11 +303,11 @@ file:
     - keyword: TABLES
     - keyword: WITH
     - bracketed:
-      - start_bracket: (
-      - raw: SIZE
-      - raw: '='
-      - single_quote: "'3xsmall'"
-      - end_bracket: )
+        start_bracket: (
+        code: SIZE
+        raw: '='
+        single_quote: "'3xsmall'"
+        end_bracket: )
 - statement_terminator: ;
 - statement:
     create_source_postgres_statement:
@@ -322,26 +322,26 @@ file:
         naked_identifier: pg_connection
     - bracketed:
         start_bracket: (
-        raw: PUBLICATION
+        code: PUBLICATION
         single_quote: "'mz_source'"
         end_bracket: )
     - keyword: FOR
     - keyword: TABLES
     - bracketed:
       - start_bracket: (
-      - raw: table_1
+      - code: table_1
       - comma: ','
-      - raw: table_2
-      - raw: AS
-      - raw: alias_table_2
+      - code: table_2
+      - code: AS
+      - code: alias_table_2
       - end_bracket: )
     - keyword: WITH
     - bracketed:
-      - start_bracket: (
-      - raw: SIZE
-      - raw: '='
-      - single_quote: "'3xsmall'"
-      - end_bracket: )
+        start_bracket: (
+        code: SIZE
+        raw: '='
+        single_quote: "'3xsmall'"
+        end_bracket: )
 - statement_terminator: ;
 - statement:
     create_source_postgres_statement:
@@ -356,16 +356,16 @@ file:
         naked_identifier: pg_connection
     - bracketed:
       - start_bracket: (
-      - raw: PUBLICATION
+      - code: PUBLICATION
       - single_quote: "'mz_source'"
       - comma: ','
-      - raw: TEXT
-      - raw: COLUMNS
+      - code: TEXT
+      - code: COLUMNS
       - bracketed:
         - start_bracket: (
-        - raw: table
+        - code: table
         - raw: .
-        - raw: column_of_unsupported_type
+        - code: column_of_unsupported_type
         - end_bracket: )
       - end_bracket: )
     - keyword: FOR
@@ -373,11 +373,11 @@ file:
     - keyword: TABLES
     - keyword: WITH
     - bracketed:
-      - start_bracket: (
-      - raw: SIZE
-      - raw: '='
-      - single_quote: "'3xsmall'"
-      - end_bracket: )
+        start_bracket: (
+        code: SIZE
+        raw: '='
+        single_quote: "'3xsmall'"
+        end_bracket: )
 - statement_terminator: ;
 - statement:
     create_source_postgres_statement:
@@ -392,16 +392,16 @@ file:
         naked_identifier: pg_connection
     - bracketed:
         start_bracket: (
-        raw: PUBLICATION
+        code: PUBLICATION
         single_quote: "'mz_source'"
         end_bracket: )
     - keyword: WITH
     - bracketed:
-      - start_bracket: (
-      - raw: SIZE
-      - raw: '='
-      - single_quote: "'3xsmall'"
-      - end_bracket: )
+        start_bracket: (
+        code: SIZE
+        raw: '='
+        single_quote: "'3xsmall'"
+        end_bracket: )
 - statement_terminator: ;
 - statement:
     create_type_statement:
diff --git a/test/fixtures/dialects/materialize/materialize_create_views.yml b/test/fixtures/dialects/materialize/materialize_create_views.yml
index f8a5ae5..a80f2af 100644
--- a/test/fixtures/dialects/materialize/materialize_create_views.yml
+++ b/test/fixtures/dialects/materialize/materialize_create_views.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 3e47162cf0ca19d94e60235647045a9bdfb752f2efa41d00991012479e329ae4
+_hash: 4be57036ad8e690e8a4c9558ca524fb6b19854e6f04ea7c17a30896b5096745b
 file:
 - statement:
     create_materialized_view_statement:
@@ -15,9 +15,9 @@ file:
       - dot: .
       - quoted_identifier: '"test"'
     - keyword: AS
-    - raw: SELECT
+    - code: SELECT
     - numeric_literal: '1'
-    - raw: AS
+    - code: AS
     - double_quote: '"id"'
 - statement_terminator: ;
 - statement:
@@ -48,11 +48,11 @@ file:
       - dot: .
       - quoted_identifier: '"test"'
     - keyword: AS
-    - raw: SELECT
+    - code: SELECT
     - single_quote: "'{\"a\": 1}'"
     - raw: '::'
-    - raw: json
-    - raw: AS
+    - code: json
+    - code: AS
     - double_quote: '"id"'
 - statement_terminator: ;
 - statement:
@@ -63,35 +63,35 @@ file:
     - object_reference:
         naked_identifier: active_customer_per_geo
     - keyword: AS
-    - raw: SELECT
-    - raw: geo
+    - code: SELECT
+    - code: geo
     - raw: .
-    - raw: name
+    - code: name
     - comma: ','
-    - raw: count
+    - code: count
     - bracketed:
         start_bracket: (
         raw: '*'
         end_bracket: )
-    - raw: FROM
-    - raw: geo_regions
-    - raw: AS
-    - raw: geo
-    - raw: JOIN
-    - raw: active_customers
-    - raw: 'ON'
-    - raw: active_customers
+    - code: FROM
+    - code: geo_regions
+    - code: AS
+    - code: geo
+    - code: JOIN
+    - code: active_customers
+    - code: 'ON'
+    - code: active_customers
     - raw: .
-    - raw: geo_id
+    - code: geo_id
     - raw: '='
-    - raw: geo
+    - code: geo
     - raw: .
-    - raw: id
-    - raw: GROUP
-    - raw: BY
-    - raw: geo
+    - code: id
+    - code: GROUP
+    - code: BY
+    - code: geo
     - raw: .
-    - raw: name
+    - code: name
 - statement_terminator: ;
 - statement:
     create_materialized_view_statement:
@@ -101,17 +101,17 @@ file:
     - object_reference:
         naked_identifier: active_customers
     - keyword: AS
-    - raw: SELECT
-    - raw: guid
+    - code: SELECT
+    - code: guid
     - comma: ','
-    - raw: geo_id
+    - code: geo_id
     - comma: ','
-    - raw: last_active_on
-    - raw: FROM
-    - raw: customer_source
-    - raw: GROUP
-    - raw: BY
-    - raw: geo_id
+    - code: last_active_on
+    - code: FROM
+    - code: customer_source
+    - code: GROUP
+    - code: BY
+    - code: geo_id
 - statement_terminator: ;
 - statement:
     create_view_statement:
diff --git a/test/fixtures/dialects/materialize/materialize_explain_statements.yml b/test/fixtures/dialects/materialize/materialize_explain_statements.yml
index 7675e5a..201fccc 100644
--- a/test/fixtures/dialects/materialize/materialize_explain_statements.yml
+++ b/test/fixtures/dialects/materialize/materialize_explain_statements.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 9671d1e626ab3257e443b6f9a83144bdaf0a1270c3f44620d63684ede6275855
+_hash: c95b68f0e3ce20972e4b6086e90fb236dbc77e2eb7d718e4df5b4efa6158e63b
 file:
 - statement:
     explain_statement:
@@ -72,9 +72,9 @@ file:
     - keyword: WITH
     - bracketed:
       - start_bracket: (
-      - raw: arity
+      - code: arity
       - comma: ','
-      - raw: join_impls
+      - code: join_impls
       - end_bracket: )
     - keyword: VIEW
     - object_reference:
@@ -88,7 +88,7 @@ file:
     - keyword: WITH
     - bracketed:
         start_bracket: (
-        raw: arity
+        code: arity
         end_bracket: )
     - keyword: AS
     - keyword: TEXT
diff --git a/test/fixtures/dialects/materialize/materialize_subscribe_fetch_statements.yml b/test/fixtures/dialects/materialize/materialize_subscribe_fetch_statements.yml
index 25158ff..8cd427c 100644
--- a/test/fixtures/dialects/materialize/materialize_subscribe_fetch_statements.yml
+++ b/test/fixtures/dialects/materialize/materialize_subscribe_fetch_statements.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 3d0f7e2826bb710e0a8adaf01c0f6b58df93f5ab72b5441ecb904afcbbbaf479
+_hash: 211577430ed00b854e469e247c1c35a412aa5b87ea2de4e89d4c0b1d01032d50
 file:
 - statement:
     fetch_statement:
@@ -13,11 +13,11 @@ file:
         naked_identifier: c
     - keyword: WITH
     - bracketed:
-      - start_bracket: (
-      - raw: timeout
-      - raw: '='
-      - single_quote: "'1s'"
-      - end_bracket: )
+        start_bracket: (
+        code: timeout
+        raw: '='
+        single_quote: "'1s'"
+        end_bracket: )
 - statement_terminator: ;
 - statement:
     fetch_statement:
@@ -33,8 +33,8 @@ file:
         naked_identifier: c
     - keyword: CURSOR
     - keyword: FOR
-    - raw: SUBSCRIBE
-    - raw: fetch_during_ingest
+    - code: SUBSCRIBE
+    - code: fetch_during_ingest
 - statement_terminator: ;
 - statement:
     declare_statement:
@@ -43,12 +43,12 @@ file:
         naked_identifier: c
     - keyword: CURSOR
     - keyword: FOR
-    - raw: SUBSCRIBE
+    - code: SUBSCRIBE
     - bracketed:
       - start_bracket: (
-      - raw: SELECT
+      - code: SELECT
       - raw: '*'
-      - raw: FROM
-      - raw: t1
+      - code: FROM
+      - code: t1
       - end_bracket: )
 - statement_terminator: ;
diff --git a/test/fixtures/dialects/mysql/alter_database.sql b/test/fixtures/dialects/mysql/alter_database.sql
new file mode 100644
index 0000000..e8e3a90
--- /dev/null
+++ b/test/fixtures/dialects/mysql/alter_database.sql
@@ -0,0 +1,26 @@
+ALTER DATABASE my_database
+DEFAULT CHARACTER SET utf8mb4
+COLLATE utf8mb4_0900_ai_ci
+DEFAULT ENCRYPTION 'N';
+
+ALTER DATABASE my_database
+DEFAULT CHARACTER SET = utf8mb4
+COLLATE = utf8mb4_0900_ai_ci
+DEFAULT ENCRYPTION = 'N';
+
+ALTER SCHEMA my_database
+DEFAULT CHARACTER SET utf8mb4
+COLLATE utf8mb4_0900_ai_ci
+DEFAULT ENCRYPTION 'N';
+
+ALTER DATABASE my_database
+READ ONLY DEFAULT;
+
+ALTER DATABASE my_database
+READ ONLY 0;
+
+ALTER DATABASE my_database
+READ ONLY 1;
+
+ALTER DATABASE
+READ ONLY DEFAULT;
diff --git a/test/fixtures/dialects/mysql/alter_database.yml b/test/fixtures/dialects/mysql/alter_database.yml
new file mode 100644
index 0000000..78db7d1
--- /dev/null
+++ b/test/fixtures/dialects/mysql/alter_database.yml
@@ -0,0 +1,112 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 1a3cf1b2d4e056df9b2f63b93f99b8927e62cb596d91f695a4775e1e7a8ba08a
+file:
+- statement:
+    alter_database_statement:
+    - keyword: ALTER
+    - keyword: DATABASE
+    - database_reference:
+        naked_identifier: my_database
+    - alter_option_segment:
+      - keyword: DEFAULT
+      - keyword: CHARACTER
+      - keyword: SET
+      - naked_identifier: utf8mb4
+    - alter_option_segment:
+        keyword: COLLATE
+        naked_identifier: utf8mb4_0900_ai_ci
+    - alter_option_segment:
+      - keyword: DEFAULT
+      - keyword: ENCRYPTION
+      - quoted_literal: "'N'"
+- statement_terminator: ;
+- statement:
+    alter_database_statement:
+    - keyword: ALTER
+    - keyword: DATABASE
+    - database_reference:
+        naked_identifier: my_database
+    - alter_option_segment:
+      - keyword: DEFAULT
+      - keyword: CHARACTER
+      - keyword: SET
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - naked_identifier: utf8mb4
+    - alter_option_segment:
+        keyword: COLLATE
+        comparison_operator:
+          raw_comparison_operator: '='
+        naked_identifier: utf8mb4_0900_ai_ci
+    - alter_option_segment:
+      - keyword: DEFAULT
+      - keyword: ENCRYPTION
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - quoted_literal: "'N'"
+- statement_terminator: ;
+- statement:
+    alter_database_statement:
+    - keyword: ALTER
+    - keyword: SCHEMA
+    - database_reference:
+        naked_identifier: my_database
+    - alter_option_segment:
+      - keyword: DEFAULT
+      - keyword: CHARACTER
+      - keyword: SET
+      - naked_identifier: utf8mb4
+    - alter_option_segment:
+        keyword: COLLATE
+        naked_identifier: utf8mb4_0900_ai_ci
+    - alter_option_segment:
+      - keyword: DEFAULT
+      - keyword: ENCRYPTION
+      - quoted_literal: "'N'"
+- statement_terminator: ;
+- statement:
+    alter_database_statement:
+    - keyword: ALTER
+    - keyword: DATABASE
+    - database_reference:
+        naked_identifier: my_database
+    - alter_option_segment:
+      - keyword: READ
+      - keyword: ONLY
+      - keyword: DEFAULT
+- statement_terminator: ;
+- statement:
+    alter_database_statement:
+    - keyword: ALTER
+    - keyword: DATABASE
+    - database_reference:
+        naked_identifier: my_database
+    - alter_option_segment:
+      - keyword: READ
+      - keyword: ONLY
+      - numeric_literal: '0'
+- statement_terminator: ;
+- statement:
+    alter_database_statement:
+    - keyword: ALTER
+    - keyword: DATABASE
+    - database_reference:
+        naked_identifier: my_database
+    - alter_option_segment:
+      - keyword: READ
+      - keyword: ONLY
+      - numeric_literal: '1'
+- statement_terminator: ;
+- statement:
+    alter_database_statement:
+    - keyword: ALTER
+    - keyword: DATABASE
+    - alter_option_segment:
+      - keyword: READ
+      - keyword: ONLY
+      - keyword: DEFAULT
+- statement_terminator: ;
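
The YAML records above are the serialised parse trees that SQLFluff's test suite checks these fixtures against. As a rough way to reproduce one interactively — a minimal sketch assuming SQLFluff 2.x and its documented simple API, where sqlfluff.parse() returns the tree as a nested dict rooted at a "file" node — the following should print a structure matching the fixture (minus the hash header):

import sqlfluff

# Parse one of the new fixture statements with the mysql dialect. parse()
# raises if the statement is unparsable, so reaching print() is itself a test.
record = sqlfluff.parse(
    "ALTER DATABASE my_database READ ONLY DEFAULT;", dialect="mysql"
)
print(record)  # expect a top-level "file" key wrapping alter_database_statement
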
diff --git a/test/fixtures/dialects/mysql/alter_table.sql b/test/fixtures/dialects/mysql/alter_table.sql
index d85de94..e5a30bc 100644
--- a/test/fixtures/dialects/mysql/alter_table.sql
+++ b/test/fixtures/dialects/mysql/alter_table.sql
@@ -46,4 +46,6 @@ KEY_BLOCK_SIZE 8;
 ALTER TABLE `foo`.`bar` ADD INDEX `index_name`(`col_1`, `col_2`, `col_3`)
 KEY_BLOCK_SIZE 8 COMMENT 'index for col_1, col_2, col_3';
 
-ALTER TABLE `foo`.`bar` DROP INDEX `index_name`
+ALTER TABLE `foo`.`bar` DROP INDEX `index_name`;
+
+ALTER TABLE `x` ADD CONSTRAINT FOREIGN KEY(`xk`) REFERENCES `y`(`yk`);
diff --git a/test/fixtures/dialects/mysql/alter_table.yml b/test/fixtures/dialects/mysql/alter_table.yml
index 39826a9..2ed0937 100644
--- a/test/fixtures/dialects/mysql/alter_table.yml
+++ b/test/fixtures/dialects/mysql/alter_table.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 41fcfee6a93246aff99c4bb64693cda8eff625400fa30d5fd23e5063cf327638
+_hash: 395b48c34e9c2652d61220b12abffe11f67b90929ebf2faa8b053406caf530ea
 file:
 - statement:
     alter_table_statement:
@@ -17,11 +17,11 @@ file:
         quoted_identifier: '`name`'
         data_type:
           data_type_identifier: varchar
-          bracketed:
-            start_bracket: (
-            expression:
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
               numeric_literal: '255'
-            end_bracket: )
+              end_bracket: )
         column_constraint_segment:
         - keyword: NOT
         - keyword: 'NULL'
@@ -75,11 +75,11 @@ file:
       - quoted_identifier: '`date_of_birth`'
       - data_type:
           data_type_identifier: INT
-          bracketed:
-            start_bracket: (
-            expression:
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
               numeric_literal: '11'
-            end_bracket: )
+              end_bracket: )
       - column_constraint_segment:
           keyword: 'NULL'
       - column_constraint_segment:
@@ -100,11 +100,11 @@ file:
         quoted_identifier: '`date_of_birth`'
         data_type:
           data_type_identifier: INT
-          bracketed:
-            start_bracket: (
-            expression:
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
               numeric_literal: '11'
-            end_bracket: )
+              end_bracket: )
         column_constraint_segment:
         - keyword: NOT
         - keyword: 'NULL'
@@ -123,11 +123,11 @@ file:
         quoted_identifier: '`date_of_birth`'
         data_type:
           data_type_identifier: INT
-          bracketed:
-            start_bracket: (
-            expression:
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
               numeric_literal: '11'
-            end_bracket: )
+              end_bracket: )
     - keyword: FIRST
 - statement_terminator: ;
 - statement:
@@ -144,11 +144,11 @@ file:
         quoted_identifier: '`date_of_birth`'
         data_type:
           data_type_identifier: INT
-          bracketed:
-            start_bracket: (
-            expression:
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
               numeric_literal: '11'
-            end_bracket: )
+              end_bracket: )
     - keyword: AFTER
     - column_reference:
         quoted_identifier: '`name`'
@@ -394,3 +394,29 @@ file:
     - keyword: INDEX
     - index_reference:
         quoted_identifier: '`index_name`'
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+        quoted_identifier: '`x`'
+    - keyword: ADD
+    - table_constraint:
+      - keyword: CONSTRAINT
+      - keyword: FOREIGN
+      - keyword: KEY
+      - bracketed:
+          start_bracket: (
+          column_reference:
+            quoted_identifier: '`xk`'
+          end_bracket: )
+      - keyword: REFERENCES
+      - column_reference:
+          quoted_identifier: '`y`'
+      - bracketed:
+          start_bracket: (
+          column_reference:
+            quoted_identifier: '`yk`'
+          end_bracket: )
+- statement_terminator: ;
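
A change that recurs throughout these fixtures: data-type arguments such as VARCHAR(255) and INT(11) now parse inside a bracketed_arguments wrapper rather than a bare bracketed expression. A quick check against an installed copy (node name taken from the fixtures above; the json round-trip is only a convenient way to search the nested dict):

import json
import sqlfluff

record = sqlfluff.parse("CREATE TABLE t (c VARCHAR(255));", dialect="mysql")
# The serialised tree should contain the new 2.x wrapper node.
assert "bracketed_arguments" in json.dumps(record)
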
diff --git a/test/fixtures/dialects/mysql/alter_view.sql b/test/fixtures/dialects/mysql/alter_view.sql
index fefc993..8f8e674 100644
--- a/test/fixtures/dialects/mysql/alter_view.sql
+++ b/test/fixtures/dialects/mysql/alter_view.sql
@@ -1,9 +1,15 @@
-ALTER VIEW v2 AS 
+ALTER VIEW v2 AS
 SELECT c, d FROM v1;
 
-ALTER VIEW v1 (c,d) AS 
+ALTER VIEW v2 AS
+(SELECT c, d FROM v1);
+
+ALTER VIEW v1 (c,d) AS
 SELECT a,max(b) FROM t1 GROUP BY a;
 
-ALTER VIEW v2 AS 
-SELECT * FROM t2 WHERE s1 IN (SELECT s1 FROM t1) 
+ALTER VIEW v2 AS
+SELECT * FROM t2 WHERE s1 IN (SELECT s1 FROM t1)
 WITH CHECK OPTION;
+
+ALTER VIEW v2 AS
+SELECT 1 UNION SELECT 2;
diff --git a/test/fixtures/dialects/mysql/alter_view.yml b/test/fixtures/dialects/mysql/alter_view.yml
index f03a780..feae37a 100644
--- a/test/fixtures/dialects/mysql/alter_view.yml
+++ b/test/fixtures/dialects/mysql/alter_view.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: cc2632369f9517b2f0dfe391784f82481b2a0941b2facec9dcec47db787fa552
+_hash: 592fa1665920db815669f5cbafad024f74137b2b6513a2e77b79e155af09eb38
 file:
 - statement:
     alter_view_statement:
@@ -30,6 +30,34 @@ file:
                 table_reference:
                   naked_identifier: v1
 - statement_terminator: ;
+- statement:
+    alter_view_statement:
+    - keyword: ALTER
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: v2
+    - keyword: AS
+    - bracketed:
+        start_bracket: (
+        select_statement:
+          select_clause:
+          - keyword: SELECT
+          - select_clause_element:
+              column_reference:
+                naked_identifier: c
+          - comma: ','
+          - select_clause_element:
+              column_reference:
+                naked_identifier: d
+          from_clause:
+            keyword: FROM
+            from_expression:
+              from_expression_element:
+                table_expression:
+                  table_reference:
+                    naked_identifier: v1
+        end_bracket: )
+- statement_terminator: ;
 - statement:
     alter_view_statement:
     - keyword: ALTER
@@ -123,3 +151,24 @@ file:
       - keyword: CHECK
       - keyword: OPTION
 - statement_terminator: ;
+- statement:
+    alter_view_statement:
+    - keyword: ALTER
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: v2
+    - keyword: AS
+    - set_expression:
+      - select_statement:
+          select_clause:
+            keyword: SELECT
+            select_clause_element:
+              numeric_literal: '1'
+      - set_operator:
+          keyword: UNION
+      - select_statement:
+          select_clause:
+            keyword: SELECT
+            select_clause_element:
+              numeric_literal: '2'
+- statement_terminator: ;
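
These additions cover ALTER VIEW bodies written as a bracketed SELECT or as a set expression (UNION). A sketch checking that both forms parse cleanly, assuming lint output marks parse failures with the "PRS" code as recent SQLFluff releases do:

import sqlfluff

for sql in (
    "ALTER VIEW v2 AS (SELECT c, d FROM v1);",
    "ALTER VIEW v2 AS SELECT 1 UNION SELECT 2;",
):
    violations = sqlfluff.lint(sql, dialect="mysql")
    # No parse-level violations expected for either body form.
    assert not any(v["code"] == "PRS" for v in violations), sql
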
diff --git a/test/fixtures/dialects/mysql/column_alias.sql b/test/fixtures/dialects/mysql/column_alias.sql
new file mode 100644
index 0000000..2af9b84
--- /dev/null
+++ b/test/fixtures/dialects/mysql/column_alias.sql
@@ -0,0 +1,3 @@
+SELECT 1 AS `one`;
+SELECT 2 AS 'two';
+SELECT 3 AS "three";
diff --git a/test/fixtures/dialects/mysql/column_alias.yml b/test/fixtures/dialects/mysql/column_alias.yml
new file mode 100644
index 0000000..f4629f7
--- /dev/null
+++ b/test/fixtures/dialects/mysql/column_alias.yml
@@ -0,0 +1,37 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: c9775a3b31f10dbd2195648c11645df65acb8dd428e41c317fbbd0c2941c615e
+file:
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          numeric_literal: '1'
+          alias_expression:
+            keyword: AS
+            quoted_identifier: '`one`'
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          numeric_literal: '2'
+          alias_expression:
+            keyword: AS
+            quoted_literal: "'two'"
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          numeric_literal: '3'
+          alias_expression:
+            keyword: AS
+            quoted_literal: '"three"'
+- statement_terminator: ;
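
Note how the three alias styles land differently in the tree: a backtick alias parses as a quoted_identifier, while single- and double-quoted aliases parse as quoted_literals. Under the same simple-API assumptions as above:

import json
import sqlfluff

# Backtick alias: an identifier node.
assert "quoted_identifier" in json.dumps(
    sqlfluff.parse("SELECT 1 AS `one`;", dialect="mysql")
)
# Single-quoted alias: a literal node.
assert "quoted_literal" in json.dumps(
    sqlfluff.parse("SELECT 2 AS 'two';", dialect="mysql")
)
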
diff --git a/test/fixtures/dialects/mysql/create_database.sql b/test/fixtures/dialects/mysql/create_database.sql
new file mode 100644
index 0000000..21f8c9b
--- /dev/null
+++ b/test/fixtures/dialects/mysql/create_database.sql
@@ -0,0 +1,18 @@
+CREATE DATABASE my_database;
+
+CREATE DATABASE IF NOT EXISTS my_database;
+
+CREATE DATABASE my_database
+DEFAULT CHARACTER SET utf8mb4
+COLLATE utf8mb4_0900_ai_ci
+DEFAULT ENCRYPTION 'N';
+
+CREATE DATABASE my_database
+DEFAULT CHARACTER SET = utf8mb4
+COLLATE = utf8mb4_0900_ai_ci
+DEFAULT ENCRYPTION = 'N';
+
+CREATE SCHEMA my_database
+DEFAULT CHARACTER SET utf8mb4
+COLLATE utf8mb4_0900_ai_ci
+DEFAULT ENCRYPTION 'N';
diff --git a/test/fixtures/dialects/mysql/create_database.yml b/test/fixtures/dialects/mysql/create_database.yml
new file mode 100644
index 0000000..aab6ce1
--- /dev/null
+++ b/test/fixtures/dialects/mysql/create_database.yml
@@ -0,0 +1,87 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 5142ae149abf66ce1a11d844c6ede33d1eee99401cbeda5caa778e8cb0089167
+file:
+- statement:
+    create_database_statement:
+    - keyword: CREATE
+    - keyword: DATABASE
+    - database_reference:
+        naked_identifier: my_database
+- statement_terminator: ;
+- statement:
+    create_database_statement:
+    - keyword: CREATE
+    - keyword: DATABASE
+    - keyword: IF
+    - keyword: NOT
+    - keyword: EXISTS
+    - database_reference:
+        naked_identifier: my_database
+- statement_terminator: ;
+- statement:
+    create_database_statement:
+    - keyword: CREATE
+    - keyword: DATABASE
+    - database_reference:
+        naked_identifier: my_database
+    - create_option_segment:
+      - keyword: DEFAULT
+      - keyword: CHARACTER
+      - keyword: SET
+      - naked_identifier: utf8mb4
+    - create_option_segment:
+        keyword: COLLATE
+        naked_identifier: utf8mb4_0900_ai_ci
+    - create_option_segment:
+      - keyword: DEFAULT
+      - keyword: ENCRYPTION
+      - quoted_literal: "'N'"
+- statement_terminator: ;
+- statement:
+    create_database_statement:
+    - keyword: CREATE
+    - keyword: DATABASE
+    - database_reference:
+        naked_identifier: my_database
+    - create_option_segment:
+      - keyword: DEFAULT
+      - keyword: CHARACTER
+      - keyword: SET
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - naked_identifier: utf8mb4
+    - create_option_segment:
+        keyword: COLLATE
+        comparison_operator:
+          raw_comparison_operator: '='
+        naked_identifier: utf8mb4_0900_ai_ci
+    - create_option_segment:
+      - keyword: DEFAULT
+      - keyword: ENCRYPTION
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - quoted_literal: "'N'"
+- statement_terminator: ;
+- statement:
+    create_database_statement:
+    - keyword: CREATE
+    - keyword: SCHEMA
+    - database_reference:
+        naked_identifier: my_database
+    - create_option_segment:
+      - keyword: DEFAULT
+      - keyword: CHARACTER
+      - keyword: SET
+      - naked_identifier: utf8mb4
+    - create_option_segment:
+        keyword: COLLATE
+        naked_identifier: utf8mb4_0900_ai_ci
+    - create_option_segment:
+      - keyword: DEFAULT
+      - keyword: ENCRYPTION
+      - quoted_literal: "'N'"
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/mysql/create_index.sql b/test/fixtures/dialects/mysql/create_index.sql
new file mode 100644
index 0000000..94c6ece
--- /dev/null
+++ b/test/fixtures/dialects/mysql/create_index.sql
@@ -0,0 +1,12 @@
+CREATE INDEX idx ON tbl (col);
+CREATE UNIQUE INDEX idx ON tbl (col);
+CREATE FULLTEXT INDEX idx ON tbl (col);
+CREATE SPATIAL INDEX idx ON tbl (col);
+CREATE INDEX idx USING BTREE ON tbl (col);
+CREATE INDEX idx USING HASH ON tbl (col);
+CREATE INDEX idx ON tbl (col ASC);
+CREATE INDEX idx ON tbl (col DESC);
+CREATE INDEX part_of_name ON customer (name(10));
+CREATE INDEX idx ON tbl (col) ALGORITHM DEFAULT;
+CREATE INDEX idx ON tbl (col) LOCK DEFAULT;
+CREATE INDEX idx ON tbl ((col1 + col2), (col1 - col2), col1);
diff --git a/test/fixtures/dialects/mysql/create_index.yml b/test/fixtures/dialects/mysql/create_index.yml
new file mode 100644
index 0000000..5756dbc
--- /dev/null
+++ b/test/fixtures/dialects/mysql/create_index.yml
@@ -0,0 +1,226 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: b32f27257ab99686db1f16498efe2ecfd3a419f47ed46444b44bb1f3bddce05e
+file:
+- statement:
+    create_index_statement:
+    - keyword: CREATE
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: idx
+    - keyword: 'ON'
+    - table_reference:
+        naked_identifier: tbl
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: col
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_index_statement:
+    - keyword: CREATE
+    - keyword: UNIQUE
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: idx
+    - keyword: 'ON'
+    - table_reference:
+        naked_identifier: tbl
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: col
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_index_statement:
+    - keyword: CREATE
+    - keyword: FULLTEXT
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: idx
+    - keyword: 'ON'
+    - table_reference:
+        naked_identifier: tbl
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: col
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_index_statement:
+    - keyword: CREATE
+    - keyword: SPATIAL
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: idx
+    - keyword: 'ON'
+    - table_reference:
+        naked_identifier: tbl
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: col
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_index_statement:
+    - keyword: CREATE
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: idx
+    - index_type:
+      - keyword: USING
+      - keyword: BTREE
+    - keyword: 'ON'
+    - table_reference:
+        naked_identifier: tbl
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: col
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_index_statement:
+    - keyword: CREATE
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: idx
+    - index_type:
+      - keyword: USING
+      - keyword: HASH
+    - keyword: 'ON'
+    - table_reference:
+        naked_identifier: tbl
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: col
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_index_statement:
+    - keyword: CREATE
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: idx
+    - keyword: 'ON'
+    - table_reference:
+        naked_identifier: tbl
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: col
+        keyword: ASC
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_index_statement:
+    - keyword: CREATE
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: idx
+    - keyword: 'ON'
+    - table_reference:
+        naked_identifier: tbl
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: col
+        keyword: DESC
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_index_statement:
+    - keyword: CREATE
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: part_of_name
+    - keyword: 'ON'
+    - table_reference:
+        naked_identifier: customer
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: name
+        bracketed:
+          start_bracket: (
+          numeric_literal: '10'
+          end_bracket: )
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_index_statement:
+    - keyword: CREATE
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: idx
+    - keyword: 'ON'
+    - table_reference:
+        naked_identifier: tbl
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: col
+        end_bracket: )
+    - keyword: ALGORITHM
+    - keyword: DEFAULT
+- statement_terminator: ;
+- statement:
+    create_index_statement:
+    - keyword: CREATE
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: idx
+    - keyword: 'ON'
+    - table_reference:
+        naked_identifier: tbl
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: col
+        end_bracket: )
+    - keyword: LOCK
+    - keyword: DEFAULT
+- statement_terminator: ;
+- statement:
+    create_index_statement:
+    - keyword: CREATE
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: idx
+    - keyword: 'ON'
+    - table_reference:
+        naked_identifier: tbl
+    - bracketed:
+      - start_bracket: (
+      - bracketed:
+          start_bracket: (
+          expression:
+          - column_reference:
+              naked_identifier: col1
+          - binary_operator: +
+          - column_reference:
+              naked_identifier: col2
+          end_bracket: )
+      - comma: ','
+      - bracketed:
+          start_bracket: (
+          expression:
+          - column_reference:
+              naked_identifier: col1
+          - binary_operator: '-'
+          - column_reference:
+              naked_identifier: col2
+          end_bracket: )
+      - comma: ','
+      - column_reference:
+          naked_identifier: col1
+      - end_bracket: )
+- statement_terminator: ;
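
The final statement exercises MySQL functional (expression) key parts, each parenthesised expression becoming its own bracketed node inside the column list. A smoke test under the same "PRS" assumption as earlier:

import sqlfluff

violations = sqlfluff.lint(
    "CREATE INDEX idx ON tbl ((col1 + col2), (col1 - col2), col1);",
    dialect="mysql",
)
# Expression key parts should no longer trip the parser.
assert not any(v["code"] == "PRS" for v in violations)
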
diff --git a/test/fixtures/dialects/mysql/create_table.yml b/test/fixtures/dialects/mysql/create_table.yml
index 9e1e00f..03f2348 100644
--- a/test/fixtures/dialects/mysql/create_table.yml
+++ b/test/fixtures/dialects/mysql/create_table.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: aefbc327ea54f5ebca58050d3670b6029b61a43b5be55b05c2d3680fc3843bf3
+_hash: 65200fb3726083ed628137a772c5c576374ea5dc38201fb6de7193ef010378aa
 file:
   statement:
     create_table_statement:
@@ -17,22 +17,22 @@ file:
           naked_identifier: b
           data_type:
             data_type_identifier: VARCHAR
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '255'
-              end_bracket: )
+                end_bracket: )
             keyword: BINARY
       - comma: ','
       - column_definition:
         - quoted_identifier: '`id`'
         - data_type:
             data_type_identifier: int
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '11'
-              end_bracket: )
+                end_bracket: )
             keyword: unsigned
         - column_constraint_segment:
           - keyword: NOT
diff --git a/test/fixtures/dialects/mysql/create_table_column_charset.sql b/test/fixtures/dialects/mysql/create_table_column_charset.sql
new file mode 100644
index 0000000..dcb5844
--- /dev/null
+++ b/test/fixtures/dialects/mysql/create_table_column_charset.sql
@@ -0,0 +1,6 @@
+CREATE TABLE t1
+(
+    col1 VARCHAR(5)
+      CHARACTER SET latin1
+      COLLATE latin1_german1_ci
+);
diff --git a/test/fixtures/dialects/mysql/create_table_column_charset.yml b/test/fixtures/dialects/mysql/create_table_column_charset.yml
new file mode 100644
index 0000000..398d8ad
--- /dev/null
+++ b/test/fixtures/dialects/mysql/create_table_column_charset.yml
@@ -0,0 +1,33 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 7c7aaa2edf1b20348d4e650e50d76e088af9310c66be309ecf2e0d0d9e70c60e
+file:
+  statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: t1
+    - bracketed:
+        start_bracket: (
+        column_definition:
+        - naked_identifier: col1
+        - data_type:
+            data_type_identifier: VARCHAR
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
+                numeric_literal: '5'
+                end_bracket: )
+        - column_constraint_segment:
+          - keyword: CHARACTER
+          - keyword: SET
+          - naked_identifier: latin1
+        - column_constraint_segment:
+            keyword: COLLATE
+            naked_identifier: latin1_german1_ci
+        end_bracket: )
+  statement_terminator: ;
diff --git a/test/fixtures/dialects/mysql/create_table_datetime.sql b/test/fixtures/dialects/mysql/create_table_datetime.sql
index 6af5804..8dd1c85 100644
--- a/test/fixtures/dialects/mysql/create_table_datetime.sql
+++ b/test/fixtures/dialects/mysql/create_table_datetime.sql
@@ -19,5 +19,6 @@ CREATE TABLE `foo` (
   ts11 TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP(),
   ts12 TIMESTAMP NULL DEFAULT '0000-00-00 00:00:00',
   ts13 TIMESTAMP NULL DEFAULT NOW,
-  ts14 TIMESTAMP NULL DEFAULT NOW()
+  ts14 TIMESTAMP NULL DEFAULT NOW(),
+  ts15 TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
 )
diff --git a/test/fixtures/dialects/mysql/create_table_datetime.yml b/test/fixtures/dialects/mysql/create_table_datetime.yml
index 0f32888..4adf7bb 100644
--- a/test/fixtures/dialects/mysql/create_table_datetime.yml
+++ b/test/fixtures/dialects/mysql/create_table_datetime.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 60cf5e0c09e3c94be9dcab99e6548d9b4b8c78d07c24dda49540b922537d4ad4
+_hash: 188d3b1c6aae202a0b7189c03b2eec325a79482ac3b0cd8c9205e389380bb71e
 file:
   statement:
     create_table_statement:
@@ -189,4 +189,15 @@ file:
         - bracketed:
             start_bracket: (
             end_bracket: )
+      - comma: ','
+      - column_definition:
+        - naked_identifier: ts15
+        - keyword: TIMESTAMP
+        - keyword: NOT
+        - keyword: 'NULL'
+        - keyword: DEFAULT
+        - keyword: CURRENT_TIMESTAMP
+        - keyword: 'ON'
+        - keyword: UPDATE
+        - keyword: CURRENT_TIMESTAMP
       - end_bracket: )
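
The new ts15 column pins down MySQL's ON UPDATE CURRENT_TIMESTAMP clause, parsed here as plain keywords on the column definition. A sketch relying on the assumption (noted earlier) that parse() raises for unparsable input:

import sqlfluff

# If the clause were unsupported, parse() would raise rather than return.
sqlfluff.parse(
    "CREATE TABLE t (ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP "
    "ON UPDATE CURRENT_TIMESTAMP);",
    dialect="mysql",
)
print("ON UPDATE CURRENT_TIMESTAMP parses")
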
diff --git a/test/fixtures/dialects/mysql/create_table_index.yml b/test/fixtures/dialects/mysql/create_table_index.yml
index ffa04a7..6221de7 100644
--- a/test/fixtures/dialects/mysql/create_table_index.yml
+++ b/test/fixtures/dialects/mysql/create_table_index.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 40c32eb27d8d8dc065fe6ac0e484d6b1f9ffffaf63230c70c66d24d20c93d5da
+_hash: 239a8345bf5a129ab923e9daa7100b71b1ce4d9f17d737d949fcde022171e019
 file:
   statement:
     create_table_statement:
@@ -28,11 +28,11 @@ file:
           naked_identifier: a
           data_type:
             data_type_identifier: TEXT
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '500'
-              end_bracket: )
+                end_bracket: )
       - comma: ','
       - column_definition:
           naked_identifier: b
diff --git a/test/fixtures/dialects/mysql/create_view.sql b/test/fixtures/dialects/mysql/create_view.sql
index 875ac57..39e8015 100644
--- a/test/fixtures/dialects/mysql/create_view.sql
+++ b/test/fixtures/dialects/mysql/create_view.sql
@@ -1,7 +1,12 @@
-CREATE VIEW v1 (c,d) AS 
+CREATE VIEW v1 (c,d) AS
 SELECT a,b FROM t1;
 
-CREATE OR REPLACE VIEW v1 (c,d,e,f) AS 
+CREATE OR REPLACE VIEW v1 (c,d,e,f) AS
 SELECT a,b, a IN (SELECT a+2 FROM t1), a = all (SELECT a FROM t1) FROM t1;
 
 CREATE VIEW v2 AS SELECT a FROM t1 WITH CASCADED CHECK OPTION;
+
+CREATE VIEW v2 AS (SELECT a FROM t1) WITH CASCADED CHECK OPTION;
+
+CREATE VIEW v2 AS
+SELECT 1 UNION SELECT 2;
diff --git a/test/fixtures/dialects/mysql/create_view.yml b/test/fixtures/dialects/mysql/create_view.yml
index 90ac6db..c0749de 100644
--- a/test/fixtures/dialects/mysql/create_view.yml
+++ b/test/fixtures/dialects/mysql/create_view.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 2d5ec417744c700eb1c088a09725b46b796614c5df4076a7474e6147f75f9e05
+_hash: 3bb1e638499d52434c20d168ce195274b3a557ac2b38cf7244b16d97e841d3aa
 file:
 - statement:
     create_view_statement:
@@ -157,3 +157,53 @@ file:
       - keyword: CHECK
       - keyword: OPTION
 - statement_terminator: ;
+- statement:
+    create_view_statement:
+    - keyword: CREATE
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: v2
+    - keyword: AS
+    - bracketed:
+        start_bracket: (
+        select_statement:
+          select_clause:
+            keyword: SELECT
+            select_clause_element:
+              column_reference:
+                naked_identifier: a
+          from_clause:
+            keyword: FROM
+            from_expression:
+              from_expression_element:
+                table_expression:
+                  table_reference:
+                    naked_identifier: t1
+        end_bracket: )
+    - with_check_options:
+      - keyword: WITH
+      - keyword: CASCADED
+      - keyword: CHECK
+      - keyword: OPTION
+- statement_terminator: ;
+- statement:
+    create_view_statement:
+    - keyword: CREATE
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: v2
+    - keyword: AS
+    - set_expression:
+      - select_statement:
+          select_clause:
+            keyword: SELECT
+            select_clause_element:
+              numeric_literal: '1'
+      - set_operator:
+          keyword: UNION
+      - select_statement:
+          select_clause:
+            keyword: SELECT
+            select_clause_element:
+              numeric_literal: '2'
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/mysql/explain.yml b/test/fixtures/dialects/mysql/explain.yml
index 248ce17..48a53a0 100644
--- a/test/fixtures/dialects/mysql/explain.yml
+++ b/test/fixtures/dialects/mysql/explain.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: a1fbac192cf02d98f28a924f4b894fce63a8c73ecd867e5eea4d56c1fa6f49da
+_hash: 3628780103c71c5a4e04ea5423c51580c32bcd022b3659fce570c05bae3a63cd
 file:
 - statement:
     explain_statement:
@@ -19,11 +19,8 @@ file:
       keyword: explain
       update_statement:
         keyword: update
-        from_expression:
-          from_expression_element:
-            table_expression:
-              table_reference:
-                naked_identifier: tbl
+        table_reference:
+          naked_identifier: tbl
         set_clause_list:
           keyword: set
           set_clause:
diff --git a/test/fixtures/dialects/mysql/function_return.sql b/test/fixtures/dialects/mysql/function_return.sql
new file mode 100644
index 0000000..d0cb692
--- /dev/null
+++ b/test/fixtures/dialects/mysql/function_return.sql
@@ -0,0 +1,6 @@
+CREATE FUNCTION `testfunction`(var1 int)
+RETURNS int
+DETERMINISTIC
+BEGIN
+RETURN (var1 + 1);
+END~
diff --git a/test/fixtures/dialects/mysql/function_return.yml b/test/fixtures/dialects/mysql/function_return.yml
new file mode 100644
index 0000000..76bc569
--- /dev/null
+++ b/test/fixtures/dialects/mysql/function_return.yml
@@ -0,0 +1,45 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: f900c91d9372a026ebd6e47b33e72d76158191583040ff73d52264c797df7317
+file:
+- statement:
+    create_function_statement:
+    - keyword: CREATE
+    - keyword: FUNCTION
+    - function_name:
+        quoted_identifier: '`testfunction`'
+    - function_parameter_list:
+        bracketed:
+          start_bracket: (
+          parameter: var1
+          data_type:
+            data_type_identifier: int
+          end_bracket: )
+    - keyword: RETURNS
+    - data_type:
+        data_type_identifier: int
+    - characteristic_statement:
+        keyword: DETERMINISTIC
+    - function_definition:
+        transaction_statement:
+          keyword: BEGIN
+          statement:
+            return_statement:
+              keyword: RETURN
+              expression:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    column_reference:
+                      naked_identifier: var1
+                    binary_operator: +
+                    numeric_literal: '1'
+                  end_bracket: )
+- statement_terminator: ;
+- statement:
+    transaction_statement:
+      keyword: END
+- statement_terminator: '~'
diff --git a/test/fixtures/dialects/mysql/interval.yml b/test/fixtures/dialects/mysql/interval.yml
index c2879ae..f01e613 100644
--- a/test/fixtures/dialects/mysql/interval.yml
+++ b/test/fixtures/dialects/mysql/interval.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 7ddd990903aaf070992fc87aca63aac0b651b984d60136b2975d2b98d7df5b4c
+_hash: 985f29f0b5b895f3e2b840714cec791c128deb41151edccecb3ee1d61c9fda4e
 file:
 - statement:
     select_statement:
@@ -96,11 +96,11 @@ file:
                 quoted_literal: "'1992-12-31 23:59:59.000002'"
             - comma: ','
             - expression:
-                keyword: INTERVAL
-                date_constructor_literal: "'1.999999'"
-            - expression:
-                column_reference:
-                  naked_identifier: SECOND_MICROSECOND
+                interval_expression:
+                  keyword: INTERVAL
+                  expression:
+                    quoted_literal: "'1.999999'"
+                  date_part: SECOND_MICROSECOND
             - end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -117,11 +117,11 @@ file:
                 quoted_literal: "'2100-12-31 23:59:59'"
             - comma: ','
             - expression:
-                keyword: INTERVAL
-                date_constructor_literal: "'1:1'"
-            - expression:
-                column_reference:
-                  naked_identifier: MINUTE_SECOND
+                interval_expression:
+                  keyword: INTERVAL
+                  expression:
+                    quoted_literal: "'1:1'"
+                  date_part: MINUTE_SECOND
             - end_bracket: )
 - statement_terminator: ;
 - statement:
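
interval.yml now folds INTERVAL '1.999999' SECOND_MICROSECOND into a single interval_expression carrying a date_part, where 1.4.x split it into two sibling expressions. A containment check in the same style as before (DATE_ADD is just a representative call site, not the fixture's exact function):

import json
import sqlfluff

dump = json.dumps(sqlfluff.parse(
    "SELECT DATE_ADD('1992-12-31 23:59:59.000002', "
    "INTERVAL '1.999999' SECOND_MICROSECOND);",
    dialect="mysql",
))
# The 2.x tree should contain the consolidated interval node.
assert "interval_expression" in dump
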
diff --git a/test/fixtures/dialects/mysql/purge_binary_logs.sql b/test/fixtures/dialects/mysql/purge_binary_logs.sql
index 97588ff..d1f5c13 100644
--- a/test/fixtures/dialects/mysql/purge_binary_logs.sql
+++ b/test/fixtures/dialects/mysql/purge_binary_logs.sql
@@ -1,9 +1,7 @@
 PURGE BINARY LOGS TO 'mysql-bin.010';
 
 PURGE BINARY LOGS BEFORE '2019-04-02 22:46:26';
-PURGE BINARY LOGS BEFORE DATETIME '2019-04-02 22:46:26';
 PURGE BINARY LOGS BEFORE TIMESTAMP '2019-04-02 22:46:26';
 
 PURGE BINARY LOGS BEFORE 19830905132800;
-PURGE BINARY LOGS BEFORE DATETIME 19830905132800;
 PURGE BINARY LOGS BEFORE TIMESTAMP 19830905132800;
diff --git a/test/fixtures/dialects/mysql/purge_binary_logs.yml b/test/fixtures/dialects/mysql/purge_binary_logs.yml
index af49b05..a9a2d81 100644
--- a/test/fixtures/dialects/mysql/purge_binary_logs.yml
+++ b/test/fixtures/dialects/mysql/purge_binary_logs.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 51c15c89e4dc6b04a76d423156b4a5be282cdf103af91fa0a316e8cd51725770
+_hash: 7bb1441e7e4eeb3327daa0ca6330a42e1887e1440bbbc94266437831ca63ec77
 file:
 - statement:
     purge_binary_logs_statement:
@@ -19,7 +19,8 @@ file:
     - keyword: BINARY
     - keyword: LOGS
     - keyword: BEFORE
-    - date_constructor_literal: "'2019-04-02 22:46:26'"
+    - expression:
+        quoted_literal: "'2019-04-02 22:46:26'"
 - statement_terminator: ;
 - statement:
     purge_binary_logs_statement:
@@ -27,8 +28,9 @@ file:
     - keyword: BINARY
     - keyword: LOGS
     - keyword: BEFORE
-    - keyword: DATETIME
-    - date_constructor_literal: "'2019-04-02 22:46:26'"
+    - expression:
+        keyword: TIMESTAMP
+        date_constructor_literal: "'2019-04-02 22:46:26'"
 - statement_terminator: ;
 - statement:
     purge_binary_logs_statement:
@@ -36,8 +38,8 @@ file:
     - keyword: BINARY
     - keyword: LOGS
     - keyword: BEFORE
-    - keyword: TIMESTAMP
-    - date_constructor_literal: "'2019-04-02 22:46:26'"
+    - expression:
+        numeric_literal: '19830905132800'
 - statement_terminator: ;
 - statement:
     purge_binary_logs_statement:
@@ -45,23 +47,7 @@ file:
     - keyword: BINARY
     - keyword: LOGS
     - keyword: BEFORE
-    - numeric_literal: '19830905132800'
-- statement_terminator: ;
-- statement:
-    purge_binary_logs_statement:
-    - keyword: PURGE
-    - keyword: BINARY
-    - keyword: LOGS
-    - keyword: BEFORE
-    - keyword: DATETIME
-    - numeric_literal: '19830905132800'
-- statement_terminator: ;
-- statement:
-    purge_binary_logs_statement:
-    - keyword: PURGE
-    - keyword: BINARY
-    - keyword: LOGS
-    - keyword: BEFORE
-    - keyword: TIMESTAMP
-    - numeric_literal: '19830905132800'
+    - expression:
+        keyword: TIMESTAMP
+        numeric_literal: '19830905132800'
 - statement_terminator: ;
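
The fixtures drop the BEFORE DATETIME form, keeping only bare and TIMESTAMP-prefixed expressions, so the removed form should now fail to parse. A paired check under the same "PRS" assumption (the expectation that DATETIME is rejected is inferred from the fixture deletion, not from release notes):

import sqlfluff

bad = sqlfluff.lint(
    "PURGE BINARY LOGS BEFORE DATETIME 19830905132800;", dialect="mysql"
)
ok = sqlfluff.lint(
    "PURGE BINARY LOGS BEFORE TIMESTAMP 19830905132800;", dialect="mysql"
)
assert any(v["code"] == "PRS" for v in bad)       # old form now unparsable
assert not any(v["code"] == "PRS" for v in ok)    # TIMESTAMP form still fine
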
diff --git a/test/fixtures/dialects/mysql/quoted_literal.yml b/test/fixtures/dialects/mysql/quoted_literal.yml
index d95d2b3..ef4ec1d 100644
--- a/test/fixtures/dialects/mysql/quoted_literal.yml
+++ b/test/fixtures/dialects/mysql/quoted_literal.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 2f87d8252b0b31dabebfcb159b1a7833cc00684b7aaa53286c32683fcceaaba2
+_hash: ad88d0ef3cc10c070f32053dbaea0a7d788e8f01ef4ea30ac820784eb76cc4f6
 file:
 - statement:
     select_statement:
@@ -166,11 +166,8 @@ file:
 - statement:
     update_statement:
       keyword: UPDATE
-      from_expression:
-        from_expression_element:
-          table_expression:
-            table_reference:
-              naked_identifier: table1
+      table_reference:
+        naked_identifier: table1
       set_clause_list:
         keyword: SET
         set_clause:
@@ -183,11 +180,8 @@ file:
 - statement:
     update_statement:
       keyword: UPDATE
-      from_expression:
-        from_expression_element:
-          table_expression:
-            table_reference:
-              naked_identifier: table1
+      table_reference:
+        naked_identifier: table1
       set_clause_list:
         keyword: SET
         set_clause:
diff --git a/test/fixtures/dialects/mysql/update.yml b/test/fixtures/dialects/mysql/update.yml
index a159d69..4297a8c 100644
--- a/test/fixtures/dialects/mysql/update.yml
+++ b/test/fixtures/dialects/mysql/update.yml
@@ -3,16 +3,13 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: e929b61b11d4ebbe7dcba4df45f5ee2d5e92b28ec812fd6032b7076f1ec4c3c2
+_hash: e97422c5a174d58b5638ee735094162724c01779876f33efb13089dcad90e6a2
 file:
 - statement:
     update_statement:
       keyword: UPDATE
-      from_expression:
-        from_expression_element:
-          table_expression:
-            table_reference:
-              naked_identifier: t1
+      table_reference:
+        naked_identifier: t1
       set_clause_list:
         keyword: SET
         set_clause:
@@ -29,11 +26,8 @@ file:
 - statement:
     update_statement:
       keyword: UPDATE
-      from_expression:
-        from_expression_element:
-          table_expression:
-            table_reference:
-              naked_identifier: t1
+      table_reference:
+        naked_identifier: t1
       set_clause_list:
       - keyword: SET
       - set_clause:
@@ -57,16 +51,13 @@ file:
 - statement_terminator: ;
 - statement:
     update_statement:
-      keyword: UPDATE
-      table_reference:
+    - keyword: UPDATE
+    - table_reference:
         naked_identifier: items
-      comma: ','
-      from_expression:
-        from_expression_element:
-          table_expression:
-            table_reference:
-              naked_identifier: month
-      set_clause_list:
+    - comma: ','
+    - table_reference:
+        naked_identifier: month
+    - set_clause_list:
         keyword: SET
         set_clause:
         - column_reference:
@@ -79,7 +70,7 @@ file:
           - naked_identifier: month
           - dot: .
           - naked_identifier: price
-      where_clause:
+    - where_clause:
         keyword: WHERE
         expression:
         - column_reference:
@@ -96,11 +87,8 @@ file:
 - statement:
     update_statement:
       keyword: UPDATE
-      from_expression:
-        from_expression_element:
-          table_expression:
-            table_reference:
-              naked_identifier: t
+      table_reference:
+        naked_identifier: t
       set_clause_list:
         keyword: SET
         set_clause:
@@ -123,11 +111,8 @@ file:
 - statement:
     update_statement:
       keyword: UPDATE
-      from_expression:
-        from_expression_element:
-          table_expression:
-            table_reference:
-              naked_identifier: items
+      table_reference:
+        naked_identifier: items
       set_clause_list:
         keyword: SET
         set_clause:
@@ -373,11 +358,8 @@ file:
     update_statement:
     - keyword: UPDATE
     - keyword: LOW_PRIORITY
-    - from_expression:
-        from_expression_element:
-          table_expression:
-            table_reference:
-              naked_identifier: foo
+    - table_reference:
+        naked_identifier: foo
     - set_clause_list:
         keyword: SET
         set_clause:
@@ -392,16 +374,13 @@ file:
 - statement_terminator: ;
 - statement:
     update_statement:
-      keyword: UPDATE
-      table_reference:
+    - keyword: UPDATE
+    - table_reference:
         naked_identifier: a
-      comma: ','
-      from_expression:
-        from_expression_element:
-          table_expression:
-            table_reference:
-              naked_identifier: b
-      set_clause_list:
+    - comma: ','
+    - table_reference:
+        naked_identifier: b
+    - set_clause_list:
         keyword: SET
         set_clause:
         - column_reference:
@@ -414,7 +393,7 @@ file:
           - naked_identifier: b
           - dot: .
           - naked_identifier: name
-      where_clause:
+    - where_clause:
         keyword: WHERE
         expression:
         - column_reference:
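
The recurring update.yml change: the UPDATE target now parses as a direct table_reference (multi-table targets become comma-separated siblings) instead of being wrapped in a from_expression. Same containment-check style as above:

import json
import sqlfluff

dump = json.dumps(sqlfluff.parse("UPDATE t1 SET col1 = 1;", dialect="mysql"))
# No from_expression wrapper should remain around the target table.
assert "from_expression" not in dump
assert "table_reference" in dump
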
diff --git a/test/fixtures/dialects/mysql/variable_assignment.yml b/test/fixtures/dialects/mysql/variable_assignment.yml
index 2071e73..4caa984 100644
--- a/test/fixtures/dialects/mysql/variable_assignment.yml
+++ b/test/fixtures/dialects/mysql/variable_assignment.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: afaf7d13bb288b44720e378bce9b97f290a3d003ca4bc17e02440b6b1438ba95
+_hash: bb42313b72d36da00df4839494e6e7357c200742df78c658ddbbbe051746a937
 file:
 - statement:
     select_statement:
@@ -48,11 +48,8 @@ file:
 - statement:
     update_statement:
       keyword: UPDATE
-      from_expression:
-        from_expression_element:
-          table_expression:
-            table_reference:
-              naked_identifier: t1
+      table_reference:
+        naked_identifier: t1
       set_clause_list:
         keyword: SET
         set_clause:
diff --git a/test/fixtures/dialects/oracle/alter_table.yml b/test/fixtures/dialects/oracle/alter_table.yml
index 0132484..85e3d46 100644
--- a/test/fixtures/dialects/oracle/alter_table.yml
+++ b/test/fixtures/dialects/oracle/alter_table.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 97b72bf4757d9b6121f40a94df1136f6d551faa919fd929adbc68d0ef932335b
+_hash: 62e662faef165ee133558b909165aafc753074f67cdf25a3870c7f25ecc55390
 file:
 - statement:
     alter_table_statement:
@@ -34,11 +34,11 @@ file:
             naked_identifier: column_name
             data_type:
               data_type_identifier: NUMBER
-              bracketed:
-                start_bracket: (
-                expression:
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
                   numeric_literal: '18'
-                end_bracket: )
+                  end_bracket: )
           end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -53,11 +53,11 @@ file:
           naked_identifier: column_name
           data_type:
             data_type_identifier: NUMBER
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '18'
-              end_bracket: )
+                end_bracket: )
 - statement_terminator: ;
 - statement:
     alter_table_statement:
diff --git a/test/fixtures/dialects/postgres/postgres_alter_database.yml b/test/fixtures/dialects/postgres/postgres_alter_database.yml
index 2a6e401..5e23c2f 100644
--- a/test/fixtures/dialects/postgres/postgres_alter_database.yml
+++ b/test/fixtures/dialects/postgres/postgres_alter_database.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: a825d9fed6996a22b4285be95b488003a962aa579f2d04f2e478075c2bc507d0
+_hash: b5ca029c5ff1a0e82e534f264dfe874a5dadb9863c930dc8118f51b94e818f3b
 file:
 - statement:
     alter_database_statement:
@@ -285,11 +285,10 @@ file:
     - role_reference:
         naked_identifier: some_user
     - keyword: SET
-    - object_reference:
-        naked_identifier: default_transaction_read_only
+    - parameter: default_transaction_read_only
     - comparison_operator:
         raw_comparison_operator: '='
-    - keyword: 'ON'
+    - naked_identifier: 'ON'
 - statement_terminator: ;
 - statement:
     alter_database_statement:
diff --git a/test/fixtures/dialects/postgres/postgres_alter_publication.sql b/test/fixtures/dialects/postgres/postgres_alter_publication.sql
new file mode 100644
index 0000000..5b55f01
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_alter_publication.sql
@@ -0,0 +1,33 @@
+-- More thorough testing of the PublicationObjectsSegment is in postgres_create_publication.sql.
+
+ALTER PUBLICATION abc ADD TABLE def;
+
+ALTER PUBLICATION abc ADD TABLE def, TABLE ghi;
+
+ALTER PUBLICATION abc ADD TABLE def, ghi*, ONLY jkl, ONLY (mno);
+
+ALTER PUBLICATION abc SET TABLE def, ghi, TABLES IN SCHEMA y, z, CURRENT_SCHEMA;
+
+ALTER PUBLICATION abc SET (publish = 'insert,update', publish_via_partition_root = TRUE);
+
+ALTER PUBLICATION abc OWNER TO bob;
+
+ALTER PUBLICATION abc OWNER TO CURRENT_ROLE;
+
+ALTER PUBLICATION abc OWNER TO CURRENT_USER;
+
+ALTER PUBLICATION abc OWNER TO SESSION_USER;
+
+ALTER PUBLICATION abc RENAME TO def;
+
+-- examples from https://www.postgresql.org/docs/15/sql-alterpublication.html
+
+ALTER PUBLICATION noinsert SET (publish = 'update, delete');
+
+ALTER PUBLICATION mypublication ADD TABLE users (user_id, firstname), departments;
+
+ALTER PUBLICATION mypublication SET TABLE users (user_id, firstname, lastname), TABLE departments;
+
+ALTER PUBLICATION sales_publication ADD TABLES IN SCHEMA marketing, sales;
+
+ALTER PUBLICATION production_publication ADD TABLE users, departments, TABLES IN SCHEMA production;
diff --git a/test/fixtures/dialects/postgres/postgres_alter_publication.yml b/test/fixtures/dialects/postgres/postgres_alter_publication.yml
new file mode 100644
index 0000000..296cf83
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_alter_publication.yml
@@ -0,0 +1,287 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: d78f6bf54923eefc7cbef2aa7bc87808e6b13b9c1a099957863705d933ed049b
+file:
+- statement:
+    alter_publication_statement:
+    - keyword: ALTER
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: ADD
+    - publication_objects:
+        keyword: TABLE
+        publication_table:
+          table_reference:
+            naked_identifier: def
+- statement_terminator: ;
+- statement:
+    alter_publication_statement:
+    - keyword: ALTER
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: ADD
+    - publication_objects:
+        keyword: TABLE
+        publication_table:
+          table_reference:
+            naked_identifier: def
+    - comma: ','
+    - publication_objects:
+        keyword: TABLE
+        publication_table:
+          table_reference:
+            naked_identifier: ghi
+- statement_terminator: ;
+- statement:
+    alter_publication_statement:
+    - keyword: ALTER
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: ADD
+    - publication_objects:
+      - keyword: TABLE
+      - publication_table:
+          table_reference:
+            naked_identifier: def
+      - comma: ','
+      - publication_table:
+          table_reference:
+            naked_identifier: ghi
+          star: '*'
+      - comma: ','
+      - publication_table:
+          keyword: ONLY
+          table_reference:
+            naked_identifier: jkl
+      - comma: ','
+      - publication_table:
+          keyword: ONLY
+          bracketed:
+            start_bracket: (
+            table_reference:
+              naked_identifier: mno
+            end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_publication_statement:
+    - keyword: ALTER
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: SET
+    - publication_objects:
+      - keyword: TABLE
+      - publication_table:
+          table_reference:
+            naked_identifier: def
+      - comma: ','
+      - publication_table:
+          table_reference:
+            naked_identifier: ghi
+    - comma: ','
+    - publication_objects:
+      - keyword: TABLES
+      - keyword: IN
+      - keyword: SCHEMA
+      - schema_reference:
+          naked_identifier: y
+      - comma: ','
+      - schema_reference:
+          naked_identifier: z
+      - comma: ','
+      - keyword: CURRENT_SCHEMA
+- statement_terminator: ;
+- statement:
+    alter_publication_statement:
+    - keyword: ALTER
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: SET
+    - definition_parameters:
+        bracketed:
+        - start_bracket: (
+        - definition_parameter:
+            properties_naked_identifier: publish
+            comparison_operator:
+              raw_comparison_operator: '='
+            quoted_literal: "'insert,update'"
+        - comma: ','
+        - definition_parameter:
+            properties_naked_identifier: publish_via_partition_root
+            comparison_operator:
+              raw_comparison_operator: '='
+            boolean_literal: 'TRUE'
+        - end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_publication_statement:
+    - keyword: ALTER
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: OWNER
+    - keyword: TO
+    - role_reference:
+        naked_identifier: bob
+- statement_terminator: ;
+- statement:
+    alter_publication_statement:
+    - keyword: ALTER
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: OWNER
+    - keyword: TO
+    - keyword: CURRENT_ROLE
+- statement_terminator: ;
+- statement:
+    alter_publication_statement:
+    - keyword: ALTER
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: OWNER
+    - keyword: TO
+    - keyword: CURRENT_USER
+- statement_terminator: ;
+- statement:
+    alter_publication_statement:
+    - keyword: ALTER
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: OWNER
+    - keyword: TO
+    - keyword: SESSION_USER
+- statement_terminator: ;
+- statement:
+    alter_publication_statement:
+    - keyword: ALTER
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: RENAME
+    - keyword: TO
+    - publication_reference:
+        naked_identifier: def
+- statement_terminator: ;
+- statement:
+    alter_publication_statement:
+    - keyword: ALTER
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: noinsert
+    - keyword: SET
+    - definition_parameters:
+        bracketed:
+          start_bracket: (
+          definition_parameter:
+            properties_naked_identifier: publish
+            comparison_operator:
+              raw_comparison_operator: '='
+            quoted_literal: "'update, delete'"
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_publication_statement:
+    - keyword: ALTER
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: mypublication
+    - keyword: ADD
+    - publication_objects:
+      - keyword: TABLE
+      - publication_table:
+          table_reference:
+            naked_identifier: users
+          bracketed:
+          - start_bracket: (
+          - column_reference:
+              naked_identifier: user_id
+          - comma: ','
+          - column_reference:
+              naked_identifier: firstname
+          - end_bracket: )
+      - comma: ','
+      - publication_table:
+          table_reference:
+            naked_identifier: departments
+- statement_terminator: ;
+- statement:
+    alter_publication_statement:
+    - keyword: ALTER
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: mypublication
+    - keyword: SET
+    - publication_objects:
+        keyword: TABLE
+        publication_table:
+          table_reference:
+            naked_identifier: users
+          bracketed:
+          - start_bracket: (
+          - column_reference:
+              naked_identifier: user_id
+          - comma: ','
+          - column_reference:
+              naked_identifier: firstname
+          - comma: ','
+          - column_reference:
+              naked_identifier: lastname
+          - end_bracket: )
+    - comma: ','
+    - publication_objects:
+        keyword: TABLE
+        publication_table:
+          table_reference:
+            naked_identifier: departments
+- statement_terminator: ;
+- statement:
+    alter_publication_statement:
+    - keyword: ALTER
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: sales_publication
+    - keyword: ADD
+    - publication_objects:
+      - keyword: TABLES
+      - keyword: IN
+      - keyword: SCHEMA
+      - schema_reference:
+          naked_identifier: marketing
+      - comma: ','
+      - schema_reference:
+          naked_identifier: sales
+- statement_terminator: ;
+- statement:
+    alter_publication_statement:
+    - keyword: ALTER
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: production_publication
+    - keyword: ADD
+    - publication_objects:
+      - keyword: TABLE
+      - publication_table:
+          table_reference:
+            naked_identifier: users
+      - comma: ','
+      - publication_table:
+          table_reference:
+            naked_identifier: departments
+    - comma: ','
+    - publication_objects:
+      - keyword: TABLES
+      - keyword: IN
+      - keyword: SCHEMA
+      - schema_reference:
+          naked_identifier: production
+- statement_terminator: ;
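
ALTER PUBLICATION parsing is new for the postgres dialect here, including TABLES IN SCHEMA lists and bracketed definition parameters. A smoke test over two of the fixture statements, under the same assumptions as the earlier sketches:

import sqlfluff

for sql in (
    "ALTER PUBLICATION abc ADD TABLE def, ghi*, ONLY jkl, ONLY (mno);",
    "ALTER PUBLICATION noinsert SET (publish = 'update, delete');",
):
    violations = sqlfluff.lint(sql, dialect="postgres")
    assert not any(v["code"] == "PRS" for v in violations), sql
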
diff --git a/test/fixtures/dialects/postgres/postgres_alter_role.sql b/test/fixtures/dialects/postgres/postgres_alter_role.sql
index afa21d3..1933970 100644
--- a/test/fixtures/dialects/postgres/postgres_alter_role.sql
+++ b/test/fixtures/dialects/postgres/postgres_alter_role.sql
@@ -4,8 +4,6 @@ ALTER ROLE chris VALID UNTIL 'May 4 12:00:00 2015 +1';
 ALTER ROLE fred VALID UNTIL 'infinity';
 ALTER ROLE worker_bee SET maintenance_work_mem = '100000';
 ALTER ROLE fred IN DATABASE devel SET client_min_messages TO DEFAULT;
-ALTER ROLE fred IN DATABASE devel SET client_min_messages FROM CURRENT;
-ALTER ROLE fred IN DATABASE devel RESET ALL;
 ALTER ROLE miriam CREATEROLE CREATEDB;
 ALTER USER davide WITH PASSWORD 'hu8jmn3';
 ALTER USER davide WITH PASSWORD NULL;
@@ -13,6 +11,21 @@ ALTER USER chris VALID UNTIL 'May 4 12:00:00 2015 +1';
 ALTER USER fred VALID UNTIL 'infinity';
 ALTER USER worker_bee SET maintenance_work_mem = '100000';
 ALTER USER fred IN DATABASE devel SET client_min_messages TO DEFAULT;
-ALTER USER fred IN DATABASE devel SET client_min_messages FROM CURRENT;
-ALTER USER fred IN DATABASE devel RESET ALL;
 ALTER USER miriam CREATEROLE CREATEDB;
+
+-- more SET tests:
+ALTER ROLE fred SET testing FROM CURRENT;
+ALTER ROLE fred IN DATABASE devel SET testing FROM CURRENT;
+ALTER ROLE fred IN DATABASE devel SET testing TO 1234;
+ALTER ROLE fred IN DATABASE devel SET testing = 1234;
+ALTER ROLE fred IN DATABASE devel SET testing TO DEFAULT;
+ALTER ROLE fred IN DATABASE devel SET testing = DEFAULT;
+ALTER ROLE fred IN DATABASE devel SET testing = TRUE;
+ALTER ROLE fred IN DATABASE devel SET testing = FALSE;
+ALTER ROLE fred IN DATABASE devel SET testing = 'string value';
+ALTER ROLE fred IN DATABASE devel SET testing = on, off, auto;
+
+ALTER ROLE fred RESET ALL;
+ALTER ROLE fred RESET testing;
+ALTER ROLE fred IN DATABASE devel RESET ALL;
+ALTER ROLE fred IN DATABASE devel RESET testing;
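
The added statements exercise each value form PostgreSQL accepts after SET (FROM CURRENT, TO or = with numeric, boolean and string literals, and comma-separated keyword values), plus RESET with and without IN DATABASE. A minimal sketch, assuming sqlfluff 2.x, that checks the reworked labelling (the setting name now parses as a parameter segment, where 1.4.x produced an object_reference, per the YAML diff below):

import sqlfluff

def segment_names(node, names=None):
    # Collect every segment label from the nested-dict parse tree.
    names = [] if names is None else names
    if isinstance(node, dict):
        for key, value in node.items():
            names.append(key)
            segment_names(value, names)
    elif isinstance(node, list):
        for item in node:
            segment_names(item, names)
    return names

tree = sqlfluff.parse("ALTER ROLE fred RESET testing;", dialect="postgres")
assert "parameter" in segment_names(tree)
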
diff --git a/test/fixtures/dialects/postgres/postgres_alter_role.yml b/test/fixtures/dialects/postgres/postgres_alter_role.yml
index 8fefc07..91b07cf 100644
--- a/test/fixtures/dialects/postgres/postgres_alter_role.yml
+++ b/test/fixtures/dialects/postgres/postgres_alter_role.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 13cb7d7e697ed0d1d7583c297ca51108f94d7f81d604da6e3f5634b2693f3951
+_hash: d366aa4ccbfe1b7a349001b32e9a6ae740fc7b1840b39b44b5b6790fa2ffe559
 file:
 - statement:
     alter_role_statement:
@@ -52,8 +52,7 @@ file:
     - role_reference:
         naked_identifier: worker_bee
     - keyword: SET
-    - object_reference:
-        naked_identifier: maintenance_work_mem
+    - parameter: maintenance_work_mem
     - comparison_operator:
         raw_comparison_operator: '='
     - quoted_literal: "'100000'"
@@ -66,43 +65,13 @@ file:
         naked_identifier: fred
     - keyword: IN
     - keyword: DATABASE
-    - object_reference:
+    - database_reference:
         naked_identifier: devel
     - keyword: SET
-    - object_reference:
-        naked_identifier: client_min_messages
+    - parameter: client_min_messages
     - keyword: TO
     - keyword: DEFAULT
 - statement_terminator: ;
-- statement:
-    alter_role_statement:
-    - keyword: ALTER
-    - keyword: ROLE
-    - role_reference:
-        naked_identifier: fred
-    - keyword: IN
-    - keyword: DATABASE
-    - object_reference:
-        naked_identifier: devel
-    - keyword: SET
-    - object_reference:
-        naked_identifier: client_min_messages
-    - keyword: FROM
-    - keyword: CURRENT
-- statement_terminator: ;
-- statement:
-    alter_role_statement:
-    - keyword: ALTER
-    - keyword: ROLE
-    - role_reference:
-        naked_identifier: fred
-    - keyword: IN
-    - keyword: DATABASE
-    - object_reference:
-        naked_identifier: devel
-    - keyword: RESET
-    - keyword: ALL
-- statement_terminator: ;
 - statement:
     alter_role_statement:
     - keyword: ALTER
@@ -159,8 +128,7 @@ file:
     - role_reference:
         naked_identifier: worker_bee
     - keyword: SET
-    - object_reference:
-        naked_identifier: maintenance_work_mem
+    - parameter: maintenance_work_mem
     - comparison_operator:
         raw_comparison_operator: '='
     - quoted_literal: "'100000'"
@@ -173,11 +141,10 @@ file:
         naked_identifier: fred
     - keyword: IN
     - keyword: DATABASE
-    - object_reference:
+    - database_reference:
         naked_identifier: devel
     - keyword: SET
-    - object_reference:
-        naked_identifier: client_min_messages
+    - parameter: client_min_messages
     - keyword: TO
     - keyword: DEFAULT
 - statement_terminator: ;
@@ -185,37 +152,208 @@ file:
     alter_role_statement:
     - keyword: ALTER
     - keyword: USER
+    - role_reference:
+        naked_identifier: miriam
+    - keyword: CREATEROLE
+    - keyword: CREATEDB
+- statement_terminator: ;
+- statement:
+    alter_role_statement:
+    - keyword: ALTER
+    - keyword: ROLE
+    - role_reference:
+        naked_identifier: fred
+    - keyword: SET
+    - parameter: testing
+    - keyword: FROM
+    - keyword: CURRENT
+- statement_terminator: ;
+- statement:
+    alter_role_statement:
+    - keyword: ALTER
+    - keyword: ROLE
     - role_reference:
         naked_identifier: fred
     - keyword: IN
     - keyword: DATABASE
-    - object_reference:
+    - database_reference:
         naked_identifier: devel
     - keyword: SET
-    - object_reference:
-        naked_identifier: client_min_messages
+    - parameter: testing
     - keyword: FROM
     - keyword: CURRENT
 - statement_terminator: ;
 - statement:
     alter_role_statement:
     - keyword: ALTER
-    - keyword: USER
+    - keyword: ROLE
+    - role_reference:
+        naked_identifier: fred
+    - keyword: IN
+    - keyword: DATABASE
+    - database_reference:
+        naked_identifier: devel
+    - keyword: SET
+    - parameter: testing
+    - keyword: TO
+    - numeric_literal: '1234'
+- statement_terminator: ;
+- statement:
+    alter_role_statement:
+    - keyword: ALTER
+    - keyword: ROLE
+    - role_reference:
+        naked_identifier: fred
+    - keyword: IN
+    - keyword: DATABASE
+    - database_reference:
+        naked_identifier: devel
+    - keyword: SET
+    - parameter: testing
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - numeric_literal: '1234'
+- statement_terminator: ;
+- statement:
+    alter_role_statement:
+    - keyword: ALTER
+    - keyword: ROLE
+    - role_reference:
+        naked_identifier: fred
+    - keyword: IN
+    - keyword: DATABASE
+    - database_reference:
+        naked_identifier: devel
+    - keyword: SET
+    - parameter: testing
+    - keyword: TO
+    - keyword: DEFAULT
+- statement_terminator: ;
+- statement:
+    alter_role_statement:
+    - keyword: ALTER
+    - keyword: ROLE
     - role_reference:
         naked_identifier: fred
     - keyword: IN
     - keyword: DATABASE
-    - object_reference:
+    - database_reference:
         naked_identifier: devel
+    - keyword: SET
+    - parameter: testing
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - keyword: DEFAULT
+- statement_terminator: ;
+- statement:
+    alter_role_statement:
+    - keyword: ALTER
+    - keyword: ROLE
+    - role_reference:
+        naked_identifier: fred
+    - keyword: IN
+    - keyword: DATABASE
+    - database_reference:
+        naked_identifier: devel
+    - keyword: SET
+    - parameter: testing
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - boolean_literal: 'TRUE'
+- statement_terminator: ;
+- statement:
+    alter_role_statement:
+    - keyword: ALTER
+    - keyword: ROLE
+    - role_reference:
+        naked_identifier: fred
+    - keyword: IN
+    - keyword: DATABASE
+    - database_reference:
+        naked_identifier: devel
+    - keyword: SET
+    - parameter: testing
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - boolean_literal: 'FALSE'
+- statement_terminator: ;
+- statement:
+    alter_role_statement:
+    - keyword: ALTER
+    - keyword: ROLE
+    - role_reference:
+        naked_identifier: fred
+    - keyword: IN
+    - keyword: DATABASE
+    - database_reference:
+        naked_identifier: devel
+    - keyword: SET
+    - parameter: testing
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'string value'"
+- statement_terminator: ;
+- statement:
+    alter_role_statement:
+    - keyword: ALTER
+    - keyword: ROLE
+    - role_reference:
+        naked_identifier: fred
+    - keyword: IN
+    - keyword: DATABASE
+    - database_reference:
+        naked_identifier: devel
+    - keyword: SET
+    - parameter: testing
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - naked_identifier: 'on'
+    - comma: ','
+    - naked_identifier: 'off'
+    - comma: ','
+    - naked_identifier: auto
+- statement_terminator: ;
+- statement:
+    alter_role_statement:
+    - keyword: ALTER
+    - keyword: ROLE
+    - role_reference:
+        naked_identifier: fred
     - keyword: RESET
-    - keyword: ALL
+    - parameter: ALL
 - statement_terminator: ;
 - statement:
     alter_role_statement:
     - keyword: ALTER
-    - keyword: USER
+    - keyword: ROLE
     - role_reference:
-        naked_identifier: miriam
-    - keyword: CREATEROLE
-    - keyword: CREATEDB
+        naked_identifier: fred
+    - keyword: RESET
+    - parameter: testing
+- statement_terminator: ;
+- statement:
+    alter_role_statement:
+    - keyword: ALTER
+    - keyword: ROLE
+    - role_reference:
+        naked_identifier: fred
+    - keyword: IN
+    - keyword: DATABASE
+    - database_reference:
+        naked_identifier: devel
+    - keyword: RESET
+    - parameter: ALL
+- statement_terminator: ;
+- statement:
+    alter_role_statement:
+    - keyword: ALTER
+    - keyword: ROLE
+    - role_reference:
+        naked_identifier: fred
+    - keyword: IN
+    - keyword: DATABASE
+    - database_reference:
+        naked_identifier: devel
+    - keyword: RESET
+    - parameter: testing
 - statement_terminator: ;
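
As the header comment in each fixture notes, these YAML files are computed by SQLFluff itself and are regenerated after adding or altering the SQL files by running:

python test/generate_parse_fixture_yml.py

The _hash value at the top of each YAML file changes whenever the fixture is regenerated, which is why every regenerated file in this diff shows a one-line _hash update alongside its tree changes.
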
diff --git a/test/fixtures/dialects/postgres/postgres_alter_table.sql b/test/fixtures/dialects/postgres/postgres_alter_table.sql
index 498fc2a..7bb9469 100644
--- a/test/fixtures/dialects/postgres/postgres_alter_table.sql
+++ b/test/fixtures/dialects/postgres/postgres_alter_table.sql
@@ -33,6 +33,8 @@ ALTER TABLE mytable ALTER other_column SET DEFAULT other_value;
 ALTER TABLE mytable ALTER other_column SET DEFAULT CURRENT_TIMESTAMP;
 ALTER TABLE mytable ALTER other_column SET DEFAULT a_function(a_parameter);
 ALTER TABLE mytable ALTER other_column SET DEFAULT a_function('a_parameter');
+ALTER TABLE mytable ALTER other_column SET DEFAULT 1 + 2 + 3;
+ALTER TABLE mytable ALTER other_column SET DEFAULT (1 + 2 + 3);
 ALTER TABLE mytable ALTER other_column DROP DEFAULT;
 ALTER TABLE IF EXISTS mytable ALTER date_column SET DEFAULT NOW();
 ALTER TABLE IF EXISTS mytable ALTER int_column SET DEFAULT 1;
@@ -42,6 +44,8 @@ ALTER TABLE IF EXISTS mytable ALTER other_column SET DEFAULT other_value;
 ALTER TABLE IF EXISTS mytable ALTER other_column SET DEFAULT CURRENT_TIMESTAMP;
 ALTER TABLE IF EXISTS mytable ALTER other_column SET DEFAULT a_function(a_parameter);
 ALTER TABLE IF EXISTS mytable ALTER other_column SET DEFAULT a_function('a_parameter');
+ALTER TABLE IF EXISTS mytable ALTER other_column SET DEFAULT 1 + 2 + 3;
+ALTER TABLE IF EXISTS mytable ALTER other_column SET DEFAULT (1 + 2 + 3);
 ALTER TABLE IF EXISTS mytable ALTER other_column DROP DEFAULT;
 
 ALTER TABLE distributors RENAME COLUMN address TO city;
@@ -136,3 +140,62 @@ ALTER TABLE public.history ALTER COLUMN id ADD GENERATED ALWAYS AS IDENTITY (
     SEQUENCE NAME public.history_id_seq
 );
 
+-- Test adding columns with UNIQUE and PRIMARY KEY constraints
+
+ALTER TABLE tbl
+    ADD COLUMN nulls_distinct text UNIQUE NULLS DISTINCT,
+    ADD COLUMN nulls_not_distinct text UNIQUE NULLS NOT DISTINCT,
+    ADD everything text UNIQUE NULLS DISTINCT WITH (arg1=3, arg5='str') USING INDEX TABLESPACE spc;
+
+ALTER TABLE tbl
+    ADD pk text
+        DEFAULT 'hello'
+        PRIMARY KEY WITH (arg1=3, arg5='str') USING INDEX TABLESPACE tblspace NOT NULL;
+
+ALTER TABLE tbl
+    ADD CONSTRAINT foo1 UNIQUE (fld, col),
+    ADD CONSTRAINT foo2 UNIQUE NULLS DISTINCT (fld),
+    ADD CONSTRAINT foo3 UNIQUE NULLS NOT DISTINCT (fld),
+    ADD CONSTRAINT everything UNIQUE NULLS DISTINCT (fld, col)
+        INCLUDE (two, three)
+        WITH (arg1=3, arg5='str')
+        USING INDEX TABLESPACE tblspc,
+    ADD CONSTRAINT pk PRIMARY KEY (fld, col)
+        INCLUDE (four)
+        WITH (ff=auto, gg=stuff)
+        USING INDEX TABLESPACE tblspc;
+
+-- Test SET/RESET actions on both table and column
+
+ALTER TABLE foo SET (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC);
+ALTER TABLE foo RESET (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC);
+ALTER TABLE foo ALTER COLUMN baz
+    SET (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC);
+ALTER TABLE foo ALTER COLUMN baz
+    RESET (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC);
+
+-- Test out EXCLUDE constraints, as well as other more advanced index parameters on constraints
+
+-- from https://www.postgresql.org/docs/15/rangetypes.html: basic usage (adapted for ALTER TABLE)
+ALTER TABLE reservation ADD EXCLUDE USING gist (during WITH &&);
+ALTER TABLE room_reservation ADD CONSTRAINT cons EXCLUDE USING gist (room WITH =, during WITH &&);
+
+-- all the gnarly options: not every option is valid, but this will parse successfully on PG 15.
+ALTER TABLE no_using ADD EXCLUDE (field WITH =) NOT DEFERRABLE INITIALLY IMMEDIATE NO INHERIT;
+ALTER TABLE many_options ADD EXCLUDE
+    USING gist (
+        one WITH =,
+        nulls_opclass nulls WITH =,
+        nulls_last NULLS LAST WITH =,
+        two COLLATE "en-US" opclass
+            (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC)
+            ASC NULLS FIRST WITH =,
+        (two + 5) WITH =,
+        myfunc(a, b) WITH =,
+        myfunc_opclass(a, b) fop (opt=1, foo=2) WITH =,
+        only_opclass opclass WITH =,
+        desc_order DESC WITH =
+    ) INCLUDE (a, b) WITH (idx_num = 5, idx_str = 'idx_value', idx_kw=DESC)
+        USING INDEX TABLESPACE tblspc
+        WHERE (field != 'def')
+        DEFERRABLE NOT VALID INITIALLY DEFERRED;
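
The final statement above deliberately stacks nearly every index-element and constraint option that PostgreSQL 15 will accept syntactically, as its comment notes. As a hedged illustration (assuming sqlfluff 2.x and PyYAML, neither of which the upstream change itself requires), the parse tree for the simpler reservation example can be rendered in roughly the shape of the checked-in fixture:

import sqlfluff
import yaml

sql = "ALTER TABLE reservation ADD EXCLUDE USING gist (during WITH &&);"
tree = sqlfluff.parse(sql, dialect="postgres")
# The exact record may carry more detail than the trimmed fixture, but
# the segment names (table_constraint, exclusion_constraint_element,
# index_element, ...) match the YAML diff below.
print(yaml.safe_dump(tree, sort_keys=False))
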
diff --git a/test/fixtures/dialects/postgres/postgres_alter_table.yml b/test/fixtures/dialects/postgres/postgres_alter_table.yml
index 483f126..88dba76 100644
--- a/test/fixtures/dialects/postgres/postgres_alter_table.yml
+++ b/test/fixtures/dialects/postgres/postgres_alter_table.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 41dfc3fcb84393e15f3b50236490fe3e599c6ddb098c8aa1b70ecfeccf168bf1
+_hash: 8e47ff3c0810152cb6f3ddafb3680fabd4e7029d3de173f3fc8514a24b853b96
 file:
 - statement:
     alter_table_statement:
@@ -18,10 +18,11 @@ file:
           naked_identifier: address
       - data_type:
           keyword: varchar
-          bracketed:
-            start_bracket: (
-            numeric_literal: '30'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '30'
+              end_bracket: )
 - statement_terminator: ;
 - statement:
     alter_table_statement:
@@ -62,10 +63,11 @@ file:
           naked_identifier: status
       - data_type:
           keyword: varchar
-          bracketed:
-            start_bracket: (
-            numeric_literal: '30'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '30'
+              end_bracket: )
       - column_constraint_segment:
           keyword: DEFAULT
           quoted_literal: "'old'"
@@ -106,10 +108,11 @@ file:
       - keyword: TYPE
       - data_type:
           keyword: varchar
-          bracketed:
-            start_bracket: (
-            numeric_literal: '80'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '80'
+              end_bracket: )
     - comma: ','
     - alter_table_action_segment:
       - keyword: ALTER
@@ -119,10 +122,11 @@ file:
       - keyword: TYPE
       - data_type:
           keyword: varchar
-          bracketed:
-            start_bracket: (
-            numeric_literal: '100'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '100'
+              end_bracket: )
 - statement_terminator: ;
 - statement:
     alter_table_statement:
@@ -351,6 +355,48 @@ file:
               quoted_literal: "'a_parameter'"
             end_bracket: )
 - statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: mytable
+    - alter_table_action_segment:
+      - keyword: ALTER
+      - column_reference:
+          naked_identifier: other_column
+      - keyword: SET
+      - keyword: DEFAULT
+      - expression:
+        - numeric_literal: '1'
+        - binary_operator: +
+        - numeric_literal: '2'
+        - binary_operator: +
+        - numeric_literal: '3'
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: mytable
+    - alter_table_action_segment:
+      - keyword: ALTER
+      - column_reference:
+          naked_identifier: other_column
+      - keyword: SET
+      - keyword: DEFAULT
+      - expression:
+          bracketed:
+            start_bracket: (
+            expression:
+            - numeric_literal: '1'
+            - binary_operator: +
+            - numeric_literal: '2'
+            - binary_operator: +
+            - numeric_literal: '3'
+            end_bracket: )
+- statement_terminator: ;
 - statement:
     alter_table_statement:
     - keyword: ALTER
@@ -514,6 +560,52 @@ file:
               quoted_literal: "'a_parameter'"
             end_bracket: )
 - statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - keyword: IF
+    - keyword: EXISTS
+    - table_reference:
+        naked_identifier: mytable
+    - alter_table_action_segment:
+      - keyword: ALTER
+      - column_reference:
+          naked_identifier: other_column
+      - keyword: SET
+      - keyword: DEFAULT
+      - expression:
+        - numeric_literal: '1'
+        - binary_operator: +
+        - numeric_literal: '2'
+        - binary_operator: +
+        - numeric_literal: '3'
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - keyword: IF
+    - keyword: EXISTS
+    - table_reference:
+        naked_identifier: mytable
+    - alter_table_action_segment:
+      - keyword: ALTER
+      - column_reference:
+          naked_identifier: other_column
+      - keyword: SET
+      - keyword: DEFAULT
+      - expression:
+          bracketed:
+            start_bracket: (
+            expression:
+            - numeric_literal: '1'
+            - binary_operator: +
+            - numeric_literal: '2'
+            - binary_operator: +
+            - numeric_literal: '3'
+            end_bracket: )
+- statement_terminator: ;
 - statement:
     alter_table_statement:
     - keyword: ALTER
@@ -791,8 +883,8 @@ file:
     - table_reference:
         naked_identifier: distributors
     - alter_table_action_segment:
-      - keyword: ADD
-      - table_constraint:
+        keyword: ADD
+        table_constraint:
         - keyword: CONSTRAINT
         - object_reference:
             naked_identifier: distfk
@@ -811,8 +903,8 @@ file:
             column_reference:
               naked_identifier: address
             end_bracket: )
-      - keyword: NOT
-      - keyword: VALID
+        - keyword: NOT
+        - keyword: VALID
 - statement_terminator: ;
 - statement:
     alter_table_statement:
@@ -884,13 +976,15 @@ file:
         naked_identifier: distributors
     - alter_table_action_segment:
         keyword: SET
-        bracketed:
-          start_bracket: (
-          parameter: parameter_1
-          comparison_operator:
-            raw_comparison_operator: '='
-          quoted_literal: "'value'"
-          end_bracket: )
+        relation_options:
+          bracketed:
+            start_bracket: (
+            relation_option:
+              properties_naked_identifier: parameter_1
+              comparison_operator:
+                raw_comparison_operator: '='
+              quoted_literal: "'value'"
+            end_bracket: )
 - statement_terminator: ;
 - statement:
     alter_table_statement:
@@ -900,13 +994,15 @@ file:
         naked_identifier: distributors
     - alter_table_action_segment:
         keyword: SET
-        bracketed:
-          start_bracket: (
-          parameter: parameter_1
-          comparison_operator:
-            raw_comparison_operator: '='
-          numeric_literal: '1'
-          end_bracket: )
+        relation_options:
+          bracketed:
+            start_bracket: (
+            relation_option:
+              properties_naked_identifier: parameter_1
+              comparison_operator:
+                raw_comparison_operator: '='
+              numeric_literal: '1'
+            end_bracket: )
 - statement_terminator: ;
 - statement:
     alter_table_statement:
@@ -916,18 +1012,21 @@ file:
         naked_identifier: distributors
     - alter_table_action_segment:
         keyword: SET
-        bracketed:
-        - start_bracket: (
-        - parameter: parameter_1
-        - comparison_operator:
-            raw_comparison_operator: '='
-        - numeric_literal: '1'
-        - comma: ','
-        - parameter: parameter_2
-        - comparison_operator:
-            raw_comparison_operator: '='
-        - quoted_literal: "'value'"
-        - end_bracket: )
+        relation_options:
+          bracketed:
+          - start_bracket: (
+          - relation_option:
+              properties_naked_identifier: parameter_1
+              comparison_operator:
+                raw_comparison_operator: '='
+              numeric_literal: '1'
+          - comma: ','
+          - relation_option:
+              properties_naked_identifier: parameter_2
+              comparison_operator:
+                raw_comparison_operator: '='
+              quoted_literal: "'value'"
+          - end_bracket: )
 - statement_terminator: ;
 - statement:
     alter_table_statement:
@@ -1276,3 +1375,820 @@ file:
             - naked_identifier: history_id_seq
           end_bracket: )
 - statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: tbl
+    - alter_table_action_segment:
+      - keyword: ADD
+      - keyword: COLUMN
+      - column_reference:
+          naked_identifier: nulls_distinct
+      - data_type:
+          keyword: text
+      - column_constraint_segment:
+        - keyword: UNIQUE
+        - keyword: NULLS
+        - keyword: DISTINCT
+    - comma: ','
+    - alter_table_action_segment:
+      - keyword: ADD
+      - keyword: COLUMN
+      - column_reference:
+          naked_identifier: nulls_not_distinct
+      - data_type:
+          keyword: text
+      - column_constraint_segment:
+        - keyword: UNIQUE
+        - keyword: NULLS
+        - keyword: NOT
+        - keyword: DISTINCT
+    - comma: ','
+    - alter_table_action_segment:
+        keyword: ADD
+        column_reference:
+          naked_identifier: everything
+        data_type:
+          keyword: text
+        column_constraint_segment:
+        - keyword: UNIQUE
+        - keyword: NULLS
+        - keyword: DISTINCT
+        - keyword: WITH
+        - definition_parameters:
+            bracketed:
+            - start_bracket: (
+            - definition_parameter:
+                properties_naked_identifier: arg1
+                comparison_operator:
+                  raw_comparison_operator: '='
+                numeric_literal: '3'
+            - comma: ','
+            - definition_parameter:
+                properties_naked_identifier: arg5
+                comparison_operator:
+                  raw_comparison_operator: '='
+                quoted_literal: "'str'"
+            - end_bracket: )
+        - keyword: USING
+        - keyword: INDEX
+        - keyword: TABLESPACE
+        - tablespace_reference:
+            naked_identifier: spc
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: tbl
+    - alter_table_action_segment:
+      - keyword: ADD
+      - column_reference:
+          naked_identifier: pk
+      - data_type:
+          keyword: text
+      - column_constraint_segment:
+          keyword: DEFAULT
+          quoted_literal: "'hello'"
+      - column_constraint_segment:
+        - keyword: PRIMARY
+        - keyword: KEY
+        - keyword: WITH
+        - definition_parameters:
+            bracketed:
+            - start_bracket: (
+            - definition_parameter:
+                properties_naked_identifier: arg1
+                comparison_operator:
+                  raw_comparison_operator: '='
+                numeric_literal: '3'
+            - comma: ','
+            - definition_parameter:
+                properties_naked_identifier: arg5
+                comparison_operator:
+                  raw_comparison_operator: '='
+                quoted_literal: "'str'"
+            - end_bracket: )
+        - keyword: USING
+        - keyword: INDEX
+        - keyword: TABLESPACE
+        - tablespace_reference:
+            naked_identifier: tblspace
+      - column_constraint_segment:
+        - keyword: NOT
+        - keyword: 'NULL'
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: tbl
+    - alter_table_action_segment:
+        keyword: ADD
+        table_constraint:
+        - keyword: CONSTRAINT
+        - object_reference:
+            naked_identifier: foo1
+        - keyword: UNIQUE
+        - bracketed:
+          - start_bracket: (
+          - column_reference:
+              naked_identifier: fld
+          - comma: ','
+          - column_reference:
+              naked_identifier: col
+          - end_bracket: )
+    - comma: ','
+    - alter_table_action_segment:
+        keyword: ADD
+        table_constraint:
+        - keyword: CONSTRAINT
+        - object_reference:
+            naked_identifier: foo2
+        - keyword: UNIQUE
+        - keyword: NULLS
+        - keyword: DISTINCT
+        - bracketed:
+            start_bracket: (
+            column_reference:
+              naked_identifier: fld
+            end_bracket: )
+    - comma: ','
+    - alter_table_action_segment:
+        keyword: ADD
+        table_constraint:
+        - keyword: CONSTRAINT
+        - object_reference:
+            naked_identifier: foo3
+        - keyword: UNIQUE
+        - keyword: NULLS
+        - keyword: NOT
+        - keyword: DISTINCT
+        - bracketed:
+            start_bracket: (
+            column_reference:
+              naked_identifier: fld
+            end_bracket: )
+    - comma: ','
+    - alter_table_action_segment:
+        keyword: ADD
+        table_constraint:
+        - keyword: CONSTRAINT
+        - object_reference:
+            naked_identifier: everything
+        - keyword: UNIQUE
+        - keyword: NULLS
+        - keyword: DISTINCT
+        - bracketed:
+          - start_bracket: (
+          - column_reference:
+              naked_identifier: fld
+          - comma: ','
+          - column_reference:
+              naked_identifier: col
+          - end_bracket: )
+        - index_parameters:
+          - keyword: INCLUDE
+          - bracketed:
+            - start_bracket: (
+            - column_reference:
+                naked_identifier: two
+            - comma: ','
+            - column_reference:
+                naked_identifier: three
+            - end_bracket: )
+          - keyword: WITH
+          - definition_parameters:
+              bracketed:
+              - start_bracket: (
+              - definition_parameter:
+                  properties_naked_identifier: arg1
+                  comparison_operator:
+                    raw_comparison_operator: '='
+                  numeric_literal: '3'
+              - comma: ','
+              - definition_parameter:
+                  properties_naked_identifier: arg5
+                  comparison_operator:
+                    raw_comparison_operator: '='
+                  quoted_literal: "'str'"
+              - end_bracket: )
+          - keyword: USING
+          - keyword: INDEX
+          - keyword: TABLESPACE
+          - tablespace_reference:
+              naked_identifier: tblspc
+    - comma: ','
+    - alter_table_action_segment:
+        keyword: ADD
+        table_constraint:
+        - keyword: CONSTRAINT
+        - object_reference:
+            naked_identifier: pk
+        - keyword: PRIMARY
+        - keyword: KEY
+        - bracketed:
+          - start_bracket: (
+          - column_reference:
+              naked_identifier: fld
+          - comma: ','
+          - column_reference:
+              naked_identifier: col
+          - end_bracket: )
+        - index_parameters:
+          - keyword: INCLUDE
+          - bracketed:
+              start_bracket: (
+              column_reference:
+                naked_identifier: four
+              end_bracket: )
+          - keyword: WITH
+          - definition_parameters:
+              bracketed:
+              - start_bracket: (
+              - definition_parameter:
+                - properties_naked_identifier: ff
+                - comparison_operator:
+                    raw_comparison_operator: '='
+                - properties_naked_identifier: auto
+              - comma: ','
+              - definition_parameter:
+                - properties_naked_identifier: gg
+                - comparison_operator:
+                    raw_comparison_operator: '='
+                - properties_naked_identifier: stuff
+              - end_bracket: )
+          - keyword: USING
+          - keyword: INDEX
+          - keyword: TABLESPACE
+          - tablespace_reference:
+              naked_identifier: tblspc
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: foo
+    - alter_table_action_segment:
+        keyword: SET
+        relation_options:
+          bracketed:
+          - start_bracket: (
+          - relation_option:
+              properties_naked_identifier: opt1
+          - comma: ','
+          - relation_option:
+              properties_naked_identifier: opt2
+              comparison_operator:
+                raw_comparison_operator: '='
+              numeric_literal: '5'
+          - comma: ','
+          - relation_option:
+              properties_naked_identifier: opt3
+              comparison_operator:
+                raw_comparison_operator: '='
+              quoted_literal: "'str'"
+          - comma: ','
+          - relation_option:
+            - properties_naked_identifier: ns
+            - dot: .
+            - properties_naked_identifier: opt4
+          - comma: ','
+          - relation_option:
+            - properties_naked_identifier: ns
+            - dot: .
+            - properties_naked_identifier: opt5
+            - comparison_operator:
+                raw_comparison_operator: '='
+            - numeric_literal: '6'
+          - comma: ','
+          - relation_option:
+            - properties_naked_identifier: ns
+            - dot: .
+            - properties_naked_identifier: opt6
+            - comparison_operator:
+                raw_comparison_operator: '='
+            - quoted_literal: "'str'"
+          - comma: ','
+          - relation_option:
+            - properties_naked_identifier: opt7
+            - comparison_operator:
+                raw_comparison_operator: '='
+            - properties_naked_identifier: ASC
+          - end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: foo
+    - alter_table_action_segment:
+        keyword: RESET
+        relation_options:
+          bracketed:
+          - start_bracket: (
+          - relation_option:
+              properties_naked_identifier: opt1
+          - comma: ','
+          - relation_option:
+              properties_naked_identifier: opt2
+              comparison_operator:
+                raw_comparison_operator: '='
+              numeric_literal: '5'
+          - comma: ','
+          - relation_option:
+              properties_naked_identifier: opt3
+              comparison_operator:
+                raw_comparison_operator: '='
+              quoted_literal: "'str'"
+          - comma: ','
+          - relation_option:
+            - properties_naked_identifier: ns
+            - dot: .
+            - properties_naked_identifier: opt4
+          - comma: ','
+          - relation_option:
+            - properties_naked_identifier: ns
+            - dot: .
+            - properties_naked_identifier: opt5
+            - comparison_operator:
+                raw_comparison_operator: '='
+            - numeric_literal: '6'
+          - comma: ','
+          - relation_option:
+            - properties_naked_identifier: ns
+            - dot: .
+            - properties_naked_identifier: opt6
+            - comparison_operator:
+                raw_comparison_operator: '='
+            - quoted_literal: "'str'"
+          - comma: ','
+          - relation_option:
+            - properties_naked_identifier: opt7
+            - comparison_operator:
+                raw_comparison_operator: '='
+            - properties_naked_identifier: ASC
+          - end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: foo
+    - alter_table_action_segment:
+      - keyword: ALTER
+      - keyword: COLUMN
+      - column_reference:
+          naked_identifier: baz
+      - keyword: SET
+      - relation_options:
+          bracketed:
+          - start_bracket: (
+          - relation_option:
+              properties_naked_identifier: opt1
+          - comma: ','
+          - relation_option:
+              properties_naked_identifier: opt2
+              comparison_operator:
+                raw_comparison_operator: '='
+              numeric_literal: '5'
+          - comma: ','
+          - relation_option:
+              properties_naked_identifier: opt3
+              comparison_operator:
+                raw_comparison_operator: '='
+              quoted_literal: "'str'"
+          - comma: ','
+          - relation_option:
+            - properties_naked_identifier: ns
+            - dot: .
+            - properties_naked_identifier: opt4
+          - comma: ','
+          - relation_option:
+            - properties_naked_identifier: ns
+            - dot: .
+            - properties_naked_identifier: opt5
+            - comparison_operator:
+                raw_comparison_operator: '='
+            - numeric_literal: '6'
+          - comma: ','
+          - relation_option:
+            - properties_naked_identifier: ns
+            - dot: .
+            - properties_naked_identifier: opt6
+            - comparison_operator:
+                raw_comparison_operator: '='
+            - quoted_literal: "'str'"
+          - comma: ','
+          - relation_option:
+            - properties_naked_identifier: opt7
+            - comparison_operator:
+                raw_comparison_operator: '='
+            - properties_naked_identifier: ASC
+          - end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: foo
+    - alter_table_action_segment:
+      - keyword: ALTER
+      - keyword: COLUMN
+      - column_reference:
+          naked_identifier: baz
+      - keyword: RESET
+      - relation_options:
+          bracketed:
+          - start_bracket: (
+          - relation_option:
+              properties_naked_identifier: opt1
+          - comma: ','
+          - relation_option:
+              properties_naked_identifier: opt2
+              comparison_operator:
+                raw_comparison_operator: '='
+              numeric_literal: '5'
+          - comma: ','
+          - relation_option:
+              properties_naked_identifier: opt3
+              comparison_operator:
+                raw_comparison_operator: '='
+              quoted_literal: "'str'"
+          - comma: ','
+          - relation_option:
+            - properties_naked_identifier: ns
+            - dot: .
+            - properties_naked_identifier: opt4
+          - comma: ','
+          - relation_option:
+            - properties_naked_identifier: ns
+            - dot: .
+            - properties_naked_identifier: opt5
+            - comparison_operator:
+                raw_comparison_operator: '='
+            - numeric_literal: '6'
+          - comma: ','
+          - relation_option:
+            - properties_naked_identifier: ns
+            - dot: .
+            - properties_naked_identifier: opt6
+            - comparison_operator:
+                raw_comparison_operator: '='
+            - quoted_literal: "'str'"
+          - comma: ','
+          - relation_option:
+            - properties_naked_identifier: opt7
+            - comparison_operator:
+                raw_comparison_operator: '='
+            - properties_naked_identifier: ASC
+          - end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: reservation
+    - alter_table_action_segment:
+        keyword: ADD
+        table_constraint:
+        - keyword: EXCLUDE
+        - keyword: USING
+        - index_access_method:
+            naked_identifier: gist
+        - bracketed:
+            start_bracket: (
+            exclusion_constraint_element:
+              index_element:
+                column_reference:
+                  naked_identifier: during
+              keyword: WITH
+              comparison_operator:
+              - ampersand: '&'
+              - ampersand: '&'
+            end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: room_reservation
+    - alter_table_action_segment:
+        keyword: ADD
+        table_constraint:
+        - keyword: CONSTRAINT
+        - object_reference:
+            naked_identifier: cons
+        - keyword: EXCLUDE
+        - keyword: USING
+        - index_access_method:
+            naked_identifier: gist
+        - bracketed:
+          - start_bracket: (
+          - exclusion_constraint_element:
+              index_element:
+                column_reference:
+                  naked_identifier: room
+              keyword: WITH
+              comparison_operator:
+                raw_comparison_operator: '='
+          - comma: ','
+          - exclusion_constraint_element:
+              index_element:
+                column_reference:
+                  naked_identifier: during
+              keyword: WITH
+              comparison_operator:
+              - ampersand: '&'
+              - ampersand: '&'
+          - end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: no_using
+    - alter_table_action_segment:
+        keyword: ADD
+        table_constraint:
+        - keyword: EXCLUDE
+        - bracketed:
+            start_bracket: (
+            exclusion_constraint_element:
+              index_element:
+                column_reference:
+                  naked_identifier: field
+              keyword: WITH
+              comparison_operator:
+                raw_comparison_operator: '='
+            end_bracket: )
+        - keyword: NOT
+        - keyword: DEFERRABLE
+        - keyword: INITIALLY
+        - keyword: IMMEDIATE
+        - keyword: 'NO'
+        - keyword: INHERIT
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: many_options
+    - alter_table_action_segment:
+        keyword: ADD
+        table_constraint:
+        - keyword: EXCLUDE
+        - keyword: USING
+        - index_access_method:
+            naked_identifier: gist
+        - bracketed:
+          - start_bracket: (
+          - exclusion_constraint_element:
+              index_element:
+                column_reference:
+                  naked_identifier: one
+              keyword: WITH
+              comparison_operator:
+                raw_comparison_operator: '='
+          - comma: ','
+          - exclusion_constraint_element:
+              index_element:
+                column_reference:
+                  naked_identifier: nulls_opclass
+                index_element_options:
+                  operator_class_reference:
+                    naked_identifier: nulls
+              keyword: WITH
+              comparison_operator:
+                raw_comparison_operator: '='
+          - comma: ','
+          - exclusion_constraint_element:
+              index_element:
+                column_reference:
+                  naked_identifier: nulls_last
+                index_element_options:
+                - keyword: NULLS
+                - keyword: LAST
+              keyword: WITH
+              comparison_operator:
+                raw_comparison_operator: '='
+          - comma: ','
+          - exclusion_constraint_element:
+              index_element:
+                column_reference:
+                  naked_identifier: two
+                index_element_options:
+                - keyword: COLLATE
+                - collation_reference:
+                    quoted_identifier: '"en-US"'
+                - operator_class_reference:
+                    naked_identifier: opclass
+                - relation_options:
+                    bracketed:
+                    - start_bracket: (
+                    - relation_option:
+                        properties_naked_identifier: opt1
+                    - comma: ','
+                    - relation_option:
+                        properties_naked_identifier: opt2
+                        comparison_operator:
+                          raw_comparison_operator: '='
+                        numeric_literal: '5'
+                    - comma: ','
+                    - relation_option:
+                        properties_naked_identifier: opt3
+                        comparison_operator:
+                          raw_comparison_operator: '='
+                        quoted_literal: "'str'"
+                    - comma: ','
+                    - relation_option:
+                      - properties_naked_identifier: ns
+                      - dot: .
+                      - properties_naked_identifier: opt4
+                    - comma: ','
+                    - relation_option:
+                      - properties_naked_identifier: ns
+                      - dot: .
+                      - properties_naked_identifier: opt5
+                      - comparison_operator:
+                          raw_comparison_operator: '='
+                      - numeric_literal: '6'
+                    - comma: ','
+                    - relation_option:
+                      - properties_naked_identifier: ns
+                      - dot: .
+                      - properties_naked_identifier: opt6
+                      - comparison_operator:
+                          raw_comparison_operator: '='
+                      - quoted_literal: "'str'"
+                    - comma: ','
+                    - relation_option:
+                      - properties_naked_identifier: opt7
+                      - comparison_operator:
+                          raw_comparison_operator: '='
+                      - properties_naked_identifier: ASC
+                    - end_bracket: )
+                - keyword: ASC
+                - keyword: NULLS
+                - keyword: FIRST
+              keyword: WITH
+              comparison_operator:
+                raw_comparison_operator: '='
+          - comma: ','
+          - exclusion_constraint_element:
+              index_element:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    column_reference:
+                      naked_identifier: two
+                    binary_operator: +
+                    numeric_literal: '5'
+                  end_bracket: )
+              keyword: WITH
+              comparison_operator:
+                raw_comparison_operator: '='
+          - comma: ','
+          - exclusion_constraint_element:
+              index_element:
+                function:
+                  function_name:
+                    function_name_identifier: myfunc
+                  bracketed:
+                  - start_bracket: (
+                  - expression:
+                      column_reference:
+                        naked_identifier: a
+                  - comma: ','
+                  - expression:
+                      column_reference:
+                        naked_identifier: b
+                  - end_bracket: )
+              keyword: WITH
+              comparison_operator:
+                raw_comparison_operator: '='
+          - comma: ','
+          - exclusion_constraint_element:
+              index_element:
+                function:
+                  function_name:
+                    function_name_identifier: myfunc_opclass
+                  bracketed:
+                  - start_bracket: (
+                  - expression:
+                      column_reference:
+                        naked_identifier: a
+                  - comma: ','
+                  - expression:
+                      column_reference:
+                        naked_identifier: b
+                  - end_bracket: )
+                index_element_options:
+                  operator_class_reference:
+                    naked_identifier: fop
+                  relation_options:
+                    bracketed:
+                    - start_bracket: (
+                    - relation_option:
+                        properties_naked_identifier: opt
+                        comparison_operator:
+                          raw_comparison_operator: '='
+                        numeric_literal: '1'
+                    - comma: ','
+                    - relation_option:
+                        properties_naked_identifier: foo
+                        comparison_operator:
+                          raw_comparison_operator: '='
+                        numeric_literal: '2'
+                    - end_bracket: )
+              keyword: WITH
+              comparison_operator:
+                raw_comparison_operator: '='
+          - comma: ','
+          - exclusion_constraint_element:
+              index_element:
+                column_reference:
+                  naked_identifier: only_opclass
+                index_element_options:
+                  operator_class_reference:
+                    naked_identifier: opclass
+              keyword: WITH
+              comparison_operator:
+                raw_comparison_operator: '='
+          - comma: ','
+          - exclusion_constraint_element:
+              index_element:
+                column_reference:
+                  naked_identifier: desc_order
+                index_element_options:
+                  keyword: DESC
+              keyword: WITH
+              comparison_operator:
+                raw_comparison_operator: '='
+          - end_bracket: )
+        - index_parameters:
+          - keyword: INCLUDE
+          - bracketed:
+            - start_bracket: (
+            - column_reference:
+                naked_identifier: a
+            - comma: ','
+            - column_reference:
+                naked_identifier: b
+            - end_bracket: )
+          - keyword: WITH
+          - definition_parameters:
+              bracketed:
+              - start_bracket: (
+              - definition_parameter:
+                  properties_naked_identifier: idx_num
+                  comparison_operator:
+                    raw_comparison_operator: '='
+                  numeric_literal: '5'
+              - comma: ','
+              - definition_parameter:
+                  properties_naked_identifier: idx_str
+                  comparison_operator:
+                    raw_comparison_operator: '='
+                  quoted_literal: "'idx_value'"
+              - comma: ','
+              - definition_parameter:
+                - properties_naked_identifier: idx_kw
+                - comparison_operator:
+                    raw_comparison_operator: '='
+                - properties_naked_identifier: DESC
+              - end_bracket: )
+          - keyword: USING
+          - keyword: INDEX
+          - keyword: TABLESPACE
+          - tablespace_reference:
+              naked_identifier: tblspc
+        - keyword: WHERE
+        - bracketed:
+            start_bracket: (
+            expression:
+              column_reference:
+                naked_identifier: field
+              comparison_operator:
+              - raw_comparison_operator: '!'
+              - raw_comparison_operator: '='
+              quoted_literal: "'def'"
+            end_bracket: )
+        - keyword: DEFERRABLE
+        - keyword: NOT
+        - keyword: VALID
+        - keyword: INITIALLY
+        - keyword: DEFERRED
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/postgres/postgres_array.sql b/test/fixtures/dialects/postgres/postgres_array.sql
index 31da3c2..e5ce89b 100644
--- a/test/fixtures/dialects/postgres/postgres_array.sql
+++ b/test/fixtures/dialects/postgres/postgres_array.sql
@@ -53,4 +53,10 @@ SELECT SUM(CASE
         ELSE 0
         END
     ) * (MAX(ARRAY[id, vertical]))[2]
-FROM direction_with_vertical_change
+FROM direction_with_vertical_change;
+
+-- More advanced cases with expressions and missing slice start/end when accessing
+
+SELECT a[:], b[:1], c[2:], d[2:3];
+
+SELECT a[1+2:3+4], b[5+6];
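
The two added SELECT statements cover slice bounds that may be omitted on either side and bounds that are full expressions. A quick smoke test of both, assuming sqlfluff 2.x (where sqlfluff.parse raises if a statement fails to parse):

import sqlfluff

for sql in (
    "SELECT a[:], b[:1], c[2:], d[2:3];",
    "SELECT a[1+2:3+4], b[5+6];",
):
    sqlfluff.parse(sql, dialect="postgres")  # raises on parse failure
    print("parses OK:", sql)
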
diff --git a/test/fixtures/dialects/postgres/postgres_array.yml b/test/fixtures/dialects/postgres/postgres_array.yml
index 00f4f9f..31b40d7 100644
--- a/test/fixtures/dialects/postgres/postgres_array.yml
+++ b/test/fixtures/dialects/postgres/postgres_array.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 2de80fba7147430d59c89a94ff122d4f4da8d4e6ac743e44b7ffd53fa66c3098
+_hash: fa7d0f6f980261224b84a16686a50389be1004eb1a613794a20222205208f836
 file:
 - statement:
     select_statement:
@@ -11,46 +11,52 @@ file:
         keyword: SELECT
         select_clause_element:
           expression:
-          - array_literal:
-            - keyword: ARRAY
-            - start_square_bracket: '['
-            - numeric_literal: '1'
-            - comma: ','
-            - numeric_literal: '2'
-            - end_square_bracket: ']'
+          - typed_array_literal:
+              array_type:
+                keyword: ARRAY
+              array_literal:
+              - start_square_bracket: '['
+              - numeric_literal: '1'
+              - comma: ','
+              - numeric_literal: '2'
+              - end_square_bracket: ']'
           - binary_operator:
             - pipe: '|'
             - pipe: '|'
-          - array_literal:
-            - keyword: ARRAY
-            - start_square_bracket: '['
-            - numeric_literal: '3'
-            - comma: ','
-            - numeric_literal: '4'
-            - end_square_bracket: ']'
+          - typed_array_literal:
+              array_type:
+                keyword: ARRAY
+              array_literal:
+              - start_square_bracket: '['
+              - numeric_literal: '3'
+              - comma: ','
+              - numeric_literal: '4'
+              - end_square_bracket: ']'
 - statement_terminator: ;
 - statement:
     select_statement:
       select_clause:
         keyword: SELECT
         select_clause_element:
-          array_literal:
-          - keyword: ARRAY
-          - start_square_bracket: '['
-          - array_literal:
-            - start_square_bracket: '['
-            - quoted_literal: "'meeting'"
-            - comma: ','
-            - quoted_literal: "'lunch'"
-            - end_square_bracket: ']'
-          - comma: ','
-          - array_literal:
+          typed_array_literal:
+            array_type:
+              keyword: ARRAY
+            array_literal:
             - start_square_bracket: '['
-            - quoted_literal: "'training'"
+            - array_literal:
+              - start_square_bracket: '['
+              - quoted_literal: "'meeting'"
+              - comma: ','
+              - quoted_literal: "'lunch'"
+              - end_square_bracket: ']'
             - comma: ','
-            - quoted_literal: "'presentation'"
+            - array_literal:
+              - start_square_bracket: '['
+              - quoted_literal: "'training'"
+              - comma: ','
+              - quoted_literal: "'presentation'"
+              - end_square_bracket: ']'
             - end_square_bracket: ']'
-          - end_square_bracket: ']'
 - statement_terminator: ;
 - statement:
     create_table_statement:
@@ -176,36 +182,40 @@ file:
             quoted_literal: "'Bill'"
         - comma: ','
         - expression:
-            array_literal:
-            - keyword: ARRAY
-            - start_square_bracket: '['
-            - numeric_literal: '10000'
-            - comma: ','
-            - numeric_literal: '10000'
-            - comma: ','
-            - numeric_literal: '10000'
-            - comma: ','
-            - numeric_literal: '10000'
-            - end_square_bracket: ']'
-        - comma: ','
-        - expression:
-            array_literal:
-            - keyword: ARRAY
-            - start_square_bracket: '['
-            - array_literal:
+            typed_array_literal:
+              array_type:
+                keyword: ARRAY
+              array_literal:
               - start_square_bracket: '['
-              - quoted_literal: "'meeting'"
+              - numeric_literal: '10000'
               - comma: ','
-              - quoted_literal: "'lunch'"
+              - numeric_literal: '10000'
+              - comma: ','
+              - numeric_literal: '10000'
+              - comma: ','
+              - numeric_literal: '10000'
               - end_square_bracket: ']'
-            - comma: ','
-            - array_literal:
+        - comma: ','
+        - expression:
+            typed_array_literal:
+              array_type:
+                keyword: ARRAY
+              array_literal:
               - start_square_bracket: '['
-              - quoted_literal: "'training'"
+              - array_literal:
+                - start_square_bracket: '['
+                - quoted_literal: "'meeting'"
+                - comma: ','
+                - quoted_literal: "'lunch'"
+                - end_square_bracket: ']'
               - comma: ','
-              - quoted_literal: "'presentation'"
+              - array_literal:
+                - start_square_bracket: '['
+                - quoted_literal: "'training'"
+                - comma: ','
+                - quoted_literal: "'presentation'"
+                - end_square_bracket: ']'
               - end_square_bracket: ']'
-            - end_square_bracket: ']'
         - end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -222,36 +232,40 @@ file:
             quoted_literal: "'Carol'"
         - comma: ','
         - expression:
-            array_literal:
-            - keyword: ARRAY
-            - start_square_bracket: '['
-            - numeric_literal: '20000'
-            - comma: ','
-            - numeric_literal: '25000'
-            - comma: ','
-            - numeric_literal: '25000'
-            - comma: ','
-            - numeric_literal: '25000'
-            - end_square_bracket: ']'
-        - comma: ','
-        - expression:
-            array_literal:
-            - keyword: ARRAY
-            - start_square_bracket: '['
-            - array_literal:
+            typed_array_literal:
+              array_type:
+                keyword: ARRAY
+              array_literal:
               - start_square_bracket: '['
-              - quoted_literal: "'breakfast'"
+              - numeric_literal: '20000'
+              - comma: ','
+              - numeric_literal: '25000'
               - comma: ','
-              - quoted_literal: "'consulting'"
+              - numeric_literal: '25000'
+              - comma: ','
+              - numeric_literal: '25000'
               - end_square_bracket: ']'
-            - comma: ','
-            - array_literal:
+        - comma: ','
+        - expression:
+            typed_array_literal:
+              array_type:
+                keyword: ARRAY
+              array_literal:
               - start_square_bracket: '['
-              - quoted_literal: "'meeting'"
+              - array_literal:
+                - start_square_bracket: '['
+                - quoted_literal: "'breakfast'"
+                - comma: ','
+                - quoted_literal: "'consulting'"
+                - end_square_bracket: ']'
               - comma: ','
-              - quoted_literal: "'lunch'"
+              - array_literal:
+                - start_square_bracket: '['
+                - quoted_literal: "'meeting'"
+                - comma: ','
+                - quoted_literal: "'lunch'"
+                - end_square_bracket: ']'
               - end_square_bracket: ']'
-            - end_square_bracket: ']'
         - end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -293,14 +307,15 @@ file:
         keyword: SELECT
         select_clause_element:
           expression:
-            column_reference:
+          - column_reference:
               naked_identifier: schedule
-            array_accessor:
+          - array_accessor:
             - start_square_bracket: '['
             - numeric_literal: '1'
             - slice: ':'
             - numeric_literal: '2'
             - end_square_bracket: ']'
+          - array_accessor:
             - start_square_bracket: '['
             - numeric_literal: '1'
             - slice: ':'
@@ -387,25 +402,29 @@ file:
             bracketed:
               start_bracket: (
               expression:
-              - array_literal:
-                - keyword: ARRAY
-                - start_square_bracket: '['
-                - numeric_literal: '1'
-                - comma: ','
-                - numeric_literal: '2'
-                - end_square_bracket: ']'
+              - typed_array_literal:
+                  array_type:
+                    keyword: ARRAY
+                  array_literal:
+                  - start_square_bracket: '['
+                  - numeric_literal: '1'
+                  - comma: ','
+                  - numeric_literal: '2'
+                  - end_square_bracket: ']'
               - binary_operator:
                 - pipe: '|'
                 - pipe: '|'
-              - array_literal:
-                - keyword: ARRAY
-                - start_square_bracket: '['
-                - numeric_literal: '3'
-                - comma: ','
-                - numeric_literal: '4'
-                - comma: ','
-                - numeric_literal: '5'
-                - end_square_bracket: ']'
+              - typed_array_literal:
+                  array_type:
+                    keyword: ARRAY
+                  array_literal:
+                  - start_square_bracket: '['
+                  - numeric_literal: '3'
+                  - comma: ','
+                  - numeric_literal: '4'
+                  - comma: ','
+                  - numeric_literal: '5'
+                  - end_square_bracket: ']'
               end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -419,33 +438,37 @@ file:
             bracketed:
               start_bracket: (
               expression:
-              - array_literal:
-                - keyword: ARRAY
-                - start_square_bracket: '['
-                - numeric_literal: '1'
-                - comma: ','
-                - numeric_literal: '2'
-                - end_square_bracket: ']'
-              - binary_operator:
-                - pipe: '|'
-                - pipe: '|'
-              - array_literal:
-                - keyword: ARRAY
-                - start_square_bracket: '['
-                - array_literal:
+              - typed_array_literal:
+                  array_type:
+                    keyword: ARRAY
+                  array_literal:
                   - start_square_bracket: '['
-                  - numeric_literal: '3'
+                  - numeric_literal: '1'
                   - comma: ','
-                  - numeric_literal: '4'
+                  - numeric_literal: '2'
                   - end_square_bracket: ']'
-                - comma: ','
-                - array_literal:
+              - binary_operator:
+                - pipe: '|'
+                - pipe: '|'
+              - typed_array_literal:
+                  array_type:
+                    keyword: ARRAY
+                  array_literal:
                   - start_square_bracket: '['
-                  - numeric_literal: '5'
+                  - array_literal:
+                    - start_square_bracket: '['
+                    - numeric_literal: '3'
+                    - comma: ','
+                    - numeric_literal: '4'
+                    - end_square_bracket: ']'
                   - comma: ','
-                  - numeric_literal: '6'
+                  - array_literal:
+                    - start_square_bracket: '['
+                    - numeric_literal: '5'
+                    - comma: ','
+                    - numeric_literal: '6'
+                    - end_square_bracket: ']'
                   - end_square_bracket: ']'
-                - end_square_bracket: ']'
               end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -454,13 +477,15 @@ file:
         keyword: SELECT
         select_clause_element:
           expression:
-            array_literal:
-            - keyword: ARRAY
-            - start_square_bracket: '['
-            - numeric_literal: '1'
-            - comma: ','
-            - numeric_literal: '2'
-            - end_square_bracket: ']'
+            typed_array_literal:
+              array_type:
+                keyword: ARRAY
+              array_literal:
+              - start_square_bracket: '['
+              - numeric_literal: '1'
+              - comma: ','
+              - numeric_literal: '2'
+              - end_square_bracket: ']'
             binary_operator:
             - pipe: '|'
             - pipe: '|'
@@ -477,23 +502,25 @@ file:
             bracketed:
             - start_bracket: (
             - expression:
-                array_literal:
-                - keyword: ARRAY
-                - start_square_bracket: '['
-                - quoted_literal: "'sun'"
-                - comma: ','
-                - quoted_literal: "'mon'"
-                - comma: ','
-                - quoted_literal: "'tue'"
-                - comma: ','
-                - quoted_literal: "'wed'"
-                - comma: ','
-                - quoted_literal: "'thu'"
-                - comma: ','
-                - quoted_literal: "'fri'"
-                - comma: ','
-                - quoted_literal: "'sat'"
-                - end_square_bracket: ']'
+                typed_array_literal:
+                  array_type:
+                    keyword: ARRAY
+                  array_literal:
+                  - start_square_bracket: '['
+                  - quoted_literal: "'sun'"
+                  - comma: ','
+                  - quoted_literal: "'mon'"
+                  - comma: ','
+                  - quoted_literal: "'tue'"
+                  - comma: ','
+                  - quoted_literal: "'wed'"
+                  - comma: ','
+                  - quoted_literal: "'thu'"
+                  - comma: ','
+                  - quoted_literal: "'fri'"
+                  - comma: ','
+                  - quoted_literal: "'sat'"
+                  - end_square_bracket: ']'
             - comma: ','
             - expression:
                 quoted_literal: "'mon'"
@@ -505,40 +532,44 @@ file:
       - keyword: SELECT
       - select_clause_element:
           expression:
-            column_reference:
+          - column_reference:
               naked_identifier: f1
-            array_accessor:
-            - start_square_bracket: '['
-            - numeric_literal: '1'
-            - end_square_bracket: ']'
-            - start_square_bracket: '['
-            - numeric_literal:
+          - array_accessor:
+              start_square_bracket: '['
+              numeric_literal: '1'
+              end_square_bracket: ']'
+          - array_accessor:
+              start_square_bracket: '['
+              numeric_literal:
                 sign_indicator: '-'
                 numeric_literal: '2'
-            - end_square_bracket: ']'
-            - start_square_bracket: '['
-            - numeric_literal: '3'
-            - end_square_bracket: ']'
+              end_square_bracket: ']'
+          - array_accessor:
+              start_square_bracket: '['
+              numeric_literal: '3'
+              end_square_bracket: ']'
           alias_expression:
             keyword: AS
             naked_identifier: e1
       - comma: ','
       - select_clause_element:
           expression:
-            column_reference:
+          - column_reference:
               naked_identifier: f1
-            array_accessor:
-            - start_square_bracket: '['
-            - numeric_literal: '1'
-            - end_square_bracket: ']'
-            - start_square_bracket: '['
-            - numeric_literal:
+          - array_accessor:
+              start_square_bracket: '['
+              numeric_literal: '1'
+              end_square_bracket: ']'
+          - array_accessor:
+              start_square_bracket: '['
+              numeric_literal:
                 sign_indicator: '-'
                 numeric_literal: '1'
-            - end_square_bracket: ']'
-            - start_square_bracket: '['
-            - numeric_literal: '5'
-            - end_square_bracket: ']'
+              end_square_bracket: ']'
+          - array_accessor:
+              start_square_bracket: '['
+              numeric_literal: '5'
+              end_square_bracket: ']'
           alias_expression:
             keyword: AS
             naked_identifier: e2
@@ -611,15 +642,17 @@ file:
                   bracketed:
                     start_bracket: (
                     expression:
-                      array_literal:
-                      - keyword: ARRAY
-                      - start_square_bracket: '['
-                      - column_reference:
-                          naked_identifier: id
-                      - comma: ','
-                      - column_reference:
-                          naked_identifier: vertical
-                      - end_square_bracket: ']'
+                      typed_array_literal:
+                        array_type:
+                          keyword: ARRAY
+                        array_literal:
+                        - start_square_bracket: '['
+                        - column_reference:
+                            naked_identifier: id
+                        - comma: ','
+                        - column_reference:
+                            naked_identifier: vertical
+                        - end_square_bracket: ']'
                     end_bracket: )
               end_bracket: )
             array_accessor:
@@ -633,3 +666,81 @@ file:
             table_expression:
               table_reference:
                 naked_identifier: direction_with_vertical_change
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          expression:
+            column_reference:
+              naked_identifier: a
+            array_accessor:
+              start_square_bracket: '['
+              slice: ':'
+              end_square_bracket: ']'
+      - comma: ','
+      - select_clause_element:
+          expression:
+            column_reference:
+              naked_identifier: b
+            array_accessor:
+              start_square_bracket: '['
+              slice: ':'
+              numeric_literal: '1'
+              end_square_bracket: ']'
+      - comma: ','
+      - select_clause_element:
+          expression:
+            column_reference:
+              naked_identifier: c
+            array_accessor:
+              start_square_bracket: '['
+              numeric_literal: '2'
+              slice: ':'
+              end_square_bracket: ']'
+      - comma: ','
+      - select_clause_element:
+          expression:
+            column_reference:
+              naked_identifier: d
+            array_accessor:
+            - start_square_bracket: '['
+            - numeric_literal: '2'
+            - slice: ':'
+            - numeric_literal: '3'
+            - end_square_bracket: ']'
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          expression:
+            column_reference:
+              naked_identifier: a
+            array_accessor:
+            - start_square_bracket: '['
+            - expression:
+              - numeric_literal: '1'
+              - binary_operator: +
+              - numeric_literal: '2'
+            - slice: ':'
+            - expression:
+              - numeric_literal: '3'
+              - binary_operator: +
+              - numeric_literal: '4'
+            - end_square_bracket: ']'
+      - comma: ','
+      - select_clause_element:
+          expression:
+            column_reference:
+              naked_identifier: b
+            array_accessor:
+              start_square_bracket: '['
+              expression:
+              - numeric_literal: '5'
+              - binary_operator: +
+              - numeric_literal: '6'
+              end_square_bracket: ']'
+- statement_terminator: ;
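
The hunks above reflect two grammar changes in the 2.0 parser: an ARRAY[...] literal is no longer a flat array_literal with an inline ARRAY keyword but a typed_array_literal wrapping an array_type and the bracketed array_literal, and chained subscripts or slices (a[1:2], f1[1][-2][3]) now yield one array_accessor node per bracket pair. A minimal sketch of inspecting the new shape through sqlfluff's simple Python API follows; the dialect choice and the assumption that sqlfluff >= 2.0 is installed are mine, not part of the diff:

    # Sketch: parse an array expression and print the resulting record.
    # Assumes the sqlfluff 2.x simple API; the printed structure should
    # mirror the typed_array_literal / array_accessor segments above.
    import sqlfluff

    tree = sqlfluff.parse(
        "SELECT ARRAY[1, 2] || ARRAY[3, 4, 5], schedule[1:2];",
        dialect="postgres",
    )
    print(tree)
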
diff --git a/test/fixtures/dialects/postgres/postgres_cast_with_whitespaces.yml b/test/fixtures/dialects/postgres/postgres_cast_with_whitespaces.yml
index b442cf8..05b8ce8 100644
--- a/test/fixtures/dialects/postgres/postgres_cast_with_whitespaces.yml
+++ b/test/fixtures/dialects/postgres/postgres_cast_with_whitespaces.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 45f6ce610eb11288932f751215ccd224f40f71b9709a919f29620b0453a0cdaf
+_hash: 8aa7f240daeacea8a3fa688ddb3fea842e1a61cbfda2ff0d93b25e749ebb126b
 file:
 - statement:
     select_statement:
@@ -138,10 +138,11 @@ file:
               casting_operator: '::'
               data_type:
                 keyword: VARCHAR
-                bracketed:
-                  start_bracket: (
-                  numeric_literal: '512'
-                  end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '512'
+                    end_bracket: )
       from_clause:
         keyword: FROM
         from_expression:
@@ -233,10 +234,11 @@ file:
                   casting_operator: '::'
                   data_type:
                     keyword: VARCHAR
-                    bracketed:
-                      start_bracket: (
-                      numeric_literal: '512'
-                      end_bracket: )
+                    bracketed_arguments:
+                      bracketed:
+                        start_bracket: (
+                        numeric_literal: '512'
+                        end_bracket: )
               - comparison_operator:
                   raw_comparison_operator: '='
               - cast_expression:
@@ -247,10 +249,11 @@ file:
                   casting_operator: '::'
                   data_type:
                     keyword: VARCHAR
-                    bracketed:
-                      start_bracket: (
-                      numeric_literal: '512'
-                      end_bracket: )
+                    bracketed_arguments:
+                      bracketed:
+                        start_bracket: (
+                        numeric_literal: '512'
+                        end_bracket: )
       where_clause:
         keyword: WHERE
         expression:
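
A related change shows up in the cast fixture just above: arguments to a data type, such as the 512 in VARCHAR(512), are now nested under a dedicated bracketed_arguments segment rather than a bare bracketed node. A quick spot-check in the same vein (again assuming the 2.x simple API; the membership test also presumes the returned record stringifies with its segment names):

    # Sketch: VARCHAR(512) should now carry a bracketed_arguments segment.
    import sqlfluff

    tree = sqlfluff.parse("SELECT x :: VARCHAR (512);", dialect="postgres")
    print("bracketed_arguments" in str(tree))  # expected: True on 2.x
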
diff --git a/test/fixtures/dialects/postgres/postgres_comment_on.yml b/test/fixtures/dialects/postgres/postgres_comment_on.yml
index b21eb8a..99cad13 100644
--- a/test/fixtures/dialects/postgres/postgres_comment_on.yml
+++ b/test/fixtures/dialects/postgres/postgres_comment_on.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 33381b0b86302058a5674d19c7a48f20a5af530917b5185963a97a4d581aba83
+_hash: 30f7e598ee1ea2bb81c0e7000e562ff401d31fe6d8058dbf6031afe373fdc6fe
 file:
 - statement:
     comment_clause:
@@ -45,8 +45,8 @@ file:
         naked_identifier: my_aggregate
     - bracketed:
       - start_bracket: (
-      - raw: double
-      - raw: precision
+      - code: double
+      - code: precision
       - end_bracket: )
     - keyword: IS
     - quoted_literal: "'Computes sample variance'"
@@ -284,9 +284,9 @@ file:
         naked_identifier: my_proc
     - bracketed:
       - start_bracket: (
-      - raw: integer
+      - code: integer
       - comma: ','
-      - raw: integer
+      - code: integer
       - end_bracket: )
     - keyword: IS
     - quoted_literal: "'Runs a report'"
@@ -343,9 +343,9 @@ file:
         naked_identifier: my_routine
     - bracketed:
       - start_bracket: (
-      - raw: integer
+      - code: integer
       - comma: ','
-      - raw: integer
+      - code: integer
       - end_bracket: )
     - keyword: IS
     - quoted_literal: "'Runs a routine (which is a function or procedure)'"
diff --git a/test/fixtures/dialects/postgres/postgres_composite_types.yml b/test/fixtures/dialects/postgres/postgres_composite_types.yml
index 02b3e6b..8da4d4c 100644
--- a/test/fixtures/dialects/postgres/postgres_composite_types.yml
+++ b/test/fixtures/dialects/postgres/postgres_composite_types.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 3b6a6b1369d1d10833d15178a6c7a131652b89a235dca285c32f963dcb730b7c
+_hash: fd4a08b86542fd0e9656247766feaeeedc6af7c222fdbc8ba378e599a3bf8867
 file:
 - statement:
     create_type_statement:
@@ -14,14 +14,14 @@ file:
     - keyword: AS
     - bracketed:
       - start_bracket: (
-      - raw: int_
-      - raw: INT4
+      - code: int_
+      - code: INT4
       - comma: ','
-      - raw: bool_
-      - raw: BOOLEAN
+      - code: bool_
+      - code: BOOLEAN
       - comma: ','
-      - raw: comment_
-      - raw: TEXT
+      - code: comment_
+      - code: TEXT
       - end_bracket: )
 - statement_terminator: ;
 - statement:
diff --git a/test/fixtures/dialects/postgres/postgres_create_cast.sql b/test/fixtures/dialects/postgres/postgres_create_cast.sql
new file mode 100644
index 0000000..2db697e
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_create_cast.sql
@@ -0,0 +1,24 @@
+CREATE CAST (int AS bool) WITH FUNCTION fname;
+CREATE CAST (int AS bool) WITH FUNCTION fname AS ASSIGNMENT;
+CREATE CAST (int AS bool) WITH FUNCTION fname AS IMPLICIT;
+
+CREATE CAST (int AS bool) WITH FUNCTION fname();
+CREATE CAST (int AS bool) WITH FUNCTION fname() AS ASSIGNMENT;
+CREATE CAST (int AS bool) WITH FUNCTION fname() AS IMPLICIT;
+
+CREATE CAST (int AS bool) WITH FUNCTION fname(bool);
+
+CREATE CAST (int AS bool) WITH FUNCTION sch.fname(int, bool) AS ASSIGNMENT;
+
+CREATE CAST (udt_1 AS udt_2) WITH FUNCTION fname(udt_1, udt_2);
+
+CREATE CAST (sch.udt_1 AS sch.udt_2) WITH FUNCTION sch.fname(sch.udt_1, sch.udt_2);
+
+-- PG extension for not listing an actual function:
+CREATE CAST (int AS bool) WITHOUT FUNCTION;
+CREATE CAST (int AS bool) WITHOUT FUNCTION AS ASSIGNMENT;
+CREATE CAST (int AS bool) WITHOUT FUNCTION AS IMPLICIT;
+
+CREATE CAST (int AS bool) WITH INOUT;
+CREATE CAST (int AS bool) WITH INOUT AS ASSIGNMENT;
+CREATE CAST (int AS bool) WITH INOUT AS IMPLICIT;
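
postgres_create_cast.sql is a new fixture exercising the CREATE CAST grammar: WITH FUNCTION with and without a parameter list, schema-qualified function and type names, the WITHOUT FUNCTION and WITH INOUT forms, and the AS ASSIGNMENT / AS IMPLICIT modifiers. As the fixture headers repeat, the companion YAML below is generated rather than hand-written; after adding or altering a SQL file the YAML is regenerated from the repository root. A sketch of that step, wrapped in Python to match the other snippets here:

    # Regenerate the YAML parse fixtures after adding/altering SQL files,
    # using the script named in the fixture headers (run from repo root).
    import subprocess

    subprocess.run(["python", "test/generate_parse_fixture_yml.py"], check=True)
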
diff --git a/test/fixtures/dialects/postgres/postgres_create_cast.yml b/test/fixtures/dialects/postgres/postgres_create_cast.yml
new file mode 100644
index 0000000..6e8f2cd
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_create_cast.yml
@@ -0,0 +1,342 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 9e34d987801d78938da48d706596e4179445070e27a9b8e4a6146a28543d4c37
+file:
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          keyword: int
+      - keyword: AS
+      - data_type:
+          keyword: bool
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: FUNCTION
+    - function_name:
+        function_name_identifier: fname
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          keyword: int
+      - keyword: AS
+      - data_type:
+          keyword: bool
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: FUNCTION
+    - function_name:
+        function_name_identifier: fname
+    - keyword: AS
+    - keyword: ASSIGNMENT
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          keyword: int
+      - keyword: AS
+      - data_type:
+          keyword: bool
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: FUNCTION
+    - function_name:
+        function_name_identifier: fname
+    - keyword: AS
+    - keyword: IMPLICIT
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          keyword: int
+      - keyword: AS
+      - data_type:
+          keyword: bool
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: FUNCTION
+    - function_name:
+        function_name_identifier: fname
+    - function_parameter_list:
+        bracketed:
+          start_bracket: (
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          keyword: int
+      - keyword: AS
+      - data_type:
+          keyword: bool
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: FUNCTION
+    - function_name:
+        function_name_identifier: fname
+    - function_parameter_list:
+        bracketed:
+          start_bracket: (
+          end_bracket: )
+    - keyword: AS
+    - keyword: ASSIGNMENT
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          keyword: int
+      - keyword: AS
+      - data_type:
+          keyword: bool
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: FUNCTION
+    - function_name:
+        function_name_identifier: fname
+    - function_parameter_list:
+        bracketed:
+          start_bracket: (
+          end_bracket: )
+    - keyword: AS
+    - keyword: IMPLICIT
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          keyword: int
+      - keyword: AS
+      - data_type:
+          keyword: bool
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: FUNCTION
+    - function_name:
+        function_name_identifier: fname
+    - function_parameter_list:
+        bracketed:
+          start_bracket: (
+          data_type:
+            keyword: bool
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          keyword: int
+      - keyword: AS
+      - data_type:
+          keyword: bool
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: FUNCTION
+    - function_name:
+        naked_identifier: sch
+        dot: .
+        function_name_identifier: fname
+    - function_parameter_list:
+        bracketed:
+        - start_bracket: (
+        - data_type:
+            keyword: int
+        - comma: ','
+        - data_type:
+            keyword: bool
+        - end_bracket: )
+    - keyword: AS
+    - keyword: ASSIGNMENT
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          data_type_identifier: udt_1
+      - keyword: AS
+      - data_type:
+          data_type_identifier: udt_2
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: FUNCTION
+    - function_name:
+        function_name_identifier: fname
+    - function_parameter_list:
+        bracketed:
+        - start_bracket: (
+        - data_type:
+            data_type_identifier: udt_1
+        - comma: ','
+        - data_type:
+            data_type_identifier: udt_2
+        - end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          naked_identifier: sch
+          dot: .
+          data_type_identifier: udt_1
+      - keyword: AS
+      - data_type:
+          naked_identifier: sch
+          dot: .
+          data_type_identifier: udt_2
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: FUNCTION
+    - function_name:
+        naked_identifier: sch
+        dot: .
+        function_name_identifier: fname
+    - function_parameter_list:
+        bracketed:
+        - start_bracket: (
+        - data_type:
+            naked_identifier: sch
+            dot: .
+            data_type_identifier: udt_1
+        - comma: ','
+        - data_type:
+            naked_identifier: sch
+            dot: .
+            data_type_identifier: udt_2
+        - end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          keyword: int
+      - keyword: AS
+      - data_type:
+          keyword: bool
+      - end_bracket: )
+    - keyword: WITHOUT
+    - keyword: FUNCTION
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          keyword: int
+      - keyword: AS
+      - data_type:
+          keyword: bool
+      - end_bracket: )
+    - keyword: WITHOUT
+    - keyword: FUNCTION
+    - keyword: AS
+    - keyword: ASSIGNMENT
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          keyword: int
+      - keyword: AS
+      - data_type:
+          keyword: bool
+      - end_bracket: )
+    - keyword: WITHOUT
+    - keyword: FUNCTION
+    - keyword: AS
+    - keyword: IMPLICIT
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          keyword: int
+      - keyword: AS
+      - data_type:
+          keyword: bool
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: INOUT
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          keyword: int
+      - keyword: AS
+      - data_type:
+          keyword: bool
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: INOUT
+    - keyword: AS
+    - keyword: ASSIGNMENT
+- statement_terminator: ;
+- statement:
+    create_cast_statement:
+    - keyword: CREATE
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          keyword: int
+      - keyword: AS
+      - data_type:
+          keyword: bool
+      - end_bracket: )
+    - keyword: WITH
+    - keyword: INOUT
+    - keyword: AS
+    - keyword: IMPLICIT
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/postgres/postgres_create_extension.sql b/test/fixtures/dialects/postgres/postgres_create_extension.sql
index 6d37611..aa911b3 100644
--- a/test/fixtures/dialects/postgres/postgres_create_extension.sql
+++ b/test/fixtures/dialects/postgres/postgres_create_extension.sql
@@ -1,13 +1,17 @@
 CREATE EXTENSION amazing_extension
     with schema schema1
-    VERSION 2.0
-    FROM 1.0;
+    VERSION '2.0.1.2'
+    FROM '1.0';
 
 CREATE EXTENSION IF NOT EXISTS amazing_extension
     with schema schema1
-    VERSION 2.0
-    FROM 1.0;
+    VERSION '1.2.3a4'
+    FROM '1.0';
 
+CREATE EXTENSION amazing_extension
+    with schema schema1
+    VERSION version_named
+    FROM from_named;
 
 DROP EXTENSION amazing_extension;
 
diff --git a/test/fixtures/dialects/postgres/postgres_create_extension.yml b/test/fixtures/dialects/postgres/postgres_create_extension.yml
index 3e325ff..47bdc88 100644
--- a/test/fixtures/dialects/postgres/postgres_create_extension.yml
+++ b/test/fixtures/dialects/postgres/postgres_create_extension.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: ecc9699f8a881f56b241c5099433e6d13173c8537e1aa113206c93ba08b6f4ba
+_hash: 13f0d8a792a648d25f079cc2edfbf2bd9fa6571e79c3aab948efd5a20f9e8526
 file:
 - statement:
     create_extension_statement:
@@ -16,9 +16,11 @@ file:
     - schema_reference:
         naked_identifier: schema1
     - keyword: VERSION
-    - identifier: '2.0'
+    - version_identifier:
+        quoted_literal: "'2.0.1.2'"
     - keyword: FROM
-    - identifier: '1.0'
+    - version_identifier:
+        quoted_literal: "'1.0'"
 - statement_terminator: ;
 - statement:
     create_extension_statement:
@@ -34,9 +36,28 @@ file:
     - schema_reference:
         naked_identifier: schema1
     - keyword: VERSION
-    - identifier: '2.0'
+    - version_identifier:
+        quoted_literal: "'1.2.3a4'"
     - keyword: FROM
-    - identifier: '1.0'
+    - version_identifier:
+        quoted_literal: "'1.0'"
+- statement_terminator: ;
+- statement:
+    create_extension_statement:
+    - keyword: CREATE
+    - keyword: EXTENSION
+    - extension_reference:
+        naked_identifier: amazing_extension
+    - keyword: with
+    - keyword: schema
+    - schema_reference:
+        naked_identifier: schema1
+    - keyword: VERSION
+    - version_identifier:
+        naked_identifier: version_named
+    - keyword: FROM
+    - version_identifier:
+        naked_identifier: from_named
 - statement_terminator: ;
 - statement:
     drop_extension_statement:
diff --git a/test/fixtures/dialects/postgres/postgres_create_function.yml b/test/fixtures/dialects/postgres/postgres_create_function.yml
index f499f0c..7e2c485 100644
--- a/test/fixtures/dialects/postgres/postgres_create_function.yml
+++ b/test/fixtures/dialects/postgres/postgres_create_function.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: fd7206234d3089bf40aa15d903a2a756abe694096830bbd9e25d37311ce22728
+_hash: a2a0f2c4bee4578f40958582525e3f84a4b19d621ce8333f1364624f3b028c98
 file:
 - statement:
     create_function_statement:
@@ -325,11 +325,11 @@ file:
     - keyword: AS
     - bracketed:
       - start_bracket: (
-      - raw: f1
-      - raw: int
+      - code: f1
+      - code: int
       - comma: ','
-      - raw: f2
-      - raw: text
+      - code: f2
+      - code: text
       - end_bracket: )
 - statement_terminator: ;
 - statement:
diff --git a/test/fixtures/dialects/postgres/postgres_create_index.sql b/test/fixtures/dialects/postgres/postgres_create_index.sql
index 7572310..a7272a1 100644
--- a/test/fixtures/dialects/postgres/postgres_create_index.sql
+++ b/test/fixtures/dialects/postgres/postgres_create_index.sql
@@ -10,6 +10,8 @@ CREATE INDEX title_idx_german ON films (title COLLATE "de_DE");
 
 CREATE INDEX title_idx_nulls_low ON films (title NULLS FIRST);
 
+CREATE INDEX title_idx_nulls_high ON films (title NULLS LAST);
+
 CREATE UNIQUE INDEX title_idx ON films (title) WITH (fillfactor = 70);
 
 CREATE INDEX gin_idx ON documents_table USING GIN (locations) WITH (fastupdate = 'off');
@@ -21,4 +23,19 @@ CREATE INDEX pointloc
 
 CREATE INDEX CONCURRENTLY sales_quantity_index ON sales_table (quantity);
 
-CREATE INDEX super_idx ON super_table USING BTREE (super_column DESC);
+CREATE INDEX super_idx ON super_table USING btree(super_column DESC);
+
+CREATE INDEX opclass_index ON schema.opclass_table (col varchar_pattern_ops);
+
+CREATE INDEX opclass_index_with_parameters ON schema.opclass_table (col varchar_pattern_ops(p1='3', p2='4'));
+
+CREATE UNIQUE INDEX tests_success_constraint ON tests (subject, target)
+    WHERE success;
+
+CREATE INDEX nulls_distinct_index ON documents_table USING GIN (locations)
+     NULLS DISTINCT WITH (fastupdate = 'off');
+
+CREATE INDEX nulls_not_distinct_index ON documents_table USING GIN (locations)
+    NULLS NOT DISTINCT WITH (fastupdate = 'off');
+
+CREATE INDEX code_idx ON films (code) TABLESPACE indexspace;
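
The additions to postgres_create_index.sql track a reworked index grammar, visible in the YAML diff that follows: each indexed column or expression becomes an index_element with optional index_element_options (COLLATE, an operator class with parameters, NULLS FIRST/LAST, DESC), USING btree/GIN/gist parses as an index_access_method rather than a function call, storage parameters land under relation_options, and the TABLESPACE target becomes a tablespace_reference. A hedged spot-check of one of the new statements (same 2.x assumption as above; an exception here would indicate a parse failure):

    # Sketch: operator classes with parameters should parse under postgres.
    import sqlfluff

    sql = (
        "CREATE INDEX opclass_index_with_parameters "
        "ON schema.opclass_table (col varchar_pattern_ops(p1='3', p2='4'));"
    )
    print(sqlfluff.parse(sql, dialect="postgres"))
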
diff --git a/test/fixtures/dialects/postgres/postgres_create_index.yml b/test/fixtures/dialects/postgres/postgres_create_index.yml
index 91c9d69..a1e363e 100644
--- a/test/fixtures/dialects/postgres/postgres_create_index.yml
+++ b/test/fixtures/dialects/postgres/postgres_create_index.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 8118dce1bf51d3010c7f0c0f853fc85a25e6dee0c5833eaaecea62a61efba029
+_hash: 0a68c64141d43e375847c6dabd5d5cd128636a78296754feb4f2d13ba33ee451
 file:
 - statement:
     create_index_statement:
@@ -17,8 +17,9 @@ file:
         naked_identifier: films
     - bracketed:
         start_bracket: (
-        column_reference:
-          naked_identifier: title
+        index_element:
+          column_reference:
+            naked_identifier: title
         end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -33,17 +34,20 @@ file:
         naked_identifier: films
     - bracketed:
         start_bracket: (
-        column_reference:
-          naked_identifier: title
+        index_element:
+          column_reference:
+            naked_identifier: title
         end_bracket: )
     - keyword: INCLUDE
     - bracketed:
       - start_bracket: (
-      - column_reference:
-          naked_identifier: director
+      - index_element:
+          column_reference:
+            naked_identifier: director
       - comma: ','
-      - column_reference:
-          naked_identifier: rating
+      - index_element:
+          column_reference:
+            naked_identifier: rating
       - end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -57,39 +61,64 @@ file:
         naked_identifier: films
     - bracketed:
         start_bracket: (
-        column_reference:
-          naked_identifier: title
+        index_element:
+          column_reference:
+            naked_identifier: title
         end_bracket: )
     - keyword: WITH
+    - relation_options:
+        bracketed:
+          start_bracket: (
+          relation_option:
+            properties_naked_identifier: deduplicate_items
+            comparison_operator:
+              raw_comparison_operator: '='
+            quoted_literal: "'off'"
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_index_statement:
+    - keyword: CREATE
+    - keyword: INDEX
+    - keyword: 'ON'
+    - table_reference:
+        naked_identifier: films
     - bracketed:
         start_bracket: (
-        parameter: deduplicate_items
-        comparison_operator:
-          raw_comparison_operator: '='
-        quoted_literal: "'off'"
+        index_element:
+          bracketed:
+            start_bracket: (
+            expression:
+              function:
+                function_name:
+                  function_name_identifier: lower
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    column_reference:
+                      naked_identifier: title
+                  end_bracket: )
+            end_bracket: )
         end_bracket: )
 - statement_terminator: ;
 - statement:
     create_index_statement:
     - keyword: CREATE
     - keyword: INDEX
+    - index_reference:
+        naked_identifier: title_idx_german
     - keyword: 'ON'
     - table_reference:
         naked_identifier: films
     - bracketed:
         start_bracket: (
-        bracketed:
-          start_bracket: (
-          function:
-            function_name:
-              function_name_identifier: lower
-            bracketed:
-              start_bracket: (
-              expression:
-                column_reference:
-                  naked_identifier: title
-              end_bracket: )
-          end_bracket: )
+        index_element:
+          column_reference:
+            naked_identifier: title
+          index_element_options:
+            keyword: COLLATE
+            collation_reference:
+              quoted_identifier: '"de_DE"'
         end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -97,16 +126,18 @@ file:
     - keyword: CREATE
     - keyword: INDEX
     - index_reference:
-        naked_identifier: title_idx_german
+        naked_identifier: title_idx_nulls_low
     - keyword: 'ON'
     - table_reference:
         naked_identifier: films
     - bracketed:
         start_bracket: (
-        column_reference:
-          naked_identifier: title
-        keyword: COLLATE
-        quoted_identifier: '"de_DE"'
+        index_element:
+          column_reference:
+            naked_identifier: title
+          index_element_options:
+          - keyword: NULLS
+          - keyword: FIRST
         end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -114,17 +145,19 @@ file:
     - keyword: CREATE
     - keyword: INDEX
     - index_reference:
-        naked_identifier: title_idx_nulls_low
+        naked_identifier: title_idx_nulls_high
     - keyword: 'ON'
     - table_reference:
         naked_identifier: films
     - bracketed:
-      - start_bracket: (
-      - column_reference:
-          naked_identifier: title
-      - keyword: NULLS
-      - keyword: FIRST
-      - end_bracket: )
+        start_bracket: (
+        index_element:
+          column_reference:
+            naked_identifier: title
+          index_element_options:
+          - keyword: NULLS
+          - keyword: LAST
+        end_bracket: )
 - statement_terminator: ;
 - statement:
     create_index_statement:
@@ -138,17 +171,20 @@ file:
         naked_identifier: films
     - bracketed:
         start_bracket: (
-        column_reference:
-          naked_identifier: title
+        index_element:
+          column_reference:
+            naked_identifier: title
         end_bracket: )
     - keyword: WITH
-    - bracketed:
-        start_bracket: (
-        parameter: fillfactor
-        comparison_operator:
-          raw_comparison_operator: '='
-        numeric_literal: '70'
-        end_bracket: )
+    - relation_options:
+        bracketed:
+          start_bracket: (
+          relation_option:
+            properties_naked_identifier: fillfactor
+            comparison_operator:
+              raw_comparison_operator: '='
+            numeric_literal: '70'
+          end_bracket: )
 - statement_terminator: ;
 - statement:
     create_index_statement:
@@ -160,23 +196,24 @@ file:
     - table_reference:
         naked_identifier: documents_table
     - keyword: USING
-    - function:
-        function_name:
-          function_name_identifier: GIN
-        bracketed:
-          start_bracket: (
-          expression:
-            column_reference:
-              naked_identifier: locations
-          end_bracket: )
-    - keyword: WITH
+    - index_access_method:
+        naked_identifier: GIN
     - bracketed:
         start_bracket: (
-        parameter: fastupdate
-        comparison_operator:
-          raw_comparison_operator: '='
-        quoted_literal: "'off'"
+        index_element:
+          column_reference:
+            naked_identifier: locations
         end_bracket: )
+    - keyword: WITH
+    - relation_options:
+        bracketed:
+          start_bracket: (
+          relation_option:
+            properties_naked_identifier: fastupdate
+            comparison_operator:
+              raw_comparison_operator: '='
+            quoted_literal: "'off'"
+          end_bracket: )
 - statement_terminator: ;
 - statement:
     create_index_statement:
@@ -189,11 +226,12 @@ file:
         naked_identifier: films
     - bracketed:
         start_bracket: (
-        column_reference:
-          naked_identifier: code
+        index_element:
+          column_reference:
+            naked_identifier: code
         end_bracket: )
     - keyword: TABLESPACE
-    - table_reference:
+    - tablespace_reference:
         naked_identifier: indexspace
 - statement_terminator: ;
 - statement:
@@ -206,26 +244,25 @@ file:
     - table_reference:
         naked_identifier: points
     - keyword: USING
-    - function:
-        function_name:
-          function_name_identifier: gist
-        bracketed:
-          start_bracket: (
-          expression:
-            function:
-              function_name:
-                function_name_identifier: box
-              bracketed:
-              - start_bracket: (
-              - expression:
-                  column_reference:
-                    naked_identifier: location
-              - comma: ','
-              - expression:
-                  column_reference:
-                    naked_identifier: location
-              - end_bracket: )
-          end_bracket: )
+    - index_access_method:
+        naked_identifier: gist
+    - bracketed:
+        start_bracket: (
+        index_element:
+          function:
+            function_name:
+              function_name_identifier: box
+            bracketed:
+            - start_bracket: (
+            - expression:
+                column_reference:
+                  naked_identifier: location
+            - comma: ','
+            - expression:
+                column_reference:
+                  naked_identifier: location
+            - end_bracket: )
+        end_bracket: )
 - statement_terminator: ;
 - statement:
     create_index_statement:
@@ -239,8 +276,9 @@ file:
         naked_identifier: sales_table
     - bracketed:
         start_bracket: (
-        column_reference:
-          naked_identifier: quantity
+        index_element:
+          column_reference:
+            naked_identifier: quantity
         end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -253,13 +291,178 @@ file:
     - table_reference:
         naked_identifier: super_table
     - keyword: USING
-    - function:
-        function_name:
-          function_name_identifier: BTREE
-        bracketed:
-          start_bracket: (
-          index_column_definition:
+    - index_access_method:
+        naked_identifier: btree
+    - bracketed:
+        start_bracket: (
+        index_element:
+          column_reference:
             naked_identifier: super_column
+          index_element_options:
             keyword: DESC
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_index_statement:
+    - keyword: CREATE
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: opclass_index
+    - keyword: 'ON'
+    - table_reference:
+      - naked_identifier: schema
+      - dot: .
+      - naked_identifier: opclass_table
+    - bracketed:
+        start_bracket: (
+        index_element:
+          column_reference:
+            naked_identifier: col
+          index_element_options:
+            operator_class_reference:
+              naked_identifier: varchar_pattern_ops
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_index_statement:
+    - keyword: CREATE
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: opclass_index_with_parameters
+    - keyword: 'ON'
+    - table_reference:
+      - naked_identifier: schema
+      - dot: .
+      - naked_identifier: opclass_table
+    - bracketed:
+        start_bracket: (
+        index_element:
+          column_reference:
+            naked_identifier: col
+          index_element_options:
+            operator_class_reference:
+              naked_identifier: varchar_pattern_ops
+            relation_options:
+              bracketed:
+              - start_bracket: (
+              - relation_option:
+                  properties_naked_identifier: p1
+                  comparison_operator:
+                    raw_comparison_operator: '='
+                  quoted_literal: "'3'"
+              - comma: ','
+              - relation_option:
+                  properties_naked_identifier: p2
+                  comparison_operator:
+                    raw_comparison_operator: '='
+                  quoted_literal: "'4'"
+              - end_bracket: )
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_index_statement:
+    - keyword: CREATE
+    - keyword: UNIQUE
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: tests_success_constraint
+    - keyword: 'ON'
+    - table_reference:
+        naked_identifier: tests
+    - bracketed:
+      - start_bracket: (
+      - index_element:
+          column_reference:
+            naked_identifier: subject
+      - comma: ','
+      - index_element:
+          column_reference:
+            naked_identifier: target
+      - end_bracket: )
+    - keyword: WHERE
+    - expression:
+        column_reference:
+          naked_identifier: success
+- statement_terminator: ;
+- statement:
+    create_index_statement:
+    - keyword: CREATE
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: nulls_distinct_index
+    - keyword: 'ON'
+    - table_reference:
+        naked_identifier: documents_table
+    - keyword: USING
+    - index_access_method:
+        naked_identifier: GIN
+    - bracketed:
+        start_bracket: (
+        index_element:
+          column_reference:
+            naked_identifier: locations
+        end_bracket: )
+    - keyword: NULLS
+    - keyword: DISTINCT
+    - keyword: WITH
+    - relation_options:
+        bracketed:
+          start_bracket: (
+          relation_option:
+            properties_naked_identifier: fastupdate
+            comparison_operator:
+              raw_comparison_operator: '='
+            quoted_literal: "'off'"
           end_bracket: )
 - statement_terminator: ;
+- statement:
+    create_index_statement:
+    - keyword: CREATE
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: nulls_not_distinct_index
+    - keyword: 'ON'
+    - table_reference:
+        naked_identifier: documents_table
+    - keyword: USING
+    - index_access_method:
+        naked_identifier: GIN
+    - bracketed:
+        start_bracket: (
+        index_element:
+          column_reference:
+            naked_identifier: locations
+        end_bracket: )
+    - keyword: NULLS
+    - keyword: NOT
+    - keyword: DISTINCT
+    - keyword: WITH
+    - relation_options:
+        bracketed:
+          start_bracket: (
+          relation_option:
+            properties_naked_identifier: fastupdate
+            comparison_operator:
+              raw_comparison_operator: '='
+            quoted_literal: "'off'"
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_index_statement:
+    - keyword: CREATE
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: code_idx
+    - keyword: 'ON'
+    - table_reference:
+        naked_identifier: films
+    - bracketed:
+        start_bracket: (
+        index_element:
+          column_reference:
+            naked_identifier: code
+        end_bracket: )
+    - keyword: TABLESPACE
+    - tablespace_reference:
+        naked_identifier: indexspace
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/postgres/postgres_create_materialized_view.sql b/test/fixtures/dialects/postgres/postgres_create_materialized_view.sql
index 24cab81..cf9f248 100644
--- a/test/fixtures/dialects/postgres/postgres_create_materialized_view.sql
+++ b/test/fixtures/dialects/postgres/postgres_create_materialized_view.sql
@@ -100,8 +100,8 @@ WITH NO DATA;
 
 CREATE MATERIALIZED VIEW my_mat_view
 USING heap
-TABLESPACE pg_default
 WITH (prop_a = 1, prob_b = 'some_value', prop_c = FALSE, prop_d)
+TABLESPACE pg_default
 AS
 (
     SELECT
@@ -116,8 +116,8 @@ WITH DATA;
 
 CREATE MATERIALIZED VIEW IF NOT EXISTS my_mat_view
 USING heap
-TABLESPACE pg_default
 WITH (prop_a = 1, prob_b = 'some_value', prop_c = FALSE, prop_d)
+TABLESPACE pg_default
 AS
 (
     SELECT
@@ -152,6 +152,12 @@ AS
 SELECT a
 FROM my_table;
 
+CREATE MATERIALIZED VIEW my_mat_view
+WITH (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC)
+AS
+SELECT a
+FROM my_table;
+
 CREATE OR REPLACE MATERIALIZED VIEW my_mat_view AS
 SELECT a
 FROM my_table;
diff --git a/test/fixtures/dialects/postgres/postgres_create_materialized_view.yml b/test/fixtures/dialects/postgres/postgres_create_materialized_view.yml
index ccf0025..2ae793e 100644
--- a/test/fixtures/dialects/postgres/postgres_create_materialized_view.yml
+++ b/test/fixtures/dialects/postgres/postgres_create_materialized_view.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 241ca7aedfaff431b1ffae209bc65103e8caf6896b36324a8d4fb38b27c4bfb4
+_hash: dfaeec3bfd6fd9a2a771d1e41fc2fe9a05ee677f498f73a239f3b1c87f8fc24a
 file:
 - statement:
     create_materialized_view_statement:
@@ -429,29 +429,34 @@ file:
         naked_identifier: my_mat_view
     - keyword: USING
     - parameter: heap
+    - keyword: WITH
+    - relation_options:
+        bracketed:
+        - start_bracket: (
+        - relation_option:
+            properties_naked_identifier: prop_a
+            comparison_operator:
+              raw_comparison_operator: '='
+            numeric_literal: '1'
+        - comma: ','
+        - relation_option:
+            properties_naked_identifier: prob_b
+            comparison_operator:
+              raw_comparison_operator: '='
+            quoted_literal: "'some_value'"
+        - comma: ','
+        - relation_option:
+            properties_naked_identifier: prop_c
+            comparison_operator:
+              raw_comparison_operator: '='
+            boolean_literal: 'FALSE'
+        - comma: ','
+        - relation_option:
+            properties_naked_identifier: prop_d
+        - end_bracket: )
     - keyword: TABLESPACE
     - tablespace_reference:
         naked_identifier: pg_default
-    - keyword: WITH
-    - bracketed:
-      - start_bracket: (
-      - parameter: prop_a
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - numeric_literal: '1'
-      - comma: ','
-      - parameter: prob_b
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - quoted_literal: "'some_value'"
-      - comma: ','
-      - parameter: prop_c
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - boolean_literal: 'FALSE'
-      - comma: ','
-      - parameter: prop_d
-      - end_bracket: )
     - keyword: AS
     - bracketed:
         start_bracket: (
@@ -524,29 +529,34 @@ file:
         naked_identifier: my_mat_view
     - keyword: USING
     - parameter: heap
+    - keyword: WITH
+    - relation_options:
+        bracketed:
+        - start_bracket: (
+        - relation_option:
+            properties_naked_identifier: prop_a
+            comparison_operator:
+              raw_comparison_operator: '='
+            numeric_literal: '1'
+        - comma: ','
+        - relation_option:
+            properties_naked_identifier: prob_b
+            comparison_operator:
+              raw_comparison_operator: '='
+            quoted_literal: "'some_value'"
+        - comma: ','
+        - relation_option:
+            properties_naked_identifier: prop_c
+            comparison_operator:
+              raw_comparison_operator: '='
+            boolean_literal: 'FALSE'
+        - comma: ','
+        - relation_option:
+            properties_naked_identifier: prop_d
+        - end_bracket: )
     - keyword: TABLESPACE
     - tablespace_reference:
         naked_identifier: pg_default
-    - keyword: WITH
-    - bracketed:
-      - start_bracket: (
-      - parameter: prop_a
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - numeric_literal: '1'
-      - comma: ','
-      - parameter: prob_b
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - quoted_literal: "'some_value'"
-      - comma: ','
-      - parameter: prop_c
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - boolean_literal: 'FALSE'
-      - comma: ','
-      - parameter: prop_d
-      - end_bracket: )
     - keyword: AS
     - bracketed:
         start_bracket: (
@@ -683,12 +693,82 @@ file:
     - table_reference:
         naked_identifier: my_mat_view
     - keyword: WITH
-    - bracketed:
-      - start_bracket: (
-      - parameter: left
-      - dot: .
-      - parameter: right
-      - end_bracket: )
+    - relation_options:
+        bracketed:
+          start_bracket: (
+          relation_option:
+          - properties_naked_identifier: left
+          - dot: .
+          - properties_naked_identifier: right
+          end_bracket: )
+    - keyword: AS
+    - select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            column_reference:
+              naked_identifier: a
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: my_table
+- statement_terminator: ;
+- statement:
+    create_materialized_view_statement:
+    - keyword: CREATE
+    - keyword: MATERIALIZED
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: my_mat_view
+    - keyword: WITH
+    - relation_options:
+        bracketed:
+        - start_bracket: (
+        - relation_option:
+            properties_naked_identifier: opt1
+        - comma: ','
+        - relation_option:
+            properties_naked_identifier: opt2
+            comparison_operator:
+              raw_comparison_operator: '='
+            numeric_literal: '5'
+        - comma: ','
+        - relation_option:
+            properties_naked_identifier: opt3
+            comparison_operator:
+              raw_comparison_operator: '='
+            quoted_literal: "'str'"
+        - comma: ','
+        - relation_option:
+          - properties_naked_identifier: ns
+          - dot: .
+          - properties_naked_identifier: opt4
+        - comma: ','
+        - relation_option:
+          - properties_naked_identifier: ns
+          - dot: .
+          - properties_naked_identifier: opt5
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - numeric_literal: '6'
+        - comma: ','
+        - relation_option:
+          - properties_naked_identifier: ns
+          - dot: .
+          - properties_naked_identifier: opt6
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - quoted_literal: "'str'"
+        - comma: ','
+        - relation_option:
+          - properties_naked_identifier: opt7
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - properties_naked_identifier: ASC
+        - end_bracket: )
     - keyword: AS
     - select_statement:
         select_clause:
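
The hunks above show the 2.0 restructuring of materialized-view WITH clauses: the old flat `parameter` tokens sitting directly under `bracketed` are replaced by a `relation_options` segment whose `relation_option` children pair a `properties_naked_identifier` (optionally dotted, e.g. ns.opt4) with an optional `=` and a value. Reconstructed from the final parse tree above (a sketch of the fixture SQL, not a verbatim quote of it), the statement being parsed is along these lines:

    CREATE MATERIALIZED VIEW my_mat_view
    WITH (opt1, opt2 = 5, opt3 = 'str', ns.opt4, ns.opt5 = 6,
          ns.opt6 = 'str', opt7 = ASC)
    AS SELECT a FROM my_table;
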
diff --git a/test/fixtures/dialects/postgres/postgres_create_publication.sql b/test/fixtures/dialects/postgres/postgres_create_publication.sql
new file mode 100644
index 0000000..a0764bf
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_create_publication.sql
@@ -0,0 +1,56 @@
+CREATE PUBLICATION abc;
+
+CREATE PUBLICATION abc FOR ALL TABLES;
+
+CREATE PUBLICATION abc FOR TABLE def;
+
+CREATE PUBLICATION abc FOR TABLE def, sch.ghi;
+
+CREATE PUBLICATION abc FOR TABLE def, TABLE sch.ghi;
+
+CREATE PUBLICATION abc FOR TABLE def*;
+
+CREATE PUBLICATION abc FOR
+    TABLE a,
+    TABLE aa, ab, ac,
+    TABLE ONLY b,
+    TABLE c*,
+    TABLE ca*, cb*,
+    TABLE ONLY (d),
+    TABLE e (col1),
+    TABLE f (col2, col3),
+    TABLE g* (col4, col5),
+    TABLE h WHERE (col6 > col7),
+    TABLE i (col8, col9) WHERE (col10 > col11),
+    TABLES IN SCHEMA j,
+    TABLES IN SCHEMA k,
+    TABLES IN SCHEMA CURRENT_SCHEMA, l, m,
+    TABLES IN SCHEMA n, o, p;
+
+CREATE PUBLICATION abc FOR TABLE a, b
+    WITH (publish = 'insert,update', publish_via_partition_root = TRUE);
+
+CREATE PUBLICATION abc FOR TABLE a, b
+    WITH (publish_via_partition_root = TRUE);
+
+CREATE PUBLICATION abc FOR TABLE a, b
+    WITH (publish = 'insert,update');
+
+CREATE PUBLICATION abc WITH (publish = 'insert,update');
+
+-- examples from https://www.postgresql.org/docs/15/sql-createpublication.html
+
+CREATE PUBLICATION mypublication FOR TABLE users, departments;
+
+CREATE PUBLICATION active_departments FOR TABLE departments WHERE (active IS TRUE);
+
+CREATE PUBLICATION alltables FOR ALL TABLES;
+
+CREATE PUBLICATION insert_only FOR TABLE mydata
+    WITH (publish = 'insert');
+
+CREATE PUBLICATION production_publication FOR TABLE users, departments, TABLES IN SCHEMA production;
+
+CREATE PUBLICATION sales_publication FOR TABLES IN SCHEMA marketing, sales;
+
+CREATE PUBLICATION users_filtered FOR TABLE users (user_id, firstname);
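
The new fixture exercises most of the CREATE PUBLICATION grammar: FOR ALL TABLES, comma-separated TABLE lists with ONLY, a trailing `*` for descendant tables, column lists, WHERE row filters, TABLES IN SCHEMA (including CURRENT_SCHEMA), and WITH (...) publication parameters. To inspect how SQLFluff renders any of these trees, the parse command works directly on the fixture file:

    sqlfluff parse --dialect postgres test/fixtures/dialects/postgres/postgres_create_publication.sql
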
diff --git a/test/fixtures/dialects/postgres/postgres_create_publication.yml b/test/fixtures/dialects/postgres/postgres_create_publication.yml
new file mode 100644
index 0000000..84dd115
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_create_publication.yml
@@ -0,0 +1,513 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: dfab8d08af80c56bc6be0c0658fedcccb5142a9c490c88f6198973e7ef322b5e
+file:
+- statement:
+    create_publication_statement:
+    - keyword: CREATE
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+- statement_terminator: ;
+- statement:
+    create_publication_statement:
+    - keyword: CREATE
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: FOR
+    - keyword: ALL
+    - keyword: TABLES
+- statement_terminator: ;
+- statement:
+    create_publication_statement:
+    - keyword: CREATE
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: FOR
+    - publication_objects:
+        keyword: TABLE
+        publication_table:
+          table_reference:
+            naked_identifier: def
+- statement_terminator: ;
+- statement:
+    create_publication_statement:
+    - keyword: CREATE
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: FOR
+    - publication_objects:
+      - keyword: TABLE
+      - publication_table:
+          table_reference:
+            naked_identifier: def
+      - comma: ','
+      - publication_table:
+          table_reference:
+          - naked_identifier: sch
+          - dot: .
+          - naked_identifier: ghi
+- statement_terminator: ;
+- statement:
+    create_publication_statement:
+    - keyword: CREATE
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: FOR
+    - publication_objects:
+        keyword: TABLE
+        publication_table:
+          table_reference:
+            naked_identifier: def
+    - comma: ','
+    - publication_objects:
+        keyword: TABLE
+        publication_table:
+          table_reference:
+          - naked_identifier: sch
+          - dot: .
+          - naked_identifier: ghi
+- statement_terminator: ;
+- statement:
+    create_publication_statement:
+    - keyword: CREATE
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: FOR
+    - publication_objects:
+        keyword: TABLE
+        publication_table:
+          table_reference:
+            naked_identifier: def
+          star: '*'
+- statement_terminator: ;
+- statement:
+    create_publication_statement:
+    - keyword: CREATE
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: FOR
+    - publication_objects:
+        keyword: TABLE
+        publication_table:
+          table_reference:
+            naked_identifier: a
+    - comma: ','
+    - publication_objects:
+      - keyword: TABLE
+      - publication_table:
+          table_reference:
+            naked_identifier: aa
+      - comma: ','
+      - publication_table:
+          table_reference:
+            naked_identifier: ab
+      - comma: ','
+      - publication_table:
+          table_reference:
+            naked_identifier: ac
+    - comma: ','
+    - publication_objects:
+        keyword: TABLE
+        publication_table:
+          keyword: ONLY
+          table_reference:
+            naked_identifier: b
+    - comma: ','
+    - publication_objects:
+        keyword: TABLE
+        publication_table:
+          table_reference:
+            naked_identifier: c
+          star: '*'
+    - comma: ','
+    - publication_objects:
+      - keyword: TABLE
+      - publication_table:
+          table_reference:
+            naked_identifier: ca
+          star: '*'
+      - comma: ','
+      - publication_table:
+          table_reference:
+            naked_identifier: cb
+          star: '*'
+    - comma: ','
+    - publication_objects:
+        keyword: TABLE
+        publication_table:
+          keyword: ONLY
+          bracketed:
+            start_bracket: (
+            table_reference:
+              naked_identifier: d
+            end_bracket: )
+    - comma: ','
+    - publication_objects:
+        keyword: TABLE
+        publication_table:
+          table_reference:
+            naked_identifier: e
+          bracketed:
+            start_bracket: (
+            column_reference:
+              naked_identifier: col1
+            end_bracket: )
+    - comma: ','
+    - publication_objects:
+        keyword: TABLE
+        publication_table:
+          table_reference:
+            naked_identifier: f
+          bracketed:
+          - start_bracket: (
+          - column_reference:
+              naked_identifier: col2
+          - comma: ','
+          - column_reference:
+              naked_identifier: col3
+          - end_bracket: )
+    - comma: ','
+    - publication_objects:
+        keyword: TABLE
+        publication_table:
+          table_reference:
+            naked_identifier: g
+          star: '*'
+          bracketed:
+          - start_bracket: (
+          - column_reference:
+              naked_identifier: col4
+          - comma: ','
+          - column_reference:
+              naked_identifier: col5
+          - end_bracket: )
+    - comma: ','
+    - publication_objects:
+        keyword: TABLE
+        publication_table:
+          table_reference:
+            naked_identifier: h
+          keyword: WHERE
+          bracketed:
+            start_bracket: (
+            expression:
+            - column_reference:
+                naked_identifier: col6
+            - comparison_operator:
+                raw_comparison_operator: '>'
+            - column_reference:
+                naked_identifier: col7
+            end_bracket: )
+    - comma: ','
+    - publication_objects:
+        keyword: TABLE
+        publication_table:
+        - table_reference:
+            naked_identifier: i
+        - bracketed:
+          - start_bracket: (
+          - column_reference:
+              naked_identifier: col8
+          - comma: ','
+          - column_reference:
+              naked_identifier: col9
+          - end_bracket: )
+        - keyword: WHERE
+        - bracketed:
+            start_bracket: (
+            expression:
+            - column_reference:
+                naked_identifier: col10
+            - comparison_operator:
+                raw_comparison_operator: '>'
+            - column_reference:
+                naked_identifier: col11
+            end_bracket: )
+    - comma: ','
+    - publication_objects:
+      - keyword: TABLES
+      - keyword: IN
+      - keyword: SCHEMA
+      - schema_reference:
+          naked_identifier: j
+    - comma: ','
+    - publication_objects:
+      - keyword: TABLES
+      - keyword: IN
+      - keyword: SCHEMA
+      - schema_reference:
+          naked_identifier: k
+    - comma: ','
+    - publication_objects:
+      - keyword: TABLES
+      - keyword: IN
+      - keyword: SCHEMA
+      - keyword: CURRENT_SCHEMA
+      - comma: ','
+      - schema_reference:
+          naked_identifier: l
+      - comma: ','
+      - schema_reference:
+          naked_identifier: m
+    - comma: ','
+    - publication_objects:
+      - keyword: TABLES
+      - keyword: IN
+      - keyword: SCHEMA
+      - schema_reference:
+          naked_identifier: n
+      - comma: ','
+      - schema_reference:
+          naked_identifier: o
+      - comma: ','
+      - schema_reference:
+          naked_identifier: p
+- statement_terminator: ;
+- statement:
+    create_publication_statement:
+    - keyword: CREATE
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: FOR
+    - publication_objects:
+      - keyword: TABLE
+      - publication_table:
+          table_reference:
+            naked_identifier: a
+      - comma: ','
+      - publication_table:
+          table_reference:
+            naked_identifier: b
+    - keyword: WITH
+    - definition_parameters:
+        bracketed:
+        - start_bracket: (
+        - definition_parameter:
+            properties_naked_identifier: publish
+            comparison_operator:
+              raw_comparison_operator: '='
+            quoted_literal: "'insert,update'"
+        - comma: ','
+        - definition_parameter:
+            properties_naked_identifier: publish_via_partition_root
+            comparison_operator:
+              raw_comparison_operator: '='
+            boolean_literal: 'TRUE'
+        - end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_publication_statement:
+    - keyword: CREATE
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: FOR
+    - publication_objects:
+      - keyword: TABLE
+      - publication_table:
+          table_reference:
+            naked_identifier: a
+      - comma: ','
+      - publication_table:
+          table_reference:
+            naked_identifier: b
+    - keyword: WITH
+    - definition_parameters:
+        bracketed:
+          start_bracket: (
+          definition_parameter:
+            properties_naked_identifier: publish_via_partition_root
+            comparison_operator:
+              raw_comparison_operator: '='
+            boolean_literal: 'TRUE'
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_publication_statement:
+    - keyword: CREATE
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: FOR
+    - publication_objects:
+      - keyword: TABLE
+      - publication_table:
+          table_reference:
+            naked_identifier: a
+      - comma: ','
+      - publication_table:
+          table_reference:
+            naked_identifier: b
+    - keyword: WITH
+    - definition_parameters:
+        bracketed:
+          start_bracket: (
+          definition_parameter:
+            properties_naked_identifier: publish
+            comparison_operator:
+              raw_comparison_operator: '='
+            quoted_literal: "'insert,update'"
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_publication_statement:
+    - keyword: CREATE
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: WITH
+    - definition_parameters:
+        bracketed:
+          start_bracket: (
+          definition_parameter:
+            properties_naked_identifier: publish
+            comparison_operator:
+              raw_comparison_operator: '='
+            quoted_literal: "'insert,update'"
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_publication_statement:
+    - keyword: CREATE
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: mypublication
+    - keyword: FOR
+    - publication_objects:
+      - keyword: TABLE
+      - publication_table:
+          table_reference:
+            naked_identifier: users
+      - comma: ','
+      - publication_table:
+          table_reference:
+            naked_identifier: departments
+- statement_terminator: ;
+- statement:
+    create_publication_statement:
+    - keyword: CREATE
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: active_departments
+    - keyword: FOR
+    - publication_objects:
+        keyword: TABLE
+        publication_table:
+          table_reference:
+            naked_identifier: departments
+          keyword: WHERE
+          bracketed:
+            start_bracket: (
+            expression:
+              column_reference:
+                naked_identifier: active
+              keyword: IS
+              boolean_literal: 'TRUE'
+            end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_publication_statement:
+    - keyword: CREATE
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: alltables
+    - keyword: FOR
+    - keyword: ALL
+    - keyword: TABLES
+- statement_terminator: ;
+- statement:
+    create_publication_statement:
+    - keyword: CREATE
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: insert_only
+    - keyword: FOR
+    - publication_objects:
+        keyword: TABLE
+        publication_table:
+          table_reference:
+            naked_identifier: mydata
+    - keyword: WITH
+    - definition_parameters:
+        bracketed:
+          start_bracket: (
+          definition_parameter:
+            properties_naked_identifier: publish
+            comparison_operator:
+              raw_comparison_operator: '='
+            quoted_literal: "'insert'"
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_publication_statement:
+    - keyword: CREATE
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: production_publication
+    - keyword: FOR
+    - publication_objects:
+      - keyword: TABLE
+      - publication_table:
+          table_reference:
+            naked_identifier: users
+      - comma: ','
+      - publication_table:
+          table_reference:
+            naked_identifier: departments
+    - comma: ','
+    - publication_objects:
+      - keyword: TABLES
+      - keyword: IN
+      - keyword: SCHEMA
+      - schema_reference:
+          naked_identifier: production
+- statement_terminator: ;
+- statement:
+    create_publication_statement:
+    - keyword: CREATE
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: sales_publication
+    - keyword: FOR
+    - publication_objects:
+      - keyword: TABLES
+      - keyword: IN
+      - keyword: SCHEMA
+      - schema_reference:
+          naked_identifier: marketing
+      - comma: ','
+      - schema_reference:
+          naked_identifier: sales
+- statement_terminator: ;
+- statement:
+    create_publication_statement:
+    - keyword: CREATE
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: users_filtered
+    - keyword: FOR
+    - publication_objects:
+        keyword: TABLE
+        publication_table:
+          table_reference:
+            naked_identifier: users
+          bracketed:
+          - start_bracket: (
+          - column_reference:
+              naked_identifier: user_id
+          - comma: ','
+          - column_reference:
+              naked_identifier: firstname
+          - end_bracket: )
+- statement_terminator: ;
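
As the generated-file header says, these YAML trees are produced from the SQL fixtures, and the `_hash` field ties each tree to its source; the test suite recomputes the hash and fails when the two drift apart. After adding or editing fixture SQL, regenerate from the repository root:

    python test/generate_parse_fixture_yml.py
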
diff --git a/test/fixtures/dialects/postgres/postgres_create_schema.sql b/test/fixtures/dialects/postgres/postgres_create_schema.sql
new file mode 100644
index 0000000..a6381ff
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_create_schema.sql
@@ -0,0 +1,11 @@
+CREATE SCHEMA asdf;
+
+CREATE SCHEMA IF NOT EXISTS asdf;
+
+CREATE SCHEMA asdf AUTHORIZATION bob;
+
+CREATE SCHEMA AUTHORIZATION bob;
+
+CREATE SCHEMA IF NOT EXISTS asdf AUTHORIZATION bob;
+
+CREATE SCHEMA IF NOT EXISTS AUTHORIZATION bob;
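
Six CREATE SCHEMA forms are covered here: bare, IF NOT EXISTS, AUTHORIZATION with and without an explicit schema name, and the combinations of the two. A minimal sketch of checking one of them through the Python API (assuming sqlfluff >= 2.0 is installed):

    import sqlfluff

    # lint() returns a list of violation dicts; a statement the postgres
    # dialect cannot parse surfaces as a PRS (parsing) violation
    print(sqlfluff.lint(
        "CREATE SCHEMA IF NOT EXISTS asdf AUTHORIZATION bob;",
        dialect="postgres",
    ))
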
diff --git a/test/fixtures/dialects/postgres/postgres_create_schema.yml b/test/fixtures/dialects/postgres/postgres_create_schema.yml
new file mode 100644
index 0000000..a28c6ae
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_create_schema.yml
@@ -0,0 +1,66 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: fb7e7b5ffc233fb6fb4275a3a828a7ec400f50bc68a49014a36be30461ccd0c7
+file:
+- statement:
+    create_schema_statement:
+    - keyword: CREATE
+    - keyword: SCHEMA
+    - schema_reference:
+        naked_identifier: asdf
+- statement_terminator: ;
+- statement:
+    create_schema_statement:
+    - keyword: CREATE
+    - keyword: SCHEMA
+    - keyword: IF
+    - keyword: NOT
+    - keyword: EXISTS
+    - schema_reference:
+        naked_identifier: asdf
+- statement_terminator: ;
+- statement:
+    create_schema_statement:
+    - keyword: CREATE
+    - keyword: SCHEMA
+    - schema_reference:
+        naked_identifier: asdf
+    - keyword: AUTHORIZATION
+    - role_reference:
+        naked_identifier: bob
+- statement_terminator: ;
+- statement:
+    create_schema_statement:
+    - keyword: CREATE
+    - keyword: SCHEMA
+    - keyword: AUTHORIZATION
+    - role_reference:
+        naked_identifier: bob
+- statement_terminator: ;
+- statement:
+    create_schema_statement:
+    - keyword: CREATE
+    - keyword: SCHEMA
+    - keyword: IF
+    - keyword: NOT
+    - keyword: EXISTS
+    - schema_reference:
+        naked_identifier: asdf
+    - keyword: AUTHORIZATION
+    - role_reference:
+        naked_identifier: bob
+- statement_terminator: ;
+- statement:
+    create_schema_statement:
+    - keyword: CREATE
+    - keyword: SCHEMA
+    - keyword: IF
+    - keyword: NOT
+    - keyword: EXISTS
+    - keyword: AUTHORIZATION
+    - role_reference:
+        naked_identifier: bob
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/postgres/postgres_create_table.sql b/test/fixtures/dialects/postgres/postgres_create_table.sql
index a927043..bfda32b 100644
--- a/test/fixtures/dialects/postgres/postgres_create_table.sql
+++ b/test/fixtures/dialects/postgres/postgres_create_table.sql
@@ -17,9 +17,9 @@ CREATE TABLE distributors (
      name   varchar(40) NOT NULL CHECK (name <> '')
 );
 
---CREATE TABLE array_int (
---    vector  int[][]
---);
+CREATE TABLE array_int (
+   vector  int[][]
+);
 
 --CREATE TABLE films (
 --    code        char(5),
@@ -33,7 +33,8 @@ CREATE TABLE distributors (
 
 CREATE TABLE distributors (
     did     integer CHECK (did > 100),
-    name    varchar(40)
+    name    varchar(40),
+    long_varying char varying(100)
 );
 
 CREATE TABLE distributors (
@@ -225,12 +226,19 @@ CREATE TABLE users (
     other_id INTEGER REFERENCES groups (group_id) MATCH SIMPLE
 );
 
-CREATE TABLE orders
-(
-id bigint NOT NULL DEFAULT NEXTVAL('orders_id_seq'::regclass),
-constraint_collate_constraints text UNIQUE COLLATE numeric NOT NULL PRIMARY KEY,
-constraints_collate text NOT NULL UNIQUE COLLATE numeric,
-collate_constraints text COLLATE numeric NOT NULL UNIQUE
+CREATE TABLE orders (
+    id bigint NOT NULL DEFAULT NEXTVAL('orders_id_seq'::regclass),
+    constraint_collate_constraints text UNIQUE COLLATE numeric NOT NULL PRIMARY KEY,
+    constraints_collate text NOT NULL UNIQUE COLLATE numeric,
+    collate_constraints text COLLATE numeric NOT NULL UNIQUE,
+    nulls_distinct text UNIQUE NULLS DISTINCT,
+    nulls_not_distinct text UNIQUE NULLS NOT DISTINCT,
+    everything text UNIQUE NULLS DISTINCT WITH (arg1=3, arg5='str')
+        USING INDEX TABLESPACE tblspace COLLATE numeric
+);
+
+CREATE TABLE primary_key_options (
+    everything int PRIMARY KEY WITH (arg1=3, arg5='str') USING INDEX TABLESPACE tblspace NOT NULL
 );
 
 
@@ -239,3 +247,95 @@ CREATE TABLE IF NOT EXISTS quotas.usage(foo int);
 
 -- Use non-reserved `usage` word as a column identifier
 CREATE TABLE IF NOT EXISTS quotas.my_table(usage int);
+
+-- NOT NULL both before and after a default constraint
+CREATE TABLE with_constraints1 (
+    col_1 boolean NOT NULL DEFAULT false
+);
+CREATE TABLE with_constraints2 (
+    col_1 boolean DEFAULT false NOT NULL
+);
+
+-- default constraint expression
+CREATE TABLE with_constraints3 (
+    col_1 int DEFAULT (1 + 2) * (3 + 4) NOT NULL
+);
+CREATE TABLE with_constraints33 (
+    col_1 int DEFAULT 1 + 2 * 3 + 4 NOT NULL
+);
+CREATE TABLE with_constraints4 (
+    col_1 int DEFAULT (1 + 2 * 3 + 4) NOT NULL
+);
+CREATE TABLE with_constraints5 (
+    col_1 bool DEFAULT (1 NOT IN (3, 4)) NOT NULL
+);
+CREATE TABLE with_constraints6 (
+    col_1 bool NOT NULL DEFAULT (5 NOT IN (5, 6))
+);
+
+CREATE TABLE test_with_storage_param (
+    col_1 boolean
+) WITH (autovacuum_enabled=true);
+
+
+CREATE TABLE test_with_storage_params (
+    col_1 boolean
+) WITH (autovacuum_enabled=true, vacuum_truncate=false);
+
+CREATE TABLE tbl (
+    -- All forms of character data types listed at:
+    -- https://www.postgresql.org/docs/current/datatype-character.html
+    col_char_varying_unlimited character varying,
+    col_char_varying_limited character varying(50),
+    col_varchar_unlimited varchar,
+    col_varchar_limited varchar(50),
+
+    col_character_default character,
+    col_character_specified character(50),
+    col_char_default char,
+    col_char_specified character(50),
+
+    col_text text,
+
+    -- some types you'll find in pg_catalog
+    col_system_char "char", -- this is NOT the same as unquoted char
+    col_name name
+);
+
+-- Test out EXCLUDE constraints, as well as other more advanced index parameters on constraints
+
+-- from https://www.postgresql.org/docs/15/rangetypes.html: basic usage
+CREATE TABLE reservation (
+    during tsrange,
+    EXCLUDE USING gist (during WITH &&)
+);
+CREATE TABLE room_reservation (
+    room text,
+    during tsrange,
+    EXCLUDE USING gist (room WITH =, during WITH &&)
+);
+
+-- all the gnarly options: not every option is valid, but this will parse successfully on PG 15.
+CREATE TABLE no_using (
+    field text,
+    EXCLUDE (field WITH =) NOT DEFERRABLE INITIALLY IMMEDIATE NO INHERIT
+);
+CREATE TABLE many_options (
+    field text,
+    EXCLUDE USING gist (
+        one WITH =,
+        nulls_opclass nulls WITH =,
+        nulls_last NULLS LAST WITH =,
+        two COLLATE "en-US" opclass
+            (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC)
+            ASC NULLS FIRST WITH =,
+        (two + 5) WITH =,
+        myfunc(a, b) WITH =,
+        myfunc_opclass(a, b) fop (opt=1, foo=2) WITH =,
+        only_opclass opclass WITH =,
+        desc_order DESC WITH =
+    ) INCLUDE (a, b) WITH (idx_num = 5, idx_str = 'idx_value', idx_kw=DESC)
+        USING INDEX TABLESPACE tblspc
+        WHERE (field != 'def')
+        DEFERRABLE INITIALLY DEFERRED
+);
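
This fixture now covers a wide slice of Postgres 15 CREATE TABLE syntax in one file: array column types, char varying(n), DEFAULT expressions with and without enclosing parentheses, UNIQUE NULLS [NOT] DISTINCT with index parameters, WITH storage parameters, the pg_catalog character types (including the quoted "char" system type, which is not the same as unquoted char), and EXCLUDE constraints with the full set of index-element options. One practical note on the range examples taken from the PG docs: parsing aside, running them on a real server needs the btree_gist extension for gist equality on scalar columns, e.g.

    CREATE EXTENSION btree_gist;
    CREATE TABLE room_reservation (
        room text,
        during tsrange,
        EXCLUDE USING gist (room WITH =, during WITH &&)
    );
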
diff --git a/test/fixtures/dialects/postgres/postgres_create_table.yml b/test/fixtures/dialects/postgres/postgres_create_table.yml
index 52f9a84..8887465 100644
--- a/test/fixtures/dialects/postgres/postgres_create_table.yml
+++ b/test/fixtures/dialects/postgres/postgres_create_table.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: c32acebaebd1047787a609acea7ae54506c48a8c08a4fdd1b513c6f284b5725b
+_hash: 018c70d55089f99a37e106fa0831d6f42012e7b23f5a4ddfdc15b230df2d45a8
 file:
 - statement:
     create_table_statement:
@@ -47,10 +47,11 @@ file:
           naked_identifier: name
       - data_type:
           keyword: varchar
-          bracketed:
-            start_bracket: (
-            numeric_literal: '40'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '40'
+              end_bracket: )
       - column_constraint_segment:
         - keyword: NOT
         - keyword: 'NULL'
@@ -68,6 +69,24 @@ file:
             end_bracket: )
       - end_bracket: )
 - statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: array_int
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: vector
+        data_type:
+        - keyword: int
+        - start_square_bracket: '['
+        - end_square_bracket: ']'
+        - start_square_bracket: '['
+        - end_square_bracket: ']'
+        end_bracket: )
+- statement_terminator: ;
 - statement:
     create_table_statement:
     - keyword: CREATE
@@ -96,10 +115,22 @@ file:
           naked_identifier: name
       - data_type:
           keyword: varchar
-          bracketed:
-            start_bracket: (
-            numeric_literal: '40'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '40'
+              end_bracket: )
+      - comma: ','
+      - column_reference:
+          naked_identifier: long_varying
+      - data_type:
+        - keyword: char
+        - keyword: varying
+        - bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '100'
+              end_bracket: )
       - end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -119,10 +150,11 @@ file:
           naked_identifier: name
       - data_type:
           keyword: varchar
-          bracketed:
-            start_bracket: (
-            numeric_literal: '40'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '40'
+              end_bracket: )
       - comma: ','
       - table_constraint:
         - keyword: CONSTRAINT
@@ -164,10 +196,11 @@ file:
           naked_identifier: name
       - data_type:
           keyword: varchar
-          bracketed:
-            start_bracket: (
-            numeric_literal: '40'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '40'
+              end_bracket: )
       - comma: ','
       - table_constraint:
         - keyword: PRIMARY
@@ -199,10 +232,11 @@ file:
           naked_identifier: name
       - data_type:
           keyword: varchar
-          bracketed:
-            start_bracket: (
-            numeric_literal: '40'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '40'
+              end_bracket: )
       - end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -217,10 +251,11 @@ file:
           naked_identifier: name
       - data_type:
           keyword: varchar
-          bracketed:
-            start_bracket: (
-            numeric_literal: '40'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '40'
+              end_bracket: )
       - column_constraint_segment:
           keyword: DEFAULT
           quoted_literal: "'Luso Films'"
@@ -273,10 +308,11 @@ file:
           naked_identifier: name
       - data_type:
           keyword: varchar
-          bracketed:
-            start_bracket: (
-            numeric_literal: '40'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '40'
+              end_bracket: )
       - column_constraint_segment:
         - keyword: NOT
         - keyword: 'NULL'
@@ -299,10 +335,11 @@ file:
           naked_identifier: name
       - data_type:
           keyword: varchar
-          bracketed:
-            start_bracket: (
-            numeric_literal: '40'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '40'
+              end_bracket: )
       - column_constraint_segment:
           keyword: UNIQUE
       - end_bracket: )
@@ -324,10 +361,11 @@ file:
           naked_identifier: name
       - data_type:
           keyword: varchar
-          bracketed:
-            start_bracket: (
-            numeric_literal: '40'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '40'
+              end_bracket: )
       - comma: ','
       - table_constraint:
           keyword: UNIQUE
@@ -355,10 +393,11 @@ file:
           naked_identifier: name
       - data_type:
           keyword: varchar
-          bracketed:
-            start_bracket: (
-            numeric_literal: '40'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '40'
+              end_bracket: )
       - comma: ','
       - table_constraint:
           keyword: UNIQUE
@@ -369,22 +408,26 @@ file:
             end_bracket: )
           index_parameters:
             keyword: WITH
-            bracketed:
-              start_bracket: (
-              parameter: fillfactor
-              comparison_operator:
-                raw_comparison_operator: '='
-              numeric_literal: '70'
-              end_bracket: )
+            definition_parameters:
+              bracketed:
+                start_bracket: (
+                definition_parameter:
+                  properties_naked_identifier: fillfactor
+                  comparison_operator:
+                    raw_comparison_operator: '='
+                  numeric_literal: '70'
+                end_bracket: )
       - end_bracket: )
     - keyword: WITH
-    - bracketed:
-        start_bracket: (
-        parameter: fillfactor
-        comparison_operator:
-          raw_comparison_operator: '='
-        numeric_literal: '70'
-        end_bracket: )
+    - relation_options:
+        bracketed:
+          start_bracket: (
+          relation_option:
+            properties_naked_identifier: fillfactor
+            comparison_operator:
+              raw_comparison_operator: '='
+            numeric_literal: '70'
+          end_bracket: )
 - statement_terminator: ;
 - statement:
     create_table_statement:
@@ -422,11 +465,11 @@ file:
     - keyword: AS
     - bracketed:
       - start_bracket: (
-      - raw: name
-      - raw: text
+      - code: name
+      - code: text
       - comma: ','
-      - raw: salary
-      - raw: numeric
+      - code: salary
+      - code: numeric
       - end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -1202,12 +1245,11 @@ file:
         - keyword: 'NULL'
       - column_constraint_segment:
           keyword: DEFAULT
-          expression:
-            cast_expression:
-              quoted_literal: "'-1'"
-              casting_operator: '::'
-              data_type:
-                keyword: integer
+          cast_expression:
+            quoted_literal: "'-1'"
+            casting_operator: '::'
+            data_type:
+              keyword: integer
       - end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -1225,10 +1267,11 @@ file:
       - data_type:
         - keyword: character
         - keyword: varying
-        - bracketed:
-            start_bracket: (
-            numeric_literal: '40'
-            end_bracket: )
+        - bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '40'
+              end_bracket: )
       - column_constraint_segment:
         - keyword: NOT
         - keyword: 'NULL'
@@ -1407,6 +1450,99 @@ file:
         - keyword: 'NULL'
       - column_constraint_segment:
           keyword: UNIQUE
+      - comma: ','
+      - column_reference:
+          naked_identifier: nulls_distinct
+      - data_type:
+          keyword: text
+      - column_constraint_segment:
+        - keyword: UNIQUE
+        - keyword: NULLS
+        - keyword: DISTINCT
+      - comma: ','
+      - column_reference:
+          naked_identifier: nulls_not_distinct
+      - data_type:
+          keyword: text
+      - column_constraint_segment:
+        - keyword: UNIQUE
+        - keyword: NULLS
+        - keyword: NOT
+        - keyword: DISTINCT
+      - comma: ','
+      - column_reference:
+          naked_identifier: everything
+      - data_type:
+          keyword: text
+      - column_constraint_segment:
+        - keyword: UNIQUE
+        - keyword: NULLS
+        - keyword: DISTINCT
+        - keyword: WITH
+        - definition_parameters:
+            bracketed:
+            - start_bracket: (
+            - definition_parameter:
+                properties_naked_identifier: arg1
+                comparison_operator:
+                  raw_comparison_operator: '='
+                numeric_literal: '3'
+            - comma: ','
+            - definition_parameter:
+                properties_naked_identifier: arg5
+                comparison_operator:
+                  raw_comparison_operator: '='
+                quoted_literal: "'str'"
+            - end_bracket: )
+        - keyword: USING
+        - keyword: INDEX
+        - keyword: TABLESPACE
+        - tablespace_reference:
+            naked_identifier: tblspace
+      - keyword: COLLATE
+      - object_reference:
+          naked_identifier: numeric
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: primary_key_options
+    - bracketed:
+      - start_bracket: (
+      - column_reference:
+          naked_identifier: everything
+      - data_type:
+          keyword: int
+      - column_constraint_segment:
+        - keyword: PRIMARY
+        - keyword: KEY
+        - keyword: WITH
+        - definition_parameters:
+            bracketed:
+            - start_bracket: (
+            - definition_parameter:
+                properties_naked_identifier: arg1
+                comparison_operator:
+                  raw_comparison_operator: '='
+                numeric_literal: '3'
+            - comma: ','
+            - definition_parameter:
+                properties_naked_identifier: arg5
+                comparison_operator:
+                  raw_comparison_operator: '='
+                quoted_literal: "'str'"
+            - end_bracket: )
+        - keyword: USING
+        - keyword: INDEX
+        - keyword: TABLESPACE
+        - tablespace_reference:
+            naked_identifier: tblspace
+      - column_constraint_segment:
+        - keyword: NOT
+        - keyword: 'NULL'
       - end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -1447,3 +1583,706 @@ file:
           keyword: int
         end_bracket: )
 - statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: with_constraints1
+    - bracketed:
+      - start_bracket: (
+      - column_reference:
+          naked_identifier: col_1
+      - data_type:
+          keyword: boolean
+      - column_constraint_segment:
+        - keyword: NOT
+        - keyword: 'NULL'
+      - column_constraint_segment:
+          keyword: DEFAULT
+          boolean_literal: 'false'
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: with_constraints2
+    - bracketed:
+      - start_bracket: (
+      - column_reference:
+          naked_identifier: col_1
+      - data_type:
+          keyword: boolean
+      - column_constraint_segment:
+          keyword: DEFAULT
+          boolean_literal: 'false'
+      - column_constraint_segment:
+        - keyword: NOT
+        - keyword: 'NULL'
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: with_constraints3
+    - bracketed:
+      - start_bracket: (
+      - column_reference:
+          naked_identifier: col_1
+      - data_type:
+          keyword: int
+      - column_constraint_segment:
+          keyword: DEFAULT
+          expression:
+          - bracketed:
+              start_bracket: (
+              expression:
+              - numeric_literal: '1'
+              - binary_operator: +
+              - numeric_literal: '2'
+              end_bracket: )
+          - binary_operator: '*'
+          - bracketed:
+              start_bracket: (
+              expression:
+              - numeric_literal: '3'
+              - binary_operator: +
+              - numeric_literal: '4'
+              end_bracket: )
+      - column_constraint_segment:
+        - keyword: NOT
+        - keyword: 'NULL'
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: with_constraints33
+    - bracketed:
+      - start_bracket: (
+      - column_reference:
+          naked_identifier: col_1
+      - data_type:
+          keyword: int
+      - column_constraint_segment:
+          keyword: DEFAULT
+          expression:
+          - numeric_literal: '1'
+          - binary_operator: +
+          - numeric_literal: '2'
+          - binary_operator: '*'
+          - numeric_literal: '3'
+          - binary_operator: +
+          - numeric_literal: '4'
+      - column_constraint_segment:
+        - keyword: NOT
+        - keyword: 'NULL'
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: with_constraints4
+    - bracketed:
+      - start_bracket: (
+      - column_reference:
+          naked_identifier: col_1
+      - data_type:
+          keyword: int
+      - column_constraint_segment:
+          keyword: DEFAULT
+          expression:
+            bracketed:
+              start_bracket: (
+              expression:
+              - numeric_literal: '1'
+              - binary_operator: +
+              - numeric_literal: '2'
+              - binary_operator: '*'
+              - numeric_literal: '3'
+              - binary_operator: +
+              - numeric_literal: '4'
+              end_bracket: )
+      - column_constraint_segment:
+        - keyword: NOT
+        - keyword: 'NULL'
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: with_constraints5
+    - bracketed:
+      - start_bracket: (
+      - column_reference:
+          naked_identifier: col_1
+      - data_type:
+          keyword: bool
+      - column_constraint_segment:
+          keyword: DEFAULT
+          expression:
+            bracketed:
+              start_bracket: (
+              expression:
+              - numeric_literal: '1'
+              - keyword: NOT
+              - keyword: IN
+              - bracketed:
+                - start_bracket: (
+                - numeric_literal: '3'
+                - comma: ','
+                - numeric_literal: '4'
+                - end_bracket: )
+              end_bracket: )
+      - column_constraint_segment:
+        - keyword: NOT
+        - keyword: 'NULL'
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: with_constraints6
+    - bracketed:
+      - start_bracket: (
+      - column_reference:
+          naked_identifier: col_1
+      - data_type:
+          keyword: bool
+      - column_constraint_segment:
+        - keyword: NOT
+        - keyword: 'NULL'
+      - column_constraint_segment:
+          keyword: DEFAULT
+          expression:
+            bracketed:
+              start_bracket: (
+              expression:
+              - numeric_literal: '5'
+              - keyword: NOT
+              - keyword: IN
+              - bracketed:
+                - start_bracket: (
+                - numeric_literal: '5'
+                - comma: ','
+                - numeric_literal: '6'
+                - end_bracket: )
+              end_bracket: )
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: test_with_storage_param
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: col_1
+        data_type:
+          keyword: boolean
+        end_bracket: )
+    - keyword: WITH
+    - relation_options:
+        bracketed:
+          start_bracket: (
+          relation_option:
+            properties_naked_identifier: autovacuum_enabled
+            comparison_operator:
+              raw_comparison_operator: '='
+            boolean_literal: 'true'
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: test_with_storage_params
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: col_1
+        data_type:
+          keyword: boolean
+        end_bracket: )
+    - keyword: WITH
+    - relation_options:
+        bracketed:
+        - start_bracket: (
+        - relation_option:
+            properties_naked_identifier: autovacuum_enabled
+            comparison_operator:
+              raw_comparison_operator: '='
+            boolean_literal: 'true'
+        - comma: ','
+        - relation_option:
+            properties_naked_identifier: vacuum_truncate
+            comparison_operator:
+              raw_comparison_operator: '='
+            boolean_literal: 'false'
+        - end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: tbl
+    - bracketed:
+      - start_bracket: (
+      - column_reference:
+          naked_identifier: col_char_varying_unlimited
+      - data_type:
+        - keyword: character
+        - keyword: varying
+      - comma: ','
+      - column_reference:
+          naked_identifier: col_char_varying_limited
+      - data_type:
+        - keyword: character
+        - keyword: varying
+        - bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '50'
+              end_bracket: )
+      - comma: ','
+      - column_reference:
+          naked_identifier: col_varchar_unlimited
+      - data_type:
+          keyword: varchar
+      - comma: ','
+      - column_reference:
+          naked_identifier: col_varchar_limited
+      - data_type:
+          keyword: varchar
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '50'
+              end_bracket: )
+      - comma: ','
+      - column_reference:
+          naked_identifier: col_character_default
+      - data_type:
+          keyword: character
+      - comma: ','
+      - column_reference:
+          naked_identifier: col_character_specified
+      - data_type:
+          keyword: character
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '50'
+              end_bracket: )
+      - comma: ','
+      - column_reference:
+          naked_identifier: col_char_default
+      - data_type:
+          keyword: char
+      - comma: ','
+      - column_reference:
+          naked_identifier: col_char_specified
+      - data_type:
+          keyword: character
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '50'
+              end_bracket: )
+      - comma: ','
+      - column_reference:
+          naked_identifier: col_text
+      - data_type:
+          keyword: text
+      - comma: ','
+      - column_reference:
+          naked_identifier: col_system_char
+      - data_type:
+          quoted_identifier: '"char"'
+      - comma: ','
+      - column_reference:
+          naked_identifier: col_name
+      - data_type:
+          data_type_identifier: name
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: reservation
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: during
+        data_type:
+          keyword: tsrange
+        comma: ','
+        table_constraint:
+        - keyword: EXCLUDE
+        - keyword: USING
+        - index_access_method:
+            naked_identifier: gist
+        - bracketed:
+            start_bracket: (
+            exclusion_constraint_element:
+              index_element:
+                column_reference:
+                  naked_identifier: during
+              keyword: WITH
+              comparison_operator:
+              - ampersand: '&'
+              - ampersand: '&'
+            end_bracket: )
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: room_reservation
+    - bracketed:
+      - start_bracket: (
+      - column_reference:
+          naked_identifier: room
+      - data_type:
+          keyword: text
+      - comma: ','
+      - column_reference:
+          naked_identifier: during
+      - data_type:
+          keyword: tsrange
+      - comma: ','
+      - table_constraint:
+        - keyword: EXCLUDE
+        - keyword: USING
+        - index_access_method:
+            naked_identifier: gist
+        - bracketed:
+          - start_bracket: (
+          - exclusion_constraint_element:
+              index_element:
+                column_reference:
+                  naked_identifier: room
+              keyword: WITH
+              comparison_operator:
+                raw_comparison_operator: '='
+          - comma: ','
+          - exclusion_constraint_element:
+              index_element:
+                column_reference:
+                  naked_identifier: during
+              keyword: WITH
+              comparison_operator:
+              - ampersand: '&'
+              - ampersand: '&'
+          - end_bracket: )
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: no_using
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: field
+        data_type:
+          keyword: text
+        comma: ','
+        table_constraint:
+        - keyword: EXCLUDE
+        - bracketed:
+            start_bracket: (
+            exclusion_constraint_element:
+              index_element:
+                column_reference:
+                  naked_identifier: field
+              keyword: WITH
+              comparison_operator:
+                raw_comparison_operator: '='
+            end_bracket: )
+        - keyword: NOT
+        - keyword: DEFERRABLE
+        - keyword: INITIALLY
+        - keyword: IMMEDIATE
+        - keyword: 'NO'
+        - keyword: INHERIT
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: many_options
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: field
+        data_type:
+          keyword: text
+        comma: ','
+        table_constraint:
+        - keyword: EXCLUDE
+        - keyword: USING
+        - index_access_method:
+            naked_identifier: gist
+        - bracketed:
+          - start_bracket: (
+          - exclusion_constraint_element:
+              index_element:
+                column_reference:
+                  naked_identifier: one
+              keyword: WITH
+              comparison_operator:
+                raw_comparison_operator: '='
+          - comma: ','
+          - exclusion_constraint_element:
+              index_element:
+                column_reference:
+                  naked_identifier: nulls_opclass
+                index_element_options:
+                  operator_class_reference:
+                    naked_identifier: nulls
+              keyword: WITH
+              comparison_operator:
+                raw_comparison_operator: '='
+          - comma: ','
+          - exclusion_constraint_element:
+              index_element:
+                column_reference:
+                  naked_identifier: nulls_last
+                index_element_options:
+                - keyword: NULLS
+                - keyword: LAST
+              keyword: WITH
+              comparison_operator:
+                raw_comparison_operator: '='
+          - comma: ','
+          - exclusion_constraint_element:
+              index_element:
+                column_reference:
+                  naked_identifier: two
+                index_element_options:
+                - keyword: COLLATE
+                - collation_reference:
+                    quoted_identifier: '"en-US"'
+                - operator_class_reference:
+                    naked_identifier: opclass
+                - relation_options:
+                    bracketed:
+                    - start_bracket: (
+                    - relation_option:
+                        properties_naked_identifier: opt1
+                    - comma: ','
+                    - relation_option:
+                        properties_naked_identifier: opt2
+                        comparison_operator:
+                          raw_comparison_operator: '='
+                        numeric_literal: '5'
+                    - comma: ','
+                    - relation_option:
+                        properties_naked_identifier: opt3
+                        comparison_operator:
+                          raw_comparison_operator: '='
+                        quoted_literal: "'str'"
+                    - comma: ','
+                    - relation_option:
+                      - properties_naked_identifier: ns
+                      - dot: .
+                      - properties_naked_identifier: opt4
+                    - comma: ','
+                    - relation_option:
+                      - properties_naked_identifier: ns
+                      - dot: .
+                      - properties_naked_identifier: opt5
+                      - comparison_operator:
+                          raw_comparison_operator: '='
+                      - numeric_literal: '6'
+                    - comma: ','
+                    - relation_option:
+                      - properties_naked_identifier: ns
+                      - dot: .
+                      - properties_naked_identifier: opt6
+                      - comparison_operator:
+                          raw_comparison_operator: '='
+                      - quoted_literal: "'str'"
+                    - comma: ','
+                    - relation_option:
+                      - properties_naked_identifier: opt7
+                      - comparison_operator:
+                          raw_comparison_operator: '='
+                      - properties_naked_identifier: ASC
+                    - end_bracket: )
+                - keyword: ASC
+                - keyword: NULLS
+                - keyword: FIRST
+              keyword: WITH
+              comparison_operator:
+                raw_comparison_operator: '='
+          - comma: ','
+          - exclusion_constraint_element:
+              index_element:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    column_reference:
+                      naked_identifier: two
+                    binary_operator: +
+                    numeric_literal: '5'
+                  end_bracket: )
+              keyword: WITH
+              comparison_operator:
+                raw_comparison_operator: '='
+          - comma: ','
+          - exclusion_constraint_element:
+              index_element:
+                function:
+                  function_name:
+                    function_name_identifier: myfunc
+                  bracketed:
+                  - start_bracket: (
+                  - expression:
+                      column_reference:
+                        naked_identifier: a
+                  - comma: ','
+                  - expression:
+                      column_reference:
+                        naked_identifier: b
+                  - end_bracket: )
+              keyword: WITH
+              comparison_operator:
+                raw_comparison_operator: '='
+          - comma: ','
+          - exclusion_constraint_element:
+              index_element:
+                function:
+                  function_name:
+                    function_name_identifier: myfunc_opclass
+                  bracketed:
+                  - start_bracket: (
+                  - expression:
+                      column_reference:
+                        naked_identifier: a
+                  - comma: ','
+                  - expression:
+                      column_reference:
+                        naked_identifier: b
+                  - end_bracket: )
+                index_element_options:
+                  operator_class_reference:
+                    naked_identifier: fop
+                  relation_options:
+                    bracketed:
+                    - start_bracket: (
+                    - relation_option:
+                        properties_naked_identifier: opt
+                        comparison_operator:
+                          raw_comparison_operator: '='
+                        numeric_literal: '1'
+                    - comma: ','
+                    - relation_option:
+                        properties_naked_identifier: foo
+                        comparison_operator:
+                          raw_comparison_operator: '='
+                        numeric_literal: '2'
+                    - end_bracket: )
+              keyword: WITH
+              comparison_operator:
+                raw_comparison_operator: '='
+          - comma: ','
+          - exclusion_constraint_element:
+              index_element:
+                column_reference:
+                  naked_identifier: only_opclass
+                index_element_options:
+                  operator_class_reference:
+                    naked_identifier: opclass
+              keyword: WITH
+              comparison_operator:
+                raw_comparison_operator: '='
+          - comma: ','
+          - exclusion_constraint_element:
+              index_element:
+                column_reference:
+                  naked_identifier: desc_order
+                index_element_options:
+                  keyword: DESC
+              keyword: WITH
+              comparison_operator:
+                raw_comparison_operator: '='
+          - end_bracket: )
+        - index_parameters:
+          - keyword: INCLUDE
+          - bracketed:
+            - start_bracket: (
+            - column_reference:
+                naked_identifier: a
+            - comma: ','
+            - column_reference:
+                naked_identifier: b
+            - end_bracket: )
+          - keyword: WITH
+          - definition_parameters:
+              bracketed:
+              - start_bracket: (
+              - definition_parameter:
+                  properties_naked_identifier: idx_num
+                  comparison_operator:
+                    raw_comparison_operator: '='
+                  numeric_literal: '5'
+              - comma: ','
+              - definition_parameter:
+                  properties_naked_identifier: idx_str
+                  comparison_operator:
+                    raw_comparison_operator: '='
+                  quoted_literal: "'idx_value'"
+              - comma: ','
+              - definition_parameter:
+                - properties_naked_identifier: idx_kw
+                - comparison_operator:
+                    raw_comparison_operator: '='
+                - properties_naked_identifier: DESC
+              - end_bracket: )
+          - keyword: USING
+          - keyword: INDEX
+          - keyword: TABLESPACE
+          - tablespace_reference:
+              naked_identifier: tblspc
+        - keyword: WHERE
+        - bracketed:
+            start_bracket: (
+            expression:
+              column_reference:
+                naked_identifier: field
+              comparison_operator:
+              - raw_comparison_operator: '!'
+              - raw_comparison_operator: '='
+              quoted_literal: "'def'"
+            end_bracket: )
+        - keyword: DEFERRABLE
+        - keyword: INITIALLY
+        - keyword: DEFERRED
+        end_bracket: )
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/postgres/postgres_create_table_as.sql b/test/fixtures/dialects/postgres/postgres_create_table_as.sql
index 7cc7b14..e496d10 100644
--- a/test/fixtures/dialects/postgres/postgres_create_table_as.sql
+++ b/test/fixtures/dialects/postgres/postgres_create_table_as.sql
@@ -116,3 +116,20 @@ CREATE TABLE t1 WITH (val=70) AS
     SELECT something
     FROM t2
 ;
+
+create temp table t1
+with (autovacuum_enabled = true, toast_tuple_target = 123, vacuum_index_cleanup = false) as
+select
+    column_1
+    , column_2
+    , column_3
+from tablename;
+
+create temp table a_new_table
+with (appendoptimized = true, compresstype = zstd) as
+select
+    column_1
+    , column_2
+    , column_3
+from schema.tablename
+group by 1, 2, 3;
diff --git a/test/fixtures/dialects/postgres/postgres_create_table_as.yml b/test/fixtures/dialects/postgres/postgres_create_table_as.yml
index 4ba01bb..d6f087a 100644
--- a/test/fixtures/dialects/postgres/postgres_create_table_as.yml
+++ b/test/fixtures/dialects/postgres/postgres_create_table_as.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 400575378a328cfd4f1f25bebd8703ee72ed4a8f4fc7ea9abc45593aa25b8734
+_hash: 1ae3c61fd8e9753e50ab692f5949825c30fa47744146dc30d1fa774fca367f3d
 file:
 - statement:
     create_table_as_statement:
@@ -558,3 +558,104 @@ file:
                 table_reference:
                   naked_identifier: t2
 - statement_terminator: ;
+- statement:
+    create_table_as_statement:
+    - keyword: create
+    - keyword: temp
+    - keyword: table
+    - table_reference:
+        naked_identifier: t1
+    - keyword: with
+    - bracketed:
+      - start_bracket: (
+      - parameter: autovacuum_enabled
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - boolean_literal: 'true'
+      - comma: ','
+      - parameter: toast_tuple_target
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - numeric_literal: '123'
+      - comma: ','
+      - parameter: vacuum_index_cleanup
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - boolean_literal: 'false'
+      - end_bracket: )
+    - keyword: as
+    - select_statement:
+        select_clause:
+        - keyword: select
+        - select_clause_element:
+            column_reference:
+              naked_identifier: column_1
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: column_2
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: column_3
+        from_clause:
+          keyword: from
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: tablename
+- statement_terminator: ;
+- statement:
+    create_table_as_statement:
+    - keyword: create
+    - keyword: temp
+    - keyword: table
+    - table_reference:
+        naked_identifier: a_new_table
+    - keyword: with
+    - bracketed:
+      - start_bracket: (
+      - parameter: appendoptimized
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - boolean_literal: 'true'
+      - comma: ','
+      - parameter: compresstype
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - naked_identifier: zstd
+      - end_bracket: )
+    - keyword: as
+    - select_statement:
+        select_clause:
+        - keyword: select
+        - select_clause_element:
+            column_reference:
+              naked_identifier: column_1
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: column_2
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: column_3
+        from_clause:
+          keyword: from
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                - naked_identifier: schema
+                - dot: .
+                - naked_identifier: tablename
+        groupby_clause:
+        - keyword: group
+        - keyword: by
+        - numeric_literal: '1'
+        - comma: ','
+        - numeric_literal: '2'
+        - comma: ','
+        - numeric_literal: '3'
+- statement_terminator: ;
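
[Note] The fixture trees above are the record form of sqlfluff's parse output: each SQL fixture is parsed and serialised, and the "_hash" field is written alongside so hand edits are caught, as the file headers say. A minimal sketch of reproducing one of these trees with the simple Python API (assuming sqlfluff >= 2.0 and PyYAML; sqlfluff.parse returns a JSON-serialisable dict whose top-level "file" key mirrors the YAML):

import yaml  # PyYAML, assumed available alongside sqlfluff
import sqlfluff

SQL = """\
create temp table t1
with (autovacuum_enabled = true, toast_tuple_target = 123) as
select column_1 from tablename;
"""

# parse() returns a JSON-serialisable record; dumped as YAML it resembles
# the fixture layout above, minus the generator-written "_hash" field.
tree = sqlfluff.parse(SQL, dialect="postgres")
print(yaml.safe_dump(tree, sort_keys=False))
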
diff --git a/test/fixtures/dialects/postgres/postgres_create_type.yml b/test/fixtures/dialects/postgres/postgres_create_type.yml
index 1c309bb..962a164 100644
--- a/test/fixtures/dialects/postgres/postgres_create_type.yml
+++ b/test/fixtures/dialects/postgres/postgres_create_type.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: b996a018c7e24a91965a3214b8747f2cf73dbe4b540672aaab01848ffdfe7356
+_hash: 13bcdb2d8d6b13a8e0d3029166a5e129a68c54ffc57a18d5ba3e7b378366ddc8
 file:
 - statement:
     create_type_statement:
@@ -49,9 +49,9 @@ file:
     - keyword: RANGE
     - bracketed:
       - start_bracket: (
-      - raw: SUBTYPE
+      - code: SUBTYPE
       - raw: '='
-      - raw: FLOAT
+      - code: FLOAT
       - end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -63,13 +63,13 @@ file:
     - keyword: AS
     - bracketed:
       - start_bracket: (
-      - raw: INPUT
+      - code: INPUT
       - raw: '='
-      - raw: foo
+      - code: foo
       - comma: ','
-      - raw: OUTPUT
+      - code: OUTPUT
       - raw: '='
-      - raw: bar
+      - code: bar
       - end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -81,9 +81,9 @@ file:
     - keyword: AS
     - bracketed:
       - start_bracket: (
-      - raw: foo
-      - raw: varchar
-      - raw: collate
-      - raw: utf8
+      - code: foo
+      - code: varchar
+      - code: collate
+      - code: utf8
       - end_bracket: )
 - statement_terminator: ;
diff --git a/test/fixtures/dialects/postgres/postgres_create_view.sql b/test/fixtures/dialects/postgres/postgres_create_view.sql
new file mode 100644
index 0000000..a47523a
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_create_view.sql
@@ -0,0 +1,64 @@
+CREATE VIEW vista AS SELECT 'Hello World';
+
+CREATE OR REPLACE VIEW vista AS SELECT 'Hello World';
+
+CREATE VIEW vista AS SELECT text 'Hello World' AS hello;
+
+CREATE TEMP VIEW vista AS SELECT text 'Hello World' AS hello;
+
+CREATE TEMPORARY VIEW  vista AS SELECT text 'Hello World' AS hello;
+
+CREATE VIEW comedies AS
+    SELECT *
+    FROM films
+    WHERE kind = 'Comedy';
+
+CREATE VIEW pg_comedies AS
+    VALUES (1, 'one'), (2, 'two'), (3, 'three')
+    WITH LOCAL CHECK OPTION;
+
+CREATE VIEW pg_comedies AS
+    SELECT *
+    FROM comedies
+    WHERE classification = 'PG'
+    WITH CASCADED CHECK OPTION;
+create view foo with (security_invoker) as select 1;
+create view foo with (security_barrier) as select 1;
+
+create view foo with (security_invoker=BOOLEAN) as select 1;
+create view foo with (security_barrier=BOOLEAN) as select 1;
+
+create view foo with (check_option=local) as select * from OTHER_VIEW;
+create view foo with (check_option=cascaded) as select * from OTHER_VIEW;
+
+create view foo with (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC)
+    as select 1;
+
+create view foo as select * from OTHER_VIEW with local check option;
+create view foo as select * from OTHER_VIEW with cascaded check option;
+
+CREATE OR REPLACE RECURSIVE VIEW "grouping_node" (
+  "node_id",
+  "ancestors",
+  "category_id",
+  "path",
+  "path_nodes"
+) AS
+
+SELECT "group_id" AS "node_id",
+       ARRAY[]::INTEGER[] AS "ancestors",
+       "category_id",
+       ARRAY["name"]::text[] AS "path",
+       ARRAY["group_id"]::INTEGER[] AS "path_nodes"
+  FROM "grouping_managementgroup"
+ WHERE "parent_id" IS NULL
+
+ UNION ALL
+
+SELECT "group_id",
+       "ancestors" || "parent_id",
+       "grouping_node"."category_id",
+       "path" || "name"::text,
+       "path_nodes" || "group_id"
+FROM "grouping_managementgroup", "grouping_node"
+WHERE "parent_id" = "node_id";
diff --git a/test/fixtures/dialects/postgres/postgres_create_view.yml b/test/fixtures/dialects/postgres/postgres_create_view.yml
new file mode 100644
index 0000000..2145f99
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_create_view.yml
@@ -0,0 +1,648 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 80226c3fc2d057dac32c44b6c636966e669971a1cb6dbbffb2c994b817e8a3a3
+file:
+- statement:
+    create_view_statement:
+    - keyword: CREATE
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: vista
+    - keyword: AS
+    - select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            quoted_literal: "'Hello World'"
+- statement_terminator: ;
+- statement:
+    create_view_statement:
+    - keyword: CREATE
+    - keyword: OR
+    - keyword: REPLACE
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: vista
+    - keyword: AS
+    - select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            quoted_literal: "'Hello World'"
+- statement_terminator: ;
+- statement:
+    create_view_statement:
+    - keyword: CREATE
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: vista
+    - keyword: AS
+    - select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            expression:
+              data_type:
+                keyword: text
+              quoted_literal: "'Hello World'"
+            alias_expression:
+              keyword: AS
+              naked_identifier: hello
+- statement_terminator: ;
+- statement:
+    create_view_statement:
+    - keyword: CREATE
+    - keyword: TEMP
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: vista
+    - keyword: AS
+    - select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            expression:
+              data_type:
+                keyword: text
+              quoted_literal: "'Hello World'"
+            alias_expression:
+              keyword: AS
+              naked_identifier: hello
+- statement_terminator: ;
+- statement:
+    create_view_statement:
+    - keyword: CREATE
+    - keyword: TEMPORARY
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: vista
+    - keyword: AS
+    - select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            expression:
+              data_type:
+                keyword: text
+              quoted_literal: "'Hello World'"
+            alias_expression:
+              keyword: AS
+              naked_identifier: hello
+- statement_terminator: ;
+- statement:
+    create_view_statement:
+    - keyword: CREATE
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: comedies
+    - keyword: AS
+    - select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: films
+        where_clause:
+          keyword: WHERE
+          expression:
+            column_reference:
+              naked_identifier: kind
+            comparison_operator:
+              raw_comparison_operator: '='
+            quoted_literal: "'Comedy'"
+- statement_terminator: ;
+- statement:
+    create_view_statement:
+    - keyword: CREATE
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: pg_comedies
+    - keyword: AS
+    - values_clause:
+      - keyword: VALUES
+      - bracketed:
+        - start_bracket: (
+        - expression:
+            numeric_literal: '1'
+        - comma: ','
+        - expression:
+            quoted_literal: "'one'"
+        - end_bracket: )
+      - comma: ','
+      - bracketed:
+        - start_bracket: (
+        - expression:
+            numeric_literal: '2'
+        - comma: ','
+        - expression:
+            quoted_literal: "'two'"
+        - end_bracket: )
+      - comma: ','
+      - bracketed:
+        - start_bracket: (
+        - expression:
+            numeric_literal: '3'
+        - comma: ','
+        - expression:
+            quoted_literal: "'three'"
+        - end_bracket: )
+    - with_check_option:
+      - keyword: WITH
+      - keyword: LOCAL
+      - keyword: CHECK
+      - keyword: OPTION
+- statement_terminator: ;
+- statement:
+    create_view_statement:
+    - keyword: CREATE
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: pg_comedies
+    - keyword: AS
+    - select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: comedies
+        where_clause:
+          keyword: WHERE
+          expression:
+            column_reference:
+              naked_identifier: classification
+            comparison_operator:
+              raw_comparison_operator: '='
+            quoted_literal: "'PG'"
+    - with_check_option:
+      - keyword: WITH
+      - keyword: CASCADED
+      - keyword: CHECK
+      - keyword: OPTION
+- statement_terminator: ;
+- statement:
+    create_view_statement:
+    - keyword: create
+    - keyword: view
+    - table_reference:
+        naked_identifier: foo
+    - keyword: with
+    - relation_options:
+        bracketed:
+          start_bracket: (
+          relation_option:
+            properties_naked_identifier: security_invoker
+          end_bracket: )
+    - keyword: as
+    - select_statement:
+        select_clause:
+          keyword: select
+          select_clause_element:
+            numeric_literal: '1'
+- statement_terminator: ;
+- statement:
+    create_view_statement:
+    - keyword: create
+    - keyword: view
+    - table_reference:
+        naked_identifier: foo
+    - keyword: with
+    - relation_options:
+        bracketed:
+          start_bracket: (
+          relation_option:
+            properties_naked_identifier: security_barrier
+          end_bracket: )
+    - keyword: as
+    - select_statement:
+        select_clause:
+          keyword: select
+          select_clause_element:
+            numeric_literal: '1'
+- statement_terminator: ;
+- statement:
+    create_view_statement:
+    - keyword: create
+    - keyword: view
+    - table_reference:
+        naked_identifier: foo
+    - keyword: with
+    - relation_options:
+        bracketed:
+          start_bracket: (
+          relation_option:
+          - properties_naked_identifier: security_invoker
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - properties_naked_identifier: BOOLEAN
+          end_bracket: )
+    - keyword: as
+    - select_statement:
+        select_clause:
+          keyword: select
+          select_clause_element:
+            numeric_literal: '1'
+- statement_terminator: ;
+- statement:
+    create_view_statement:
+    - keyword: create
+    - keyword: view
+    - table_reference:
+        naked_identifier: foo
+    - keyword: with
+    - relation_options:
+        bracketed:
+          start_bracket: (
+          relation_option:
+          - properties_naked_identifier: security_barrier
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - properties_naked_identifier: BOOLEAN
+          end_bracket: )
+    - keyword: as
+    - select_statement:
+        select_clause:
+          keyword: select
+          select_clause_element:
+            numeric_literal: '1'
+- statement_terminator: ;
+- statement:
+    create_view_statement:
+    - keyword: create
+    - keyword: view
+    - table_reference:
+        naked_identifier: foo
+    - keyword: with
+    - relation_options:
+        bracketed:
+          start_bracket: (
+          relation_option:
+          - properties_naked_identifier: check_option
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - properties_naked_identifier: local
+          end_bracket: )
+    - keyword: as
+    - select_statement:
+        select_clause:
+          keyword: select
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: from
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: OTHER_VIEW
+- statement_terminator: ;
+- statement:
+    create_view_statement:
+    - keyword: create
+    - keyword: view
+    - table_reference:
+        naked_identifier: foo
+    - keyword: with
+    - relation_options:
+        bracketed:
+          start_bracket: (
+          relation_option:
+          - properties_naked_identifier: check_option
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - properties_naked_identifier: cascaded
+          end_bracket: )
+    - keyword: as
+    - select_statement:
+        select_clause:
+          keyword: select
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: from
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: OTHER_VIEW
+- statement_terminator: ;
+- statement:
+    create_view_statement:
+    - keyword: create
+    - keyword: view
+    - table_reference:
+        naked_identifier: foo
+    - keyword: with
+    - relation_options:
+        bracketed:
+        - start_bracket: (
+        - relation_option:
+            properties_naked_identifier: opt1
+        - comma: ','
+        - relation_option:
+            properties_naked_identifier: opt2
+            comparison_operator:
+              raw_comparison_operator: '='
+            numeric_literal: '5'
+        - comma: ','
+        - relation_option:
+            properties_naked_identifier: opt3
+            comparison_operator:
+              raw_comparison_operator: '='
+            quoted_literal: "'str'"
+        - comma: ','
+        - relation_option:
+          - properties_naked_identifier: ns
+          - dot: .
+          - properties_naked_identifier: opt4
+        - comma: ','
+        - relation_option:
+          - properties_naked_identifier: ns
+          - dot: .
+          - properties_naked_identifier: opt5
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - numeric_literal: '6'
+        - comma: ','
+        - relation_option:
+          - properties_naked_identifier: ns
+          - dot: .
+          - properties_naked_identifier: opt6
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - quoted_literal: "'str'"
+        - comma: ','
+        - relation_option:
+          - properties_naked_identifier: opt7
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - properties_naked_identifier: ASC
+        - end_bracket: )
+    - keyword: as
+    - select_statement:
+        select_clause:
+          keyword: select
+          select_clause_element:
+            numeric_literal: '1'
+- statement_terminator: ;
+- statement:
+    create_view_statement:
+    - keyword: create
+    - keyword: view
+    - table_reference:
+        naked_identifier: foo
+    - keyword: as
+    - select_statement:
+        select_clause:
+          keyword: select
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: from
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: OTHER_VIEW
+    - with_check_option:
+      - keyword: with
+      - keyword: local
+      - keyword: check
+      - keyword: option
+- statement_terminator: ;
+- statement:
+    create_view_statement:
+    - keyword: create
+    - keyword: view
+    - table_reference:
+        naked_identifier: foo
+    - keyword: as
+    - select_statement:
+        select_clause:
+          keyword: select
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: from
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: OTHER_VIEW
+    - with_check_option:
+      - keyword: with
+      - keyword: cascaded
+      - keyword: check
+      - keyword: option
+- statement_terminator: ;
+- statement:
+    create_view_statement:
+    - keyword: CREATE
+    - keyword: OR
+    - keyword: REPLACE
+    - keyword: RECURSIVE
+    - keyword: VIEW
+    - table_reference:
+        quoted_identifier: '"grouping_node"'
+    - bracketed:
+      - start_bracket: (
+      - column_reference:
+          quoted_identifier: '"node_id"'
+      - comma: ','
+      - column_reference:
+          quoted_identifier: '"ancestors"'
+      - comma: ','
+      - column_reference:
+          quoted_identifier: '"category_id"'
+      - comma: ','
+      - column_reference:
+          quoted_identifier: '"path"'
+      - comma: ','
+      - column_reference:
+          quoted_identifier: '"path_nodes"'
+      - end_bracket: )
+    - keyword: AS
+    - set_expression:
+      - select_statement:
+          select_clause:
+          - keyword: SELECT
+          - select_clause_element:
+              column_reference:
+                quoted_identifier: '"group_id"'
+              alias_expression:
+                keyword: AS
+                quoted_identifier: '"node_id"'
+          - comma: ','
+          - select_clause_element:
+              expression:
+                cast_expression:
+                  typed_array_literal:
+                    array_type:
+                      keyword: ARRAY
+                    array_literal:
+                      start_square_bracket: '['
+                      end_square_bracket: ']'
+                  casting_operator: '::'
+                  data_type:
+                    keyword: INTEGER
+                    start_square_bracket: '['
+                    end_square_bracket: ']'
+              alias_expression:
+                keyword: AS
+                quoted_identifier: '"ancestors"'
+          - comma: ','
+          - select_clause_element:
+              column_reference:
+                quoted_identifier: '"category_id"'
+          - comma: ','
+          - select_clause_element:
+              expression:
+                cast_expression:
+                  typed_array_literal:
+                    array_type:
+                      keyword: ARRAY
+                    array_literal:
+                      start_square_bracket: '['
+                      column_reference:
+                        quoted_identifier: '"name"'
+                      end_square_bracket: ']'
+                  casting_operator: '::'
+                  data_type:
+                    keyword: text
+                    start_square_bracket: '['
+                    end_square_bracket: ']'
+              alias_expression:
+                keyword: AS
+                quoted_identifier: '"path"'
+          - comma: ','
+          - select_clause_element:
+              expression:
+                cast_expression:
+                  typed_array_literal:
+                    array_type:
+                      keyword: ARRAY
+                    array_literal:
+                      start_square_bracket: '['
+                      column_reference:
+                        quoted_identifier: '"group_id"'
+                      end_square_bracket: ']'
+                  casting_operator: '::'
+                  data_type:
+                    keyword: INTEGER
+                    start_square_bracket: '['
+                    end_square_bracket: ']'
+              alias_expression:
+                keyword: AS
+                quoted_identifier: '"path_nodes"'
+          from_clause:
+            keyword: FROM
+            from_expression:
+              from_expression_element:
+                table_expression:
+                  table_reference:
+                    quoted_identifier: '"grouping_managementgroup"'
+          where_clause:
+            keyword: WHERE
+            expression:
+            - column_reference:
+                quoted_identifier: '"parent_id"'
+            - keyword: IS
+            - keyword: 'NULL'
+      - set_operator:
+        - keyword: UNION
+        - keyword: ALL
+      - select_statement:
+          select_clause:
+          - keyword: SELECT
+          - select_clause_element:
+              column_reference:
+                quoted_identifier: '"group_id"'
+          - comma: ','
+          - select_clause_element:
+              expression:
+              - column_reference:
+                  quoted_identifier: '"ancestors"'
+              - binary_operator:
+                - pipe: '|'
+                - pipe: '|'
+              - column_reference:
+                  quoted_identifier: '"parent_id"'
+          - comma: ','
+          - select_clause_element:
+              column_reference:
+              - quoted_identifier: '"grouping_node"'
+              - dot: .
+              - quoted_identifier: '"category_id"'
+          - comma: ','
+          - select_clause_element:
+              expression:
+                column_reference:
+                  quoted_identifier: '"path"'
+                binary_operator:
+                - pipe: '|'
+                - pipe: '|'
+                cast_expression:
+                  column_reference:
+                    quoted_identifier: '"name"'
+                  casting_operator: '::'
+                  data_type:
+                    keyword: text
+          - comma: ','
+          - select_clause_element:
+              expression:
+              - column_reference:
+                  quoted_identifier: '"path_nodes"'
+              - binary_operator:
+                - pipe: '|'
+                - pipe: '|'
+              - column_reference:
+                  quoted_identifier: '"group_id"'
+          from_clause:
+          - keyword: FROM
+          - from_expression:
+              from_expression_element:
+                table_expression:
+                  table_reference:
+                    quoted_identifier: '"grouping_managementgroup"'
+          - comma: ','
+          - from_expression:
+              from_expression_element:
+                table_expression:
+                  table_reference:
+                    quoted_identifier: '"grouping_node"'
+          where_clause:
+            keyword: WHERE
+            expression:
+            - column_reference:
+                quoted_identifier: '"parent_id"'
+            - comparison_operator:
+                raw_comparison_operator: '='
+            - column_reference:
+                quoted_identifier: '"node_id"'
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/postgres/postgres_datatypes.yml b/test/fixtures/dialects/postgres/postgres_datatypes.yml
index dd7813a..902f289 100644
--- a/test/fixtures/dialects/postgres/postgres_datatypes.yml
+++ b/test/fixtures/dialects/postgres/postgres_datatypes.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 4b44f5e6db8bbe365f3f41a7aa8f58f4b1ae4a9a66f0e725efc213d27108012d
+_hash: 7cf843dbbfd1ddd3907ede58652e56d6e0a9c7446e8b623461a83834f985e330
 file:
 - statement:
     create_table_statement:
@@ -107,10 +107,11 @@ file:
           naked_identifier: b
       - data_type:
           keyword: float
-          bracketed:
-            start_bracket: (
-            numeric_literal: '24'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '24'
+              end_bracket: )
       - comma: ','
       - column_reference:
           naked_identifier: c
@@ -145,41 +146,45 @@ file:
           naked_identifier: b
       - data_type:
           keyword: numeric
-          bracketed:
-            start_bracket: (
-            numeric_literal: '7'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '7'
+              end_bracket: )
       - comma: ','
       - column_reference:
           naked_identifier: ba
       - data_type:
           keyword: decimal
-          bracketed:
-            start_bracket: (
-            numeric_literal: '7'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '7'
+              end_bracket: )
       - comma: ','
       - column_reference:
           naked_identifier: c
       - data_type:
           keyword: numeric
-          bracketed:
-          - start_bracket: (
-          - numeric_literal: '7'
-          - comma: ','
-          - numeric_literal: '2'
-          - end_bracket: )
+          bracketed_arguments:
+            bracketed:
+            - start_bracket: (
+            - numeric_literal: '7'
+            - comma: ','
+            - numeric_literal: '2'
+            - end_bracket: )
       - comma: ','
       - column_reference:
           naked_identifier: ca
       - data_type:
           keyword: decimal
-          bracketed:
-          - start_bracket: (
-          - numeric_literal: '7'
-          - comma: ','
-          - numeric_literal: '2'
-          - end_bracket: )
+          bracketed_arguments:
+            bracketed:
+            - start_bracket: (
+            - numeric_literal: '7'
+            - comma: ','
+            - numeric_literal: '2'
+            - end_bracket: )
       - end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -213,10 +218,11 @@ file:
           naked_identifier: b
       - data_type:
           keyword: char
-          bracketed:
-            start_bracket: (
-            numeric_literal: '7'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '7'
+              end_bracket: )
       - comma: ','
       - column_reference:
           naked_identifier: c
@@ -227,10 +233,11 @@ file:
           naked_identifier: d
       - data_type:
           keyword: character
-          bracketed:
-            start_bracket: (
-            numeric_literal: '5'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '5'
+              end_bracket: )
       - comma: ','
       - column_reference:
           naked_identifier: e
@@ -243,19 +250,21 @@ file:
       - data_type:
         - keyword: character
         - keyword: varying
-        - bracketed:
-            start_bracket: (
-            numeric_literal: '8'
-            end_bracket: )
+        - bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '8'
+              end_bracket: )
       - comma: ','
       - column_reference:
           naked_identifier: g
       - data_type:
           keyword: varchar
-          bracketed:
-            start_bracket: (
-            numeric_literal: '9'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '9'
+              end_bracket: )
       - comma: ','
       - column_reference:
           naked_identifier: h
@@ -520,10 +529,11 @@ file:
           naked_identifier: b
       - data_type:
           keyword: bit
-          bracketed:
-            start_bracket: (
-            numeric_literal: '3'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '3'
+              end_bracket: )
       - comma: ','
       - column_reference:
           naked_identifier: c
@@ -536,10 +546,11 @@ file:
       - data_type:
         - keyword: bit
         - keyword: varying
-        - bracketed:
-            start_bracket: (
-            numeric_literal: '5'
-            end_bracket: )
+        - bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '5'
+              end_bracket: )
       - end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -671,18 +682,21 @@ file:
       - column_reference:
           naked_identifier: e
       - data_type:
-        - keyword: money
-        - keyword: ARRAY
+          keyword: money
+          array_type:
+            keyword: ARRAY
       - comma: ','
       - column_reference:
           naked_identifier: f
       - data_type:
-        - keyword: money
-        - keyword: ARRAY
-        - array_literal:
-            start_square_bracket: '['
-            numeric_literal: '7'
-            end_square_bracket: ']'
+          keyword: money
+          sized_array_type:
+            array_type:
+              keyword: ARRAY
+            array_accessor:
+              start_square_bracket: '['
+              numeric_literal: '7'
+              end_square_bracket: ']'
       - end_bracket: )
 - statement_terminator: ;
 - statement:
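
[Note] The datatypes hunks above show the 2.x tree shape: size arguments such as float(24) now sit under a bracketed_arguments node, and money ARRAY[7] nests under sized_array_type/array_accessor instead of a flat keyword list. An illustrative check that those node names appear when parsing a statement assembled from the fixture's columns (the recursive walk over the record dict is a convenience here, not part of sqlfluff's API):

import sqlfluff

def node_names(node):
    """Yield every segment name in a parse record (nested dicts/lists)."""
    if isinstance(node, dict):
        for key, value in node.items():
            yield key
            yield from node_names(value)
    elif isinstance(node, list):
        for item in node:
            yield from node_names(item)

tree = sqlfluff.parse(
    "CREATE TABLE t (b float(24), f money ARRAY[7]);", dialect="postgres"
)
names = set(node_names(tree))
assert "bracketed_arguments" in names  # wraps the (24) size argument
assert "sized_array_type" in names     # wraps ARRAY[7]
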
diff --git a/test/fixtures/dialects/postgres/postgres_drop_cast.sql b/test/fixtures/dialects/postgres/postgres_drop_cast.sql
new file mode 100644
index 0000000..89b9820
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_drop_cast.sql
@@ -0,0 +1,17 @@
+-- ANSI SQL:
+
+DROP CAST (int AS bool);
+
+DROP CAST (int AS bool) RESTRICT;
+
+DROP CAST (int AS bool) CASCADE;
+
+DROP CAST (udt_1 AS udt_2);
+
+DROP CAST (sch.udt_1 AS sch.udt_2);
+
+-- Additional PG extensions:
+
+DROP CAST IF EXISTS (int AS bool);
+DROP CAST IF EXISTS (int AS bool) RESTRICT;
+DROP CAST IF EXISTS (int AS bool) CASCADE;
diff --git a/test/fixtures/dialects/postgres/postgres_drop_cast.yml b/test/fixtures/dialects/postgres/postgres_drop_cast.yml
new file mode 100644
index 0000000..c3a6809
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_drop_cast.yml
@@ -0,0 +1,125 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 797c59758187f686eabaaa295a9e2b392158e11ab37a934556cf024cf468e083
+file:
+- statement:
+    drop_cast_statement:
+    - keyword: DROP
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          keyword: int
+      - keyword: AS
+      - data_type:
+          keyword: bool
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    drop_cast_statement:
+    - keyword: DROP
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          keyword: int
+      - keyword: AS
+      - data_type:
+          keyword: bool
+      - end_bracket: )
+    - keyword: RESTRICT
+- statement_terminator: ;
+- statement:
+    drop_cast_statement:
+    - keyword: DROP
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          keyword: int
+      - keyword: AS
+      - data_type:
+          keyword: bool
+      - end_bracket: )
+    - keyword: CASCADE
+- statement_terminator: ;
+- statement:
+    drop_cast_statement:
+    - keyword: DROP
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          data_type_identifier: udt_1
+      - keyword: AS
+      - data_type:
+          data_type_identifier: udt_2
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    drop_cast_statement:
+    - keyword: DROP
+    - keyword: CAST
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          naked_identifier: sch
+          dot: .
+          data_type_identifier: udt_1
+      - keyword: AS
+      - data_type:
+          naked_identifier: sch
+          dot: .
+          data_type_identifier: udt_2
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    drop_cast_statement:
+    - keyword: DROP
+    - keyword: CAST
+    - keyword: IF
+    - keyword: EXISTS
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          keyword: int
+      - keyword: AS
+      - data_type:
+          keyword: bool
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    drop_cast_statement:
+    - keyword: DROP
+    - keyword: CAST
+    - keyword: IF
+    - keyword: EXISTS
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          keyword: int
+      - keyword: AS
+      - data_type:
+          keyword: bool
+      - end_bracket: )
+    - keyword: RESTRICT
+- statement_terminator: ;
+- statement:
+    drop_cast_statement:
+    - keyword: DROP
+    - keyword: CAST
+    - keyword: IF
+    - keyword: EXISTS
+    - bracketed:
+      - start_bracket: (
+      - data_type:
+          keyword: int
+      - keyword: AS
+      - data_type:
+          keyword: bool
+      - end_bracket: )
+    - keyword: CASCADE
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/postgres/postgres_drop_index.sql b/test/fixtures/dialects/postgres/postgres_drop_index.sql
new file mode 100644
index 0000000..34e522d
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_drop_index.sql
@@ -0,0 +1,35 @@
+DROP INDEX abc;
+
+DROP INDEX "abc";
+
+DROP INDEX IF EXISTS abc;
+
+DROP INDEX abc, "def", ghi;
+
+DROP INDEX IF EXISTS abc, def, ghi;
+
+-- Test CASCADE trailing keyword
+
+DROP INDEX abc CASCADE;
+
+DROP INDEX abc, def, ghi CASCADE;
+
+DROP INDEX IF EXISTS abc, def, ghi CASCADE;
+
+-- Test RESTRICT trailing keyword
+
+DROP INDEX abc RESTRICT;
+
+DROP INDEX abc, def, ghi RESTRICT;
+
+-- Test CONCURRENTLY
+
+DROP INDEX CONCURRENTLY abc;
+
+DROP INDEX CONCURRENTLY IF EXISTS abc;
+
+DROP INDEX CONCURRENTLY abc, def;
+
+DROP INDEX CONCURRENTLY IF EXISTS abc, def;
+
+DROP INDEX CONCURRENTLY abc, def CASCADE;
diff --git a/test/fixtures/dialects/postgres/postgres_drop_index.yml b/test/fixtures/dialects/postgres/postgres_drop_index.yml
new file mode 100644
index 0000000..40639a6
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_drop_index.yml
@@ -0,0 +1,172 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: a8bfaf3bfd8572b31cd437d25ca9449d13350a5be6765b90320af9bf1047a214
+file:
+- statement:
+    drop_index_statement:
+    - keyword: DROP
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: abc
+- statement_terminator: ;
+- statement:
+    drop_index_statement:
+    - keyword: DROP
+    - keyword: INDEX
+    - index_reference:
+        quoted_identifier: '"abc"'
+- statement_terminator: ;
+- statement:
+    drop_index_statement:
+    - keyword: DROP
+    - keyword: INDEX
+    - keyword: IF
+    - keyword: EXISTS
+    - index_reference:
+        naked_identifier: abc
+- statement_terminator: ;
+- statement:
+    drop_index_statement:
+    - keyword: DROP
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: abc
+    - comma: ','
+    - index_reference:
+        quoted_identifier: '"def"'
+    - comma: ','
+    - index_reference:
+        naked_identifier: ghi
+- statement_terminator: ;
+- statement:
+    drop_index_statement:
+    - keyword: DROP
+    - keyword: INDEX
+    - keyword: IF
+    - keyword: EXISTS
+    - index_reference:
+        naked_identifier: abc
+    - comma: ','
+    - index_reference:
+        naked_identifier: def
+    - comma: ','
+    - index_reference:
+        naked_identifier: ghi
+- statement_terminator: ;
+- statement:
+    drop_index_statement:
+    - keyword: DROP
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: abc
+    - keyword: CASCADE
+- statement_terminator: ;
+- statement:
+    drop_index_statement:
+    - keyword: DROP
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: abc
+    - comma: ','
+    - index_reference:
+        naked_identifier: def
+    - comma: ','
+    - index_reference:
+        naked_identifier: ghi
+    - keyword: CASCADE
+- statement_terminator: ;
+- statement:
+    drop_index_statement:
+    - keyword: DROP
+    - keyword: INDEX
+    - keyword: IF
+    - keyword: EXISTS
+    - index_reference:
+        naked_identifier: abc
+    - comma: ','
+    - index_reference:
+        naked_identifier: def
+    - comma: ','
+    - index_reference:
+        naked_identifier: ghi
+    - keyword: CASCADE
+- statement_terminator: ;
+- statement:
+    drop_index_statement:
+    - keyword: DROP
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: abc
+    - keyword: RESTRICT
+- statement_terminator: ;
+- statement:
+    drop_index_statement:
+    - keyword: DROP
+    - keyword: INDEX
+    - index_reference:
+        naked_identifier: abc
+    - comma: ','
+    - index_reference:
+        naked_identifier: def
+    - comma: ','
+    - index_reference:
+        naked_identifier: ghi
+    - keyword: RESTRICT
+- statement_terminator: ;
+- statement:
+    drop_index_statement:
+    - keyword: DROP
+    - keyword: INDEX
+    - keyword: CONCURRENTLY
+    - index_reference:
+        naked_identifier: abc
+- statement_terminator: ;
+- statement:
+    drop_index_statement:
+    - keyword: DROP
+    - keyword: INDEX
+    - keyword: CONCURRENTLY
+    - keyword: IF
+    - keyword: EXISTS
+    - index_reference:
+        naked_identifier: abc
+- statement_terminator: ;
+- statement:
+    drop_index_statement:
+    - keyword: DROP
+    - keyword: INDEX
+    - keyword: CONCURRENTLY
+    - index_reference:
+        naked_identifier: abc
+    - comma: ','
+    - index_reference:
+        naked_identifier: def
+- statement_terminator: ;
+- statement:
+    drop_index_statement:
+    - keyword: DROP
+    - keyword: INDEX
+    - keyword: CONCURRENTLY
+    - keyword: IF
+    - keyword: EXISTS
+    - index_reference:
+        naked_identifier: abc
+    - comma: ','
+    - index_reference:
+        naked_identifier: def
+- statement_terminator: ;
+- statement:
+    drop_index_statement:
+    - keyword: DROP
+    - keyword: INDEX
+    - keyword: CONCURRENTLY
+    - index_reference:
+        naked_identifier: abc
+    - comma: ','
+    - index_reference:
+        naked_identifier: def
+    - keyword: CASCADE
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/postgres/postgres_drop_owned.sql b/test/fixtures/dialects/postgres/postgres_drop_owned.sql
new file mode 100644
index 0000000..7a628ea
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_drop_owned.sql
@@ -0,0 +1,19 @@
+DROP OWNED BY bob;
+
+DROP OWNED BY bob, alice;
+
+DROP OWNED BY CURRENT_ROLE;
+
+DROP OWNED BY CURRENT_USER;
+
+DROP OWNED BY SESSION_USER;
+
+DROP OWNED BY bob, CURRENT_ROLE, alice, CURRENT_USER, ted;
+
+DROP OWNED BY bob CASCADE;
+
+DROP OWNED BY bob RESTRICT;
+
+DROP OWNED BY bob, alice CASCADE;
+
+DROP OWNED BY bob, alice RESTRICT;
diff --git a/test/fixtures/dialects/postgres/postgres_drop_owned.yml b/test/fixtures/dialects/postgres/postgres_drop_owned.yml
new file mode 100644
index 0000000..6ae54ae
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_drop_owned.yml
@@ -0,0 +1,107 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: f30947e3b468c099a73640d9924d061e48b1c3df0bfbdbef07c4f22810c3ca42
+file:
+- statement:
+    drop_owned_statement:
+    - keyword: DROP
+    - keyword: OWNED
+    - keyword: BY
+    - role_reference:
+        naked_identifier: bob
+- statement_terminator: ;
+- statement:
+    drop_owned_statement:
+    - keyword: DROP
+    - keyword: OWNED
+    - keyword: BY
+    - role_reference:
+        naked_identifier: bob
+    - comma: ','
+    - role_reference:
+        naked_identifier: alice
+- statement_terminator: ;
+- statement:
+    drop_owned_statement:
+    - keyword: DROP
+    - keyword: OWNED
+    - keyword: BY
+    - keyword: CURRENT_ROLE
+- statement_terminator: ;
+- statement:
+    drop_owned_statement:
+    - keyword: DROP
+    - keyword: OWNED
+    - keyword: BY
+    - keyword: CURRENT_USER
+- statement_terminator: ;
+- statement:
+    drop_owned_statement:
+    - keyword: DROP
+    - keyword: OWNED
+    - keyword: BY
+    - keyword: SESSION_USER
+- statement_terminator: ;
+- statement:
+    drop_owned_statement:
+    - keyword: DROP
+    - keyword: OWNED
+    - keyword: BY
+    - role_reference:
+        naked_identifier: bob
+    - comma: ','
+    - keyword: CURRENT_ROLE
+    - comma: ','
+    - role_reference:
+        naked_identifier: alice
+    - comma: ','
+    - keyword: CURRENT_USER
+    - comma: ','
+    - role_reference:
+        naked_identifier: ted
+- statement_terminator: ;
+- statement:
+    drop_owned_statement:
+    - keyword: DROP
+    - keyword: OWNED
+    - keyword: BY
+    - role_reference:
+        naked_identifier: bob
+    - keyword: CASCADE
+- statement_terminator: ;
+- statement:
+    drop_owned_statement:
+    - keyword: DROP
+    - keyword: OWNED
+    - keyword: BY
+    - role_reference:
+        naked_identifier: bob
+    - keyword: RESTRICT
+- statement_terminator: ;
+- statement:
+    drop_owned_statement:
+    - keyword: DROP
+    - keyword: OWNED
+    - keyword: BY
+    - role_reference:
+        naked_identifier: bob
+    - comma: ','
+    - role_reference:
+        naked_identifier: alice
+    - keyword: CASCADE
+- statement_terminator: ;
+- statement:
+    drop_owned_statement:
+    - keyword: DROP
+    - keyword: OWNED
+    - keyword: BY
+    - role_reference:
+        naked_identifier: bob
+    - comma: ','
+    - role_reference:
+        naked_identifier: alice
+    - keyword: RESTRICT
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/postgres/postgres_drop_publication.sql b/test/fixtures/dialects/postgres/postgres_drop_publication.sql
new file mode 100644
index 0000000..eb7fe1f
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_drop_publication.sql
@@ -0,0 +1,35 @@
+-- Test no trailing keyword with combinations of:
+--  * IF EXISTS
+--  * One publication vs multiple publications.
+
+DROP PUBLICATION abc;
+
+DROP PUBLICATION "abc";
+
+DROP PUBLICATION IF EXISTS abc;
+
+DROP PUBLICATION abc, "def", ghi;
+
+DROP PUBLICATION IF EXISTS abc, def, ghi;
+
+-- Test CASCADE trailing keyword
+
+DROP PUBLICATION abc CASCADE;
+
+DROP PUBLICATION IF EXISTS abc CASCADE;
+
+DROP PUBLICATION abc, def, ghi CASCADE;
+
+DROP PUBLICATION IF EXISTS abc, def, ghi CASCADE;
+
+
+-- Test RESTRICT trailing keyword
+
+DROP PUBLICATION abc RESTRICT;
+
+DROP PUBLICATION IF EXISTS abc RESTRICT;
+
+DROP PUBLICATION abc, def, ghi RESTRICT;
+
+DROP PUBLICATION IF EXISTS abc, def, ghi RESTRICT;
+
diff --git a/test/fixtures/dialects/postgres/postgres_drop_publication.yml b/test/fixtures/dialects/postgres/postgres_drop_publication.yml
new file mode 100644
index 0000000..2105465
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_drop_publication.yml
@@ -0,0 +1,154 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 41225e922471bc8b5fb87ee6f2ac4199f7c7a5f29a52dd5dee1d99fdd1e16485
+file:
+- statement:
+    drop_publication_statement:
+    - keyword: DROP
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+- statement_terminator: ;
+- statement:
+    drop_publication_statement:
+    - keyword: DROP
+    - keyword: PUBLICATION
+    - publication_reference:
+        quoted_identifier: '"abc"'
+- statement_terminator: ;
+- statement:
+    drop_publication_statement:
+    - keyword: DROP
+    - keyword: PUBLICATION
+    - keyword: IF
+    - keyword: EXISTS
+    - publication_reference:
+        naked_identifier: abc
+- statement_terminator: ;
+- statement:
+    drop_publication_statement:
+    - keyword: DROP
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - comma: ','
+    - publication_reference:
+        quoted_identifier: '"def"'
+    - comma: ','
+    - publication_reference:
+        naked_identifier: ghi
+- statement_terminator: ;
+- statement:
+    drop_publication_statement:
+    - keyword: DROP
+    - keyword: PUBLICATION
+    - keyword: IF
+    - keyword: EXISTS
+    - publication_reference:
+        naked_identifier: abc
+    - comma: ','
+    - publication_reference:
+        naked_identifier: def
+    - comma: ','
+    - publication_reference:
+        naked_identifier: ghi
+- statement_terminator: ;
+- statement:
+    drop_publication_statement:
+    - keyword: DROP
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: CASCADE
+- statement_terminator: ;
+- statement:
+    drop_publication_statement:
+    - keyword: DROP
+    - keyword: PUBLICATION
+    - keyword: IF
+    - keyword: EXISTS
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: CASCADE
+- statement_terminator: ;
+- statement:
+    drop_publication_statement:
+    - keyword: DROP
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - comma: ','
+    - publication_reference:
+        naked_identifier: def
+    - comma: ','
+    - publication_reference:
+        naked_identifier: ghi
+    - keyword: CASCADE
+- statement_terminator: ;
+- statement:
+    drop_publication_statement:
+    - keyword: DROP
+    - keyword: PUBLICATION
+    - keyword: IF
+    - keyword: EXISTS
+    - publication_reference:
+        naked_identifier: abc
+    - comma: ','
+    - publication_reference:
+        naked_identifier: def
+    - comma: ','
+    - publication_reference:
+        naked_identifier: ghi
+    - keyword: CASCADE
+- statement_terminator: ;
+- statement:
+    drop_publication_statement:
+    - keyword: DROP
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: RESTRICT
+- statement_terminator: ;
+- statement:
+    drop_publication_statement:
+    - keyword: DROP
+    - keyword: PUBLICATION
+    - keyword: IF
+    - keyword: EXISTS
+    - publication_reference:
+        naked_identifier: abc
+    - keyword: RESTRICT
+- statement_terminator: ;
+- statement:
+    drop_publication_statement:
+    - keyword: DROP
+    - keyword: PUBLICATION
+    - publication_reference:
+        naked_identifier: abc
+    - comma: ','
+    - publication_reference:
+        naked_identifier: def
+    - comma: ','
+    - publication_reference:
+        naked_identifier: ghi
+    - keyword: RESTRICT
+- statement_terminator: ;
+- statement:
+    drop_publication_statement:
+    - keyword: DROP
+    - keyword: PUBLICATION
+    - keyword: IF
+    - keyword: EXISTS
+    - publication_reference:
+        naked_identifier: abc
+    - comma: ','
+    - publication_reference:
+        naked_identifier: def
+    - comma: ','
+    - publication_reference:
+        naked_identifier: ghi
+    - keyword: RESTRICT
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/postgres/postgres_drop_view.sql b/test/fixtures/dialects/postgres/postgres_drop_view.sql
new file mode 100644
index 0000000..a9618fc
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_drop_view.sql
@@ -0,0 +1,30 @@
+DROP VIEW abc;
+
+DROP VIEW "abc";
+
+DROP VIEW IF EXISTS abc;
+
+DROP VIEW abc, "def", ghi;
+
+DROP VIEW IF EXISTS abc, def, ghi;
+
+-- Test CASCADE trailing keyword
+
+DROP VIEW abc CASCADE;
+
+DROP VIEW IF EXISTS abc CASCADE;
+
+DROP VIEW abc, def, ghi CASCADE;
+
+DROP VIEW IF EXISTS abc, def, ghi CASCADE;
+
+
+-- Test RESTRICT trailing keyword
+
+DROP VIEW abc RESTRICT;
+
+DROP VIEW IF EXISTS abc RESTRICT;
+
+DROP VIEW abc, def, ghi RESTRICT;
+
+DROP VIEW IF EXISTS abc, def, ghi RESTRICT;
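The drop-view fixtures above follow the same pattern as the drop-publication ones: one plain-text SQL file per feature, paired with the auto-generated parse tree that follows. As a quick local check, roughly this sketch (assuming sqlfluff 2.x's simple Python API; the statement text is taken verbatim from the fixture) reproduces such a tree:

    import sqlfluff

    # Parse one fixture statement with the postgres dialect; the returned
    # dict mirrors the YAML records committed alongside the SQL file.
    tree = sqlfluff.parse(
        "DROP VIEW IF EXISTS abc, def, ghi CASCADE;", dialect="postgres"
    )
    print(tree)

The committed YAML itself should always come from `python test/generate_parse_fixture_yml.py`, as the file headers note, since the embedded `_hash` is verified when the tests run.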
diff --git a/test/fixtures/dialects/postgres/postgres_drop_view.yml b/test/fixtures/dialects/postgres/postgres_drop_view.yml
new file mode 100644
index 0000000..f24afcf
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_drop_view.yml
@@ -0,0 +1,154 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 36b7a96e8518c113ef9d614a8b704ce02aa976d1b69949a8e9f68c287ded1420
+file:
+- statement:
+    drop_view_statement:
+    - keyword: DROP
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: abc
+- statement_terminator: ;
+- statement:
+    drop_view_statement:
+    - keyword: DROP
+    - keyword: VIEW
+    - table_reference:
+        quoted_identifier: '"abc"'
+- statement_terminator: ;
+- statement:
+    drop_view_statement:
+    - keyword: DROP
+    - keyword: VIEW
+    - keyword: IF
+    - keyword: EXISTS
+    - table_reference:
+        naked_identifier: abc
+- statement_terminator: ;
+- statement:
+    drop_view_statement:
+    - keyword: DROP
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: abc
+    - comma: ','
+    - table_reference:
+        quoted_identifier: '"def"'
+    - comma: ','
+    - table_reference:
+        naked_identifier: ghi
+- statement_terminator: ;
+- statement:
+    drop_view_statement:
+    - keyword: DROP
+    - keyword: VIEW
+    - keyword: IF
+    - keyword: EXISTS
+    - table_reference:
+        naked_identifier: abc
+    - comma: ','
+    - table_reference:
+        naked_identifier: def
+    - comma: ','
+    - table_reference:
+        naked_identifier: ghi
+- statement_terminator: ;
+- statement:
+    drop_view_statement:
+    - keyword: DROP
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: abc
+    - keyword: CASCADE
+- statement_terminator: ;
+- statement:
+    drop_view_statement:
+    - keyword: DROP
+    - keyword: VIEW
+    - keyword: IF
+    - keyword: EXISTS
+    - table_reference:
+        naked_identifier: abc
+    - keyword: CASCADE
+- statement_terminator: ;
+- statement:
+    drop_view_statement:
+    - keyword: DROP
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: abc
+    - comma: ','
+    - table_reference:
+        naked_identifier: def
+    - comma: ','
+    - table_reference:
+        naked_identifier: ghi
+    - keyword: CASCADE
+- statement_terminator: ;
+- statement:
+    drop_view_statement:
+    - keyword: DROP
+    - keyword: VIEW
+    - keyword: IF
+    - keyword: EXISTS
+    - table_reference:
+        naked_identifier: abc
+    - comma: ','
+    - table_reference:
+        naked_identifier: def
+    - comma: ','
+    - table_reference:
+        naked_identifier: ghi
+    - keyword: CASCADE
+- statement_terminator: ;
+- statement:
+    drop_view_statement:
+    - keyword: DROP
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: abc
+    - keyword: RESTRICT
+- statement_terminator: ;
+- statement:
+    drop_view_statement:
+    - keyword: DROP
+    - keyword: VIEW
+    - keyword: IF
+    - keyword: EXISTS
+    - table_reference:
+        naked_identifier: abc
+    - keyword: RESTRICT
+- statement_terminator: ;
+- statement:
+    drop_view_statement:
+    - keyword: DROP
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: abc
+    - comma: ','
+    - table_reference:
+        naked_identifier: def
+    - comma: ','
+    - table_reference:
+        naked_identifier: ghi
+    - keyword: RESTRICT
+- statement_terminator: ;
+- statement:
+    drop_view_statement:
+    - keyword: DROP
+    - keyword: VIEW
+    - keyword: IF
+    - keyword: EXISTS
+    - table_reference:
+        naked_identifier: abc
+    - comma: ','
+    - table_reference:
+        naked_identifier: def
+    - comma: ','
+    - table_reference:
+        naked_identifier: ghi
+    - keyword: RESTRICT
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/postgres/postgres_overlaps.sql b/test/fixtures/dialects/postgres/postgres_overlaps.sql
new file mode 100644
index 0000000..6a39711
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_overlaps.sql
@@ -0,0 +1,12 @@
+-- with DATE
+select
+    start_date,
+    end_date
+from test_overlaps
+where (start_date, end_date) overlaps (DATE '2023-02-15', DATE '2023-03-15');
+
+select
+    start_date,
+    end_date
+from test_overlaps
+where (start_date, end_date) overlaps ('2023-02-15', '2023-03-15');
diff --git a/test/fixtures/dialects/postgres/postgres_overlaps.yml b/test/fixtures/dialects/postgres/postgres_overlaps.yml
new file mode 100644
index 0000000..70ada35
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_overlaps.yml
@@ -0,0 +1,91 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: b52f94d6189b518815f0ab38c9a623899123c23bf3e6a7a98647d57aecca1640
+file:
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: select
+      - select_clause_element:
+          column_reference:
+            naked_identifier: start_date
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: end_date
+      from_clause:
+        keyword: from
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test_overlaps
+      where_clause:
+        keyword: where
+        expression:
+          bracketed:
+          - start_bracket: (
+          - column_reference:
+              naked_identifier: start_date
+          - comma: ','
+          - column_reference:
+              naked_identifier: end_date
+          - end_bracket: )
+      overlaps_clause:
+        keyword: overlaps
+        bracketed:
+        - start_bracket: (
+        - datetime_literal:
+            datetime_type_identifier:
+              keyword: DATE
+            quoted_literal: "'2023-02-15'"
+        - comma: ','
+        - datetime_literal:
+            datetime_type_identifier:
+              keyword: DATE
+            quoted_literal: "'2023-03-15'"
+        - end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: select
+      - select_clause_element:
+          column_reference:
+            naked_identifier: start_date
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: end_date
+      from_clause:
+        keyword: from
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test_overlaps
+      where_clause:
+        keyword: where
+        expression:
+          bracketed:
+          - start_bracket: (
+          - column_reference:
+              naked_identifier: start_date
+          - comma: ','
+          - column_reference:
+              naked_identifier: end_date
+          - end_bracket: )
+      overlaps_clause:
+        keyword: overlaps
+        bracketed:
+        - start_bracket: (
+        - datetime_literal:
+            quoted_literal: "'2023-02-15'"
+        - comma: ','
+        - datetime_literal:
+            quoted_literal: "'2023-03-15'"
+        - end_bracket: )
+- statement_terminator: ;
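The two trees above show the distinction the overlaps fixture exercises: with a DATE prefix the operand parses as a datetime_literal carrying a datetime_type_identifier, while the second statement (identical but for the dropped DATE keyword) leaves only the quoted_literal inside the datetime_literal. A sketch of inspecting this (Python, sqlfluff simple API; statement text from the fixture):

    import sqlfluff

    sql = (
        "select start_date, end_date from test_overlaps "
        "where (start_date, end_date) overlaps (DATE '2023-02-15', DATE '2023-03-15');"
    )
    # The overlaps_clause of the returned tree carries the typed literals.
    tree = sqlfluff.parse(sql, dialect="postgres")
    print(tree)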
diff --git a/test/fixtures/dialects/postgres/postgres_reassign_owned.sql b/test/fixtures/dialects/postgres/postgres_reassign_owned.sql
new file mode 100644
index 0000000..10520f4
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_reassign_owned.sql
@@ -0,0 +1,11 @@
+REASSIGN OWNED BY bob TO alice;
+
+REASSIGN OWNED BY bob, ted TO alice;
+
+REASSIGN OWNED BY bob, CURRENT_ROLE, ted, CURRENT_USER, sam, SESSION_USER TO alice;
+
+REASSIGN OWNED BY bob TO CURRENT_ROLE;
+
+REASSIGN OWNED BY bob TO CURRENT_USER;
+
+REASSIGN OWNED BY bob TO SESSION_USER;
diff --git a/test/fixtures/dialects/postgres/postgres_reassign_owned.yml b/test/fixtures/dialects/postgres/postgres_reassign_owned.yml
new file mode 100644
index 0000000..765b0a9
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_reassign_owned.yml
@@ -0,0 +1,85 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 7924caa50d4473f8535da52f0a3b8091a85c2cebb11fe03765ba868e31d034c6
+file:
+- statement:
+    reassign_owned_statement:
+    - keyword: REASSIGN
+    - keyword: OWNED
+    - keyword: BY
+    - role_reference:
+        naked_identifier: bob
+    - keyword: TO
+    - role_reference:
+        naked_identifier: alice
+- statement_terminator: ;
+- statement:
+    reassign_owned_statement:
+    - keyword: REASSIGN
+    - keyword: OWNED
+    - keyword: BY
+    - role_reference:
+        naked_identifier: bob
+    - comma: ','
+    - role_reference:
+        naked_identifier: ted
+    - keyword: TO
+    - role_reference:
+        naked_identifier: alice
+- statement_terminator: ;
+- statement:
+    reassign_owned_statement:
+    - keyword: REASSIGN
+    - keyword: OWNED
+    - keyword: BY
+    - role_reference:
+        naked_identifier: bob
+    - comma: ','
+    - keyword: CURRENT_ROLE
+    - comma: ','
+    - role_reference:
+        naked_identifier: ted
+    - comma: ','
+    - keyword: CURRENT_USER
+    - comma: ','
+    - role_reference:
+        naked_identifier: sam
+    - comma: ','
+    - keyword: SESSION_USER
+    - keyword: TO
+    - role_reference:
+        naked_identifier: alice
+- statement_terminator: ;
+- statement:
+    reassign_owned_statement:
+    - keyword: REASSIGN
+    - keyword: OWNED
+    - keyword: BY
+    - role_reference:
+        naked_identifier: bob
+    - keyword: TO
+    - keyword: CURRENT_ROLE
+- statement_terminator: ;
+- statement:
+    reassign_owned_statement:
+    - keyword: REASSIGN
+    - keyword: OWNED
+    - keyword: BY
+    - role_reference:
+        naked_identifier: bob
+    - keyword: TO
+    - keyword: CURRENT_USER
+- statement_terminator: ;
+- statement:
+    reassign_owned_statement:
+    - keyword: REASSIGN
+    - keyword: OWNED
+    - keyword: BY
+    - role_reference:
+        naked_identifier: bob
+    - keyword: TO
+    - keyword: SESSION_USER
+- statement_terminator: ;
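Note how the reassign fixtures distinguish concrete roles from the special role specifiers: bob, ted and sam parse as role_reference nodes holding naked_identifiers, while CURRENT_ROLE, CURRENT_USER and SESSION_USER surface as bare keywords. A minimal sketch (Python, sqlfluff simple API; statement text from the fixture):

    import sqlfluff

    # 'bob' becomes role_reference -> naked_identifier;
    # CURRENT_USER stays a plain keyword node.
    tree = sqlfluff.parse("REASSIGN OWNED BY bob TO CURRENT_USER;", dialect="postgres")
    print(tree)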
diff --git a/test/fixtures/dialects/postgres/postgres_reset.sql b/test/fixtures/dialects/postgres/postgres_reset.sql
index c7ead3e..d67d1c0 100644
--- a/test/fixtures/dialects/postgres/postgres_reset.sql
+++ b/test/fixtures/dialects/postgres/postgres_reset.sql
@@ -1,2 +1,3 @@
 RESET timezone;
 RESET ALL;
+RESET ROLE;
diff --git a/test/fixtures/dialects/postgres/postgres_reset.yml b/test/fixtures/dialects/postgres/postgres_reset.yml
index bfd1379..a5ac5a9 100644
--- a/test/fixtures/dialects/postgres/postgres_reset.yml
+++ b/test/fixtures/dialects/postgres/postgres_reset.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: e9e279911916c47d20deb5cce0ef040ce5e573f71b5044dc43b3a1f3b64bbc23
+_hash: f0addcc750a516055a2f60b81c5fb1476adfddf3f5c7ddbcf08ed68ea56d9872
 file:
 - statement:
     reset_statement:
@@ -15,3 +15,8 @@ file:
     - keyword: RESET
     - keyword: ALL
 - statement_terminator: ;
+- statement:
+    reset_statement:
+    - keyword: RESET
+    - keyword: ROLE
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/postgres/postgres_select.sql b/test/fixtures/dialects/postgres/postgres_select.sql
index 207f3fc..9eabf7a 100644
--- a/test/fixtures/dialects/postgres/postgres_select.sql
+++ b/test/fixtures/dialects/postgres/postgres_select.sql
@@ -69,3 +69,18 @@ select id, start, periods.end from periods;
 SELECT concat_lower_or_upper('Hello', 'World', true);
 SELECT concat_lower_or_upper(a => 'Hello', b => 'World');
 SELECT concat_lower_or_upper('Hello', 'World', uppercase => true);
+
+-- row-level locks can be used in Selects
+SELECT * FROM mytable FOR UPDATE;
+SELECT * FROM (SELECT * FROM mytable FOR UPDATE) ss WHERE col1 = 5;
+
+SELECT col1, col2
+FROM mytable1
+JOIN mytable2 ON col1 = col2
+ORDER BY sync_time ASC
+FOR SHARE OF mytable1, mytable2 SKIP LOCKED
+LIMIT 1;
+
+Select * from foo TABLESAMPLE SYSTEM (10);
+
+Select * from foo TABLESAMPLE BERNOULLI (10);
diff --git a/test/fixtures/dialects/postgres/postgres_select.yml b/test/fixtures/dialects/postgres/postgres_select.yml
index 2d32db3..50cd528 100644
--- a/test/fixtures/dialects/postgres/postgres_select.yml
+++ b/test/fixtures/dialects/postgres/postgres_select.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 4c2313e0a846e6f2e7aae0410ba8e11c9815f03fdf9e8cb2869c93e945b38e6f
+_hash: 3234ccf1239d9a4ac328425fb89d43b5c71d92627652c2db63246ce3f1717cdc
 file:
 - statement:
     select_statement:
@@ -678,3 +678,166 @@ file:
                   boolean_literal: 'true'
             - end_bracket: )
 - statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: mytable
+      for_clause:
+      - keyword: FOR
+      - keyword: UPDATE
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              bracketed:
+                start_bracket: (
+                select_statement:
+                  select_clause:
+                    keyword: SELECT
+                    select_clause_element:
+                      wildcard_expression:
+                        wildcard_identifier:
+                          star: '*'
+                  from_clause:
+                    keyword: FROM
+                    from_expression:
+                      from_expression_element:
+                        table_expression:
+                          table_reference:
+                            naked_identifier: mytable
+                  for_clause:
+                  - keyword: FOR
+                  - keyword: UPDATE
+                end_bracket: )
+            alias_expression:
+              naked_identifier: ss
+      where_clause:
+        keyword: WHERE
+        expression:
+          column_reference:
+            naked_identifier: col1
+          comparison_operator:
+            raw_comparison_operator: '='
+          numeric_literal: '5'
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          column_reference:
+            naked_identifier: col1
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: col2
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: mytable1
+          join_clause:
+            keyword: JOIN
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: mytable2
+            join_on_condition:
+              keyword: 'ON'
+              expression:
+              - column_reference:
+                  naked_identifier: col1
+              - comparison_operator:
+                  raw_comparison_operator: '='
+              - column_reference:
+                  naked_identifier: col2
+      orderby_clause:
+      - keyword: ORDER
+      - keyword: BY
+      - column_reference:
+          naked_identifier: sync_time
+      - keyword: ASC
+      for_clause:
+      - keyword: FOR
+      - keyword: SHARE
+      - keyword: OF
+      - table_reference:
+          naked_identifier: mytable1
+      - comma: ','
+      - table_reference:
+          naked_identifier: mytable2
+      - keyword: SKIP
+      - keyword: LOCKED
+      limit_clause:
+        keyword: LIMIT
+        numeric_literal: '1'
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: Select
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: from
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: foo
+            sample_expression:
+            - keyword: TABLESAMPLE
+            - keyword: SYSTEM
+            - bracketed:
+                start_bracket: (
+                numeric_literal: '10'
+                end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: Select
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: from
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: foo
+            sample_expression:
+            - keyword: TABLESAMPLE
+            - keyword: BERNOULLI
+            - bracketed:
+                start_bracket: (
+                numeric_literal: '10'
+                end_bracket: )
+- statement_terminator: ;
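The select fixture additions cover two new clause types: the row-locking for_clause (FOR UPDATE, and FOR SHARE OF ... SKIP LOCKED) and TABLESAMPLE's sample_expression. Since the simple API raises an error on input it cannot parse, a loop like this (a sketch; statements taken from the fixture) doubles as a regression check:

    import sqlfluff

    for sql in (
        "SELECT * FROM mytable FOR UPDATE;",
        "Select * from foo TABLESAMPLE BERNOULLI (10);",
    ):
        # Raises sqlfluff.api.APIParsingError if the dialect regresses.
        sqlfluff.parse(sql, dialect="postgres")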
diff --git a/test/fixtures/dialects/postgres/postgres_set.sql b/test/fixtures/dialects/postgres/postgres_set.sql
index 7123a34..91f0b58 100644
--- a/test/fixtures/dialects/postgres/postgres_set.sql
+++ b/test/fixtures/dialects/postgres/postgres_set.sql
@@ -2,7 +2,14 @@ SET LOCAL search_path = DEFAULT;
 SET search_path TO my_schema, public;
 SET datestyle TO postgres, dmy;
 SET SESSION datestyle TO postgres, 'dmy';
+SET value = on, off, auto;
+SET value = TRUE, FALSE;
 SET TIME ZONE 'PST8PDT';
 SET TIME ZONE 'Europe/Rome';
 SET TIME ZONE LOCAL;
 SET TIME ZONE DEFAULT;
+SET SCHEMA  'my_schema';
+SET SCHEMA  'public';
+SET ROLE my_role;
+SET ROLE "my role";
+SET ROLE NONE;
diff --git a/test/fixtures/dialects/postgres/postgres_set.yml b/test/fixtures/dialects/postgres/postgres_set.yml
index e14e816..dd76ffd 100644
--- a/test/fixtures/dialects/postgres/postgres_set.yml
+++ b/test/fixtures/dialects/postgres/postgres_set.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: c737f9e45be0fab74b2a65be0f9dfee377c8004e59cc09a4de27301ec509f30b
+_hash: 1f838a5987921dc5b619c4bd6293baa5f135cd12fa558d2f18a6198dc6578d3d
 file:
 - statement:
     set_statement:
@@ -42,6 +42,28 @@ file:
     - comma: ','
     - quoted_literal: "'dmy'"
 - statement_terminator: ;
+- statement:
+    set_statement:
+    - keyword: SET
+    - parameter: value
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - naked_identifier: 'on'
+    - comma: ','
+    - naked_identifier: 'off'
+    - comma: ','
+    - naked_identifier: auto
+- statement_terminator: ;
+- statement:
+    set_statement:
+    - keyword: SET
+    - parameter: value
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - boolean_literal: 'TRUE'
+    - comma: ','
+    - boolean_literal: 'FALSE'
+- statement_terminator: ;
 - statement:
     set_statement:
     - keyword: SET
@@ -70,3 +92,35 @@ file:
     - keyword: ZONE
     - keyword: DEFAULT
 - statement_terminator: ;
+- statement:
+    set_statement:
+    - keyword: SET
+    - keyword: SCHEMA
+    - quoted_literal: "'my_schema'"
+- statement_terminator: ;
+- statement:
+    set_statement:
+    - keyword: SET
+    - keyword: SCHEMA
+    - quoted_literal: "'public'"
+- statement_terminator: ;
+- statement:
+    set_statement:
+    - keyword: SET
+    - keyword: ROLE
+    - role_reference:
+        naked_identifier: my_role
+- statement_terminator: ;
+- statement:
+    set_statement:
+    - keyword: SET
+    - keyword: ROLE
+    - role_reference:
+        quoted_identifier: '"my role"'
+- statement_terminator: ;
+- statement:
+    set_statement:
+    - keyword: SET
+    - keyword: ROLE
+    - keyword: NONE
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/postgres/postgres_table_functions.sql b/test/fixtures/dialects/postgres/postgres_table_functions.sql
new file mode 100644
index 0000000..9743f85
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_table_functions.sql
@@ -0,0 +1,7 @@
+select * from unnest(array['123', '456']);
+
+select * from unnest(array['123', '456']) as a(val, row_num);
+
+select * from unnest(array['123', '456']) with ordinality;
+
+select * from unnest(array['123', '456']) with ordinality as a(val, row_num);
diff --git a/test/fixtures/dialects/postgres/postgres_table_functions.yml b/test/fixtures/dialects/postgres/postgres_table_functions.yml
new file mode 100644
index 0000000..c7f9475
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_table_functions.yml
@@ -0,0 +1,151 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 87790be6cd29e35d6b49f004079316a4d1a8ce524ff8545019db85b2247c4b25
+file:
+- statement:
+    select_statement:
+      select_clause:
+        keyword: select
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: from
+        from_expression:
+          from_expression_element:
+            table_expression:
+              function:
+                function_name:
+                  function_name_identifier: unnest
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    typed_array_literal:
+                      array_type:
+                        keyword: array
+                      array_literal:
+                      - start_square_bracket: '['
+                      - quoted_literal: "'123'"
+                      - comma: ','
+                      - quoted_literal: "'456'"
+                      - end_square_bracket: ']'
+                  end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: select
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: from
+        from_expression:
+          from_expression_element:
+            table_expression:
+              function:
+                function_name:
+                  function_name_identifier: unnest
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    typed_array_literal:
+                      array_type:
+                        keyword: array
+                      array_literal:
+                      - start_square_bracket: '['
+                      - quoted_literal: "'123'"
+                      - comma: ','
+                      - quoted_literal: "'456'"
+                      - end_square_bracket: ']'
+                  end_bracket: )
+            alias_expression:
+              keyword: as
+              naked_identifier: a
+              bracketed:
+                start_bracket: (
+                identifier_list:
+                - naked_identifier: val
+                - comma: ','
+                - naked_identifier: row_num
+                end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: select
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: from
+        from_expression:
+          from_expression_element:
+            table_expression:
+            - function:
+                function_name:
+                  function_name_identifier: unnest
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    typed_array_literal:
+                      array_type:
+                        keyword: array
+                      array_literal:
+                      - start_square_bracket: '['
+                      - quoted_literal: "'123'"
+                      - comma: ','
+                      - quoted_literal: "'456'"
+                      - end_square_bracket: ']'
+                  end_bracket: )
+            - keyword: with
+            - keyword: ordinality
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: select
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: from
+        from_expression:
+          from_expression_element:
+            table_expression:
+            - function:
+                function_name:
+                  function_name_identifier: unnest
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    typed_array_literal:
+                      array_type:
+                        keyword: array
+                      array_literal:
+                      - start_square_bracket: '['
+                      - quoted_literal: "'123'"
+                      - comma: ','
+                      - quoted_literal: "'456'"
+                      - end_square_bracket: ']'
+                  end_bracket: )
+            - keyword: with
+            - keyword: ordinality
+            alias_expression:
+              keyword: as
+              naked_identifier: a
+              bracketed:
+                start_bracket: (
+                identifier_list:
+                - naked_identifier: val
+                - comma: ','
+                - naked_identifier: row_num
+                end_bracket: )
+- statement_terminator: ;
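The table-function fixtures pin down how WITH ORDINALITY attaches: it becomes a pair of keywords inside the table_expression, and the optional column alias list (as a(val, row_num)) parses as an alias_expression wrapping an identifier_list. A sketch (Python, sqlfluff simple API; statement text from the fixture):

    import sqlfluff

    tree = sqlfluff.parse(
        "select * from unnest(array['123', '456']) with ordinality as a(val, row_num);",
        dialect="postgres",
    )
    print(tree)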
diff --git a/test/fixtures/dialects/postgres/postgres_vacuum.sql b/test/fixtures/dialects/postgres/postgres_vacuum.sql
new file mode 100644
index 0000000..38d4e5c
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_vacuum.sql
@@ -0,0 +1,37 @@
+-- Old-style vacuum commands
+
+VACUUM;
+VACUUM FULL;
+VACUUM FREEZE;
+VACUUM VERBOSE;
+VACUUM ANALYZE;
+VACUUM ANALYSE;
+VACUUM FULL FREEZE VERBOSE ANALYSE;
+
+VACUUM tbl;
+VACUUM tbl1, tbl2;
+VACUUM FULL FREEZE VERBOSE ANALYSE tbl1, tbl2;
+VACUUM FULL tbl1 (col1, col2), tbl2;
+VACUUM FULL tbl1 (col1), tbl2 (col1, col2);
+
+-- New-style vacuum commands
+
+VACUUM (FULL);
+VACUUM (FULL, FREEZE) tbl1;
+VACUUM (FULL, FREEZE) tbl1 (col1, col2), tbl2 (col3);
+VACUUM (FULL TRUE, FREEZE);
+VACUUM (
+    FULL TRUE,
+    FREEZE FALSE,
+    VERBOSE,
+    ANALYZE,
+    ANALYSE,
+    DISABLE_PAGE_SKIPPING,
+    SKIP_LOCKED,
+    INDEX_CLEANUP on,
+    PROCESS_TOAST,
+    TRUNCATE,
+    PARALLEL 70
+);
+VACUUM (INDEX_CLEANUP off);
+VACUUM (INDEX_CLEANUP auto);
diff --git a/test/fixtures/dialects/postgres/postgres_vacuum.yml b/test/fixtures/dialects/postgres/postgres_vacuum.yml
new file mode 100644
index 0000000..a51f9ed
--- /dev/null
+++ b/test/fixtures/dialects/postgres/postgres_vacuum.yml
@@ -0,0 +1,222 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 617238e8c1decb78501479b84b05ee00fae0aca517bd0d5d3d0b112a6dcd7f29
+file:
+- statement:
+    vacuum_statement:
+      keyword: VACUUM
+- statement_terminator: ;
+- statement:
+    vacuum_statement:
+    - keyword: VACUUM
+    - keyword: FULL
+- statement_terminator: ;
+- statement:
+    vacuum_statement:
+    - keyword: VACUUM
+    - keyword: FREEZE
+- statement_terminator: ;
+- statement:
+    vacuum_statement:
+    - keyword: VACUUM
+    - keyword: VERBOSE
+- statement_terminator: ;
+- statement:
+    vacuum_statement:
+    - keyword: VACUUM
+    - keyword: ANALYZE
+- statement_terminator: ;
+- statement:
+    vacuum_statement:
+    - keyword: VACUUM
+    - keyword: ANALYSE
+- statement_terminator: ;
+- statement:
+    vacuum_statement:
+    - keyword: VACUUM
+    - keyword: FULL
+    - keyword: FREEZE
+    - keyword: VERBOSE
+    - keyword: ANALYSE
+- statement_terminator: ;
+- statement:
+    vacuum_statement:
+      keyword: VACUUM
+      table_reference:
+        naked_identifier: tbl
+- statement_terminator: ;
+- statement:
+    vacuum_statement:
+    - keyword: VACUUM
+    - table_reference:
+        naked_identifier: tbl1
+    - comma: ','
+    - table_reference:
+        naked_identifier: tbl2
+- statement_terminator: ;
+- statement:
+    vacuum_statement:
+    - keyword: VACUUM
+    - keyword: FULL
+    - keyword: FREEZE
+    - keyword: VERBOSE
+    - keyword: ANALYSE
+    - table_reference:
+        naked_identifier: tbl1
+    - comma: ','
+    - table_reference:
+        naked_identifier: tbl2
+- statement_terminator: ;
+- statement:
+    vacuum_statement:
+    - keyword: VACUUM
+    - keyword: FULL
+    - table_reference:
+        naked_identifier: tbl1
+    - bracketed:
+      - start_bracket: (
+      - column_reference:
+          naked_identifier: col1
+      - comma: ','
+      - column_reference:
+          naked_identifier: col2
+      - end_bracket: )
+    - comma: ','
+    - table_reference:
+        naked_identifier: tbl2
+- statement_terminator: ;
+- statement:
+    vacuum_statement:
+    - keyword: VACUUM
+    - keyword: FULL
+    - table_reference:
+        naked_identifier: tbl1
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: col1
+        end_bracket: )
+    - comma: ','
+    - table_reference:
+        naked_identifier: tbl2
+    - bracketed:
+      - start_bracket: (
+      - column_reference:
+          naked_identifier: col1
+      - comma: ','
+      - column_reference:
+          naked_identifier: col2
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    vacuum_statement:
+      keyword: VACUUM
+      bracketed:
+        start_bracket: (
+        keyword: FULL
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    vacuum_statement:
+      keyword: VACUUM
+      bracketed:
+      - start_bracket: (
+      - keyword: FULL
+      - comma: ','
+      - keyword: FREEZE
+      - end_bracket: )
+      table_reference:
+        naked_identifier: tbl1
+- statement_terminator: ;
+- statement:
+    vacuum_statement:
+    - keyword: VACUUM
+    - bracketed:
+      - start_bracket: (
+      - keyword: FULL
+      - comma: ','
+      - keyword: FREEZE
+      - end_bracket: )
+    - table_reference:
+        naked_identifier: tbl1
+    - bracketed:
+      - start_bracket: (
+      - column_reference:
+          naked_identifier: col1
+      - comma: ','
+      - column_reference:
+          naked_identifier: col2
+      - end_bracket: )
+    - comma: ','
+    - table_reference:
+        naked_identifier: tbl2
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: col3
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    vacuum_statement:
+      keyword: VACUUM
+      bracketed:
+      - start_bracket: (
+      - keyword: FULL
+      - boolean_literal: 'TRUE'
+      - comma: ','
+      - keyword: FREEZE
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    vacuum_statement:
+      keyword: VACUUM
+      bracketed:
+      - start_bracket: (
+      - keyword: FULL
+      - boolean_literal: 'TRUE'
+      - comma: ','
+      - keyword: FREEZE
+      - boolean_literal: 'FALSE'
+      - comma: ','
+      - keyword: VERBOSE
+      - comma: ','
+      - keyword: ANALYZE
+      - comma: ','
+      - keyword: ANALYSE
+      - comma: ','
+      - keyword: DISABLE_PAGE_SKIPPING
+      - comma: ','
+      - keyword: SKIP_LOCKED
+      - comma: ','
+      - keyword: INDEX_CLEANUP
+      - naked_identifier: 'on'
+      - comma: ','
+      - keyword: PROCESS_TOAST
+      - comma: ','
+      - keyword: TRUNCATE
+      - comma: ','
+      - keyword: PARALLEL
+      - numeric_literal: '70'
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    vacuum_statement:
+      keyword: VACUUM
+      bracketed:
+        start_bracket: (
+        keyword: INDEX_CLEANUP
+        naked_identifier: 'off'
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    vacuum_statement:
+      keyword: VACUUM
+      bracketed:
+        start_bracket: (
+        keyword: INDEX_CLEANUP
+        naked_identifier: auto
+        end_bracket: )
+- statement_terminator: ;
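The vacuum fixtures cover both the legacy keyword form (VACUUM FULL FREEZE VERBOSE ANALYSE) and the parenthesised option list, where boolean options may carry an explicit TRUE or FALSE, INDEX_CLEANUP takes on/off/auto (parsed as naked_identifiers), and PARALLEL takes a numeric literal. A sketch using a combination adapted from the fixture statements (Python, sqlfluff simple API):

    import sqlfluff

    # Option values appear inside the bracketed node of the vacuum_statement.
    tree = sqlfluff.parse(
        "VACUUM (FULL TRUE, INDEX_CLEANUP on, PARALLEL 70);", dialect="postgres"
    )
    print(tree)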
diff --git a/test/fixtures/dialects/postgres/postgres_values_in_subquery.yml b/test/fixtures/dialects/postgres/postgres_values_in_subquery.yml
index 364d9f1..b10e58b 100644
--- a/test/fixtures/dialects/postgres/postgres_values_in_subquery.yml
+++ b/test/fixtures/dialects/postgres/postgres_values_in_subquery.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: aba2f3f0a365ff71ee9b1e4e8f15c6529a208eed45bf272d26fd63097de2c522
+_hash: 933cfd65728859ad86b8bd1b29e76c524ff542d39de19ac6cd7ec3587b4814a6
 file:
 - statement:
     with_compound_statement:
@@ -34,12 +34,13 @@ file:
                   casting_operator: '::'
                   data_type:
                     keyword: NUMERIC
-                    bracketed:
-                    - start_bracket: (
-                    - numeric_literal: '4'
-                    - comma: ','
-                    - numeric_literal: '3'
-                    - end_bracket: )
+                    bracketed_arguments:
+                      bracketed:
+                      - start_bracket: (
+                      - numeric_literal: '4'
+                      - comma: ','
+                      - numeric_literal: '3'
+                      - end_bracket: )
             - end_bracket: )
           end_bracket: )
       select_statement:
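The hunk above shows a tree-shape change that recurs throughout the 2.x diff: length and precision arguments of a data type, such as NUMERIC(4, 3), now sit under a dedicated bracketed_arguments node instead of a bare bracketed child of data_type. A sketch of observing the new shape (Python, sqlfluff simple API; the table and column names are illustrative):

    import sqlfluff

    # In 2.x the (4, 3) appears as data_type -> bracketed_arguments -> bracketed.
    tree = sqlfluff.parse("SELECT a::NUMERIC(4, 3) FROM t;", dialect="postgres")
    print(tree)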
diff --git a/test/fixtures/dialects/postgres/postgres_with.sql b/test/fixtures/dialects/postgres/postgres_with.sql
index 0597560..6483888 100644
--- a/test/fixtures/dialects/postgres/postgres_with.sql
+++ b/test/fixtures/dialects/postgres/postgres_with.sql
@@ -1,3 +1,9 @@
+WITH w AS MATERIALIZED (
+    SELECT * FROM other_table
+)
+SELECT * FROM w AS w1 JOIN w AS w2 ON w1.key = w2.ref
+WHERE w2.key = 123;
+
 WITH w AS NOT MATERIALIZED (
     SELECT * FROM big_table
 )
@@ -33,3 +39,19 @@ WITH RECURSIVE search_graph(id, link, data, depth) AS (
     WHERE g.id = sg.link
 ) CYCLE id SET is_cycle USING path
 SELECT * FROM search_graph;
+
+-- test that DML queries are also selectable
+WITH tbl AS (
+    INSERT INTO a VALUES (5) RETURNING *
+)
+SELECT * FROM tbl;
+
+WITH tbl AS (
+    UPDATE a SET b = 5 RETURNING *
+)
+SELECT * FROM tbl;
+
+WITH tbl AS (
+    DELETE FROM a RETURNING *
+)
+SELECT * FROM tbl;
diff --git a/test/fixtures/dialects/postgres/postgres_with.yml b/test/fixtures/dialects/postgres/postgres_with.yml
index c218959..1ac47e5 100644
--- a/test/fixtures/dialects/postgres/postgres_with.yml
+++ b/test/fixtures/dialects/postgres/postgres_with.yml
@@ -3,8 +3,82 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 54e36cd157ed51f1b6e1d291ba93aa5eaa2c2a18744dd35bb06ee2cbbd7fce20
+_hash: 7dd7fa95cac9b0cf5224a8f2a0d007882f19ee2e7424f1339a6b0611e55df7b9
 file:
+- statement:
+    with_compound_statement:
+      keyword: WITH
+      common_table_expression:
+      - naked_identifier: w
+      - keyword: AS
+      - keyword: MATERIALIZED
+      - bracketed:
+          start_bracket: (
+          select_statement:
+            select_clause:
+              keyword: SELECT
+              select_clause_element:
+                wildcard_expression:
+                  wildcard_identifier:
+                    star: '*'
+            from_clause:
+              keyword: FROM
+              from_expression:
+                from_expression_element:
+                  table_expression:
+                    table_reference:
+                      naked_identifier: other_table
+          end_bracket: )
+      select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: w
+              alias_expression:
+                keyword: AS
+                naked_identifier: w1
+            join_clause:
+              keyword: JOIN
+              from_expression_element:
+                table_expression:
+                  table_reference:
+                    naked_identifier: w
+                alias_expression:
+                  keyword: AS
+                  naked_identifier: w2
+              join_on_condition:
+                keyword: 'ON'
+                expression:
+                - column_reference:
+                  - naked_identifier: w1
+                  - dot: .
+                  - naked_identifier: key
+                - comparison_operator:
+                    raw_comparison_operator: '='
+                - column_reference:
+                  - naked_identifier: w2
+                  - dot: .
+                  - naked_identifier: ref
+        where_clause:
+          keyword: WHERE
+          expression:
+            column_reference:
+            - naked_identifier: w2
+            - dot: .
+            - naked_identifier: key
+            comparison_operator:
+              raw_comparison_operator: '='
+            numeric_literal: '123'
+- statement_terminator: ;
 - statement:
     with_compound_statement:
       keyword: WITH
@@ -485,3 +559,110 @@ file:
                 table_reference:
                   naked_identifier: search_graph
 - statement_terminator: ;
+- statement:
+    with_compound_statement:
+      keyword: WITH
+      common_table_expression:
+        naked_identifier: tbl
+        keyword: AS
+        bracketed:
+          start_bracket: (
+          insert_statement:
+          - keyword: INSERT
+          - keyword: INTO
+          - table_reference:
+              naked_identifier: a
+          - values_clause:
+              keyword: VALUES
+              bracketed:
+                start_bracket: (
+                expression:
+                  numeric_literal: '5'
+                end_bracket: )
+          - keyword: RETURNING
+          - star: '*'
+          end_bracket: )
+      select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: tbl
+- statement_terminator: ;
+- statement:
+    with_compound_statement:
+      keyword: WITH
+      common_table_expression:
+        naked_identifier: tbl
+        keyword: AS
+        bracketed:
+          start_bracket: (
+          update_statement:
+          - keyword: UPDATE
+          - table_reference:
+              naked_identifier: a
+          - set_clause_list:
+              keyword: SET
+              set_clause:
+                column_reference:
+                  naked_identifier: b
+                comparison_operator:
+                  raw_comparison_operator: '='
+                numeric_literal: '5'
+          - keyword: RETURNING
+          - star: '*'
+          end_bracket: )
+      select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: tbl
+- statement_terminator: ;
+- statement:
+    with_compound_statement:
+      keyword: WITH
+      common_table_expression:
+        naked_identifier: tbl
+        keyword: AS
+        bracketed:
+          start_bracket: (
+          delete_statement:
+          - keyword: DELETE
+          - keyword: FROM
+          - table_reference:
+              naked_identifier: a
+          - keyword: RETURNING
+          - star: '*'
+          end_bracket: )
+      select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: tbl
+- statement_terminator: ;
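The with-statement fixtures confirm that data-modifying CTEs stay selectable: INSERT ... RETURNING *, UPDATE ... RETURNING * and DELETE ... RETURNING * each parse as the CTE body, with RETURNING represented as a keyword followed by a star node. A sketch (Python, sqlfluff simple API; statement text condensed from the fixture):

    import sqlfluff

    tree = sqlfluff.parse(
        "WITH tbl AS (INSERT INTO a VALUES (5) RETURNING *) SELECT * FROM tbl;",
        dialect="postgres",
    )
    print(tree)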
diff --git a/test/fixtures/dialects/redshift/redshift_alter_table.yml b/test/fixtures/dialects/redshift/redshift_alter_table.yml
index fdee0c5..6746a73 100644
--- a/test/fixtures/dialects/redshift/redshift_alter_table.yml
+++ b/test/fixtures/dialects/redshift/redshift_alter_table.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 6bc3c02fc648cb830e03576656136fbc00c666846eec82602e50eb1ab48945f2
+_hash: b358e5c9e83a669ac2d4bdf4ac461d1798190d2ff1004402370fd07685404904
 file:
 - statement:
     alter_table_statement:
@@ -97,10 +97,11 @@ file:
       - keyword: type
       - data_type:
           keyword: varchar
-          bracketed:
-            start_bracket: (
-            numeric_literal: '300'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '300'
+              end_bracket: )
 - statement_terminator: ;
 - statement:
     create_table_statement:
@@ -130,10 +131,11 @@ file:
           naked_identifier: c2
       - data_type:
           keyword: varchar
-          bracketed:
-            start_bracket: (
-            numeric_literal: '16'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '16'
+              end_bracket: )
       - column_attribute_segment:
         - keyword: encode
         - keyword: lzo
@@ -142,10 +144,11 @@ file:
           naked_identifier: c3
       - data_type:
           keyword: varchar
-          bracketed:
-            start_bracket: (
-            numeric_literal: '32'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '32'
+              end_bracket: )
       - column_attribute_segment:
         - keyword: encode
         - keyword: zstd
diff --git a/test/fixtures/dialects/redshift/redshift_alter_user.yml b/test/fixtures/dialects/redshift/redshift_alter_user.yml
index 39d23ef..99c0ee1 100644
--- a/test/fixtures/dialects/redshift/redshift_alter_user.yml
+++ b/test/fixtures/dialects/redshift/redshift_alter_user.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 716360cf5074e9649b480c59a6dd51b0f9edb1dfe792ca9c77e57098faa33d35
+_hash: f5ee1ab7a9f0253825805dba5756a115c6d7f90029c64bc8c51bcdc7464624b3
 file:
 - statement:
     alter_role_statement:
@@ -40,14 +40,13 @@ file:
     - keyword: nocreatedb
 - statement_terminator: ;
 - statement:
-    alter_user_statement:
+    alter_role_statement:
     - keyword: alter
     - keyword: user
     - role_reference:
         quoted_identifier: '"dbuser"'
     - keyword: reset
-    - object_reference:
-        naked_identifier: var
+    - parameter: var
 - statement_terminator: ;
 - statement:
     alter_user_statement:
@@ -307,14 +306,13 @@ file:
     - keyword: timeout
 - statement_terminator: ;
 - statement:
-    alter_user_statement:
+    alter_role_statement:
     - keyword: alter
     - keyword: user
     - role_reference:
         naked_identifier: dbuser
     - keyword: set
-    - object_reference:
-        naked_identifier: var
+    - parameter: var
     - keyword: to
     - numeric_literal: '100'
 - statement_terminator: ;
@@ -338,8 +336,7 @@ file:
     - role_reference:
         naked_identifier: dbuser
     - keyword: set
-    - object_reference:
-        naked_identifier: var
+    - parameter: var
     - comparison_operator:
         raw_comparison_operator: '='
     - quoted_literal: "'hi'"
@@ -365,8 +362,7 @@ file:
     - role_reference:
         naked_identifier: dbuser
     - keyword: set
-    - object_reference:
-        naked_identifier: var
+    - parameter: var
     - keyword: to
     - keyword: default
 - statement_terminator: ;
@@ -390,8 +386,7 @@ file:
     - role_reference:
         naked_identifier: dbuser
     - keyword: set
-    - object_reference:
-        naked_identifier: var
+    - parameter: var
     - comparison_operator:
         raw_comparison_operator: '='
     - keyword: default
@@ -411,14 +406,13 @@ file:
     - keyword: default
 - statement_terminator: ;
 - statement:
-    alter_user_statement:
+    alter_role_statement:
     - keyword: alter
     - keyword: user
     - role_reference:
         naked_identifier: dbuser
     - keyword: reset
-    - object_reference:
-        naked_identifier: var
+    - parameter: var
 - statement_terminator: ;
 - statement:
     alter_user_statement:
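The redshift hunks above reflect two 2.x reclassifications: ALTER USER forms that SET or RESET a configuration parameter now parse as alter_role_statement, and the parameter itself becomes a parameter node rather than an object_reference. A sketch (Python, sqlfluff simple API; statement text from the fixture):

    import sqlfluff

    # 'var' should surface as a parameter node under alter_role_statement.
    tree = sqlfluff.parse("alter user dbuser set var to 100;", dialect="redshift")
    print(tree)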
diff --git a/test/fixtures/dialects/redshift/redshift_cast_conversion.yml b/test/fixtures/dialects/redshift/redshift_cast_conversion.yml
index 6d2b295..7736aab 100644
--- a/test/fixtures/dialects/redshift/redshift_cast_conversion.yml
+++ b/test/fixtures/dialects/redshift/redshift_cast_conversion.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 6571d63bee7a840364cfc9dcc69baad6ce37f3b03876af1532d5469fdab84aa2
+_hash: 00f304d6ee102003b6d8f10e865a978a0d8426ae757544b3c721bc29c25262db
 file:
 - statement:
     select_statement:
@@ -164,12 +164,13 @@ file:
               keyword: as
               data_type:
                 keyword: decimal
-                bracketed:
-                - start_bracket: (
-                - numeric_literal: '38'
-                - comma: ','
-                - numeric_literal: '2'
-                - end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                  - start_bracket: (
+                  - numeric_literal: '38'
+                  - comma: ','
+                  - numeric_literal: '2'
+                  - end_bracket: )
               end_bracket: )
       from_clause:
         keyword: from
@@ -191,12 +192,13 @@ file:
               start_bracket: (
               data_type:
                 keyword: decimal
-                bracketed:
-                - start_bracket: (
-                - numeric_literal: '38'
-                - comma: ','
-                - numeric_literal: '2'
-                - end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                  - start_bracket: (
+                  - numeric_literal: '38'
+                  - comma: ','
+                  - numeric_literal: '2'
+                  - end_bracket: )
               comma: ','
               expression:
                 column_reference:
@@ -222,12 +224,13 @@ file:
               casting_operator: '::'
               data_type:
                 keyword: decimal
-                bracketed:
-                - start_bracket: (
-                - numeric_literal: '38'
-                - comma: ','
-                - numeric_literal: '2'
-                - end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                  - start_bracket: (
+                  - numeric_literal: '38'
+                  - comma: ','
+                  - numeric_literal: '2'
+                  - end_bracket: )
       from_clause:
         keyword: from
         from_expression:
diff --git a/test/fixtures/dialects/redshift/redshift_cast_with_whitespaces.yml b/test/fixtures/dialects/redshift/redshift_cast_with_whitespaces.yml
index b442cf8..05b8ce8 100644
--- a/test/fixtures/dialects/redshift/redshift_cast_with_whitespaces.yml
+++ b/test/fixtures/dialects/redshift/redshift_cast_with_whitespaces.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 45f6ce610eb11288932f751215ccd224f40f71b9709a919f29620b0453a0cdaf
+_hash: 8aa7f240daeacea8a3fa688ddb3fea842e1a61cbfda2ff0d93b25e749ebb126b
 file:
 - statement:
     select_statement:
@@ -138,10 +138,11 @@ file:
               casting_operator: '::'
               data_type:
                 keyword: VARCHAR
-                bracketed:
-                  start_bracket: (
-                  numeric_literal: '512'
-                  end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    numeric_literal: '512'
+                    end_bracket: )
       from_clause:
         keyword: FROM
         from_expression:
@@ -233,10 +234,11 @@ file:
                   casting_operator: '::'
                   data_type:
                     keyword: VARCHAR
-                    bracketed:
-                      start_bracket: (
-                      numeric_literal: '512'
-                      end_bracket: )
+                    bracketed_arguments:
+                      bracketed:
+                        start_bracket: (
+                        numeric_literal: '512'
+                        end_bracket: )
               - comparison_operator:
                   raw_comparison_operator: '='
               - cast_expression:
@@ -247,10 +249,11 @@ file:
                   casting_operator: '::'
                   data_type:
                     keyword: VARCHAR
-                    bracketed:
-                      start_bracket: (
-                      numeric_literal: '512'
-                      end_bracket: )
+                    bracketed_arguments:
+                      bracketed:
+                        start_bracket: (
+                        numeric_literal: '512'
+                        end_bracket: )
       where_clause:
         keyword: WHERE
         expression:
diff --git a/test/fixtures/dialects/redshift/redshift_create_external_table.yml b/test/fixtures/dialects/redshift/redshift_create_external_table.yml
index 727341e..38e3a0a 100644
--- a/test/fixtures/dialects/redshift/redshift_create_external_table.yml
+++ b/test/fixtures/dialects/redshift/redshift_create_external_table.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: e726b6bf80889e1cb0aa978bee525887797e2058afecba7ee00f19572ec16d8c
+_hash: 5b6df3353f64d90fb128da7d2a5d1a94e0961673673ec9d3ad2feb1de1c6d8ff
 file:
 - statement:
     create_external_table_statement:
@@ -281,12 +281,13 @@ file:
           naked_identifier: pricepaid
       - data_type:
           keyword: decimal
-          bracketed:
-          - start_bracket: (
-          - numeric_literal: '8'
-          - comma: ','
-          - numeric_literal: '2'
-          - end_bracket: )
+          bracketed_arguments:
+            bracketed:
+            - start_bracket: (
+            - numeric_literal: '8'
+            - comma: ','
+            - numeric_literal: '2'
+            - end_bracket: )
       - comma: ','
       - column_reference:
           naked_identifier: saletime
@@ -348,10 +349,11 @@ file:
           naked_identifier: event_type
       - data_type:
           keyword: varchar
-          bracketed:
-            start_bracket: (
-            numeric_literal: '10'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '10'
+              end_bracket: )
       - comma: ','
       - column_reference:
           naked_identifier: recipientaccountid
@@ -402,21 +404,23 @@ file:
           naked_identifier: club_name
       - data_type:
           keyword: varchar
-          bracketed:
-            start_bracket: (
-            numeric_literal: '15'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '15'
+              end_bracket: )
       - comma: ','
       - column_reference:
           naked_identifier: league_spi
       - data_type:
           keyword: decimal
-          bracketed:
-          - start_bracket: (
-          - numeric_literal: '6'
-          - comma: ','
-          - numeric_literal: '2'
-          - end_bracket: )
+          bracketed_arguments:
+            bracketed:
+            - start_bracket: (
+            - numeric_literal: '6'
+            - comma: ','
+            - numeric_literal: '2'
+            - end_bracket: )
       - comma: ','
       - column_reference:
           naked_identifier: league_nspi
@@ -468,10 +472,11 @@ file:
           naked_identifier: col2
       - data_type:
           keyword: varchar
-          bracketed:
-            start_bracket: (
-            numeric_literal: '10'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '10'
+              end_bracket: )
       - end_bracket: )
     - keyword: ROW
     - keyword: FORMAT
diff --git a/test/fixtures/dialects/redshift/redshift_create_external_table_as.yml b/test/fixtures/dialects/redshift/redshift_create_external_table_as.yml
index d560ad8..17b4ebb 100644
--- a/test/fixtures/dialects/redshift/redshift_create_external_table_as.yml
+++ b/test/fixtures/dialects/redshift/redshift_create_external_table_as.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 544c9c565672cef9ac4cefe062707d7dcde10903d1403182d76c833b5526b8b9
+_hash: 5a2eac3d5e8513beb33d3032b380eb39d87c37ababa89c2006b45da8aecf0b5e
 file:
 - statement:
     create_external_table_statement:
@@ -504,10 +504,11 @@ file:
             naked_identifier: l_shipmode
         - data_type:
             keyword: varchar
-            bracketed:
-              start_bracket: (
-              numeric_literal: '24'
-              end_bracket: )
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
+                numeric_literal: '24'
+                end_bracket: )
         - end_bracket: )
     - keyword: ROW
     - keyword: FORMAT
diff --git a/test/fixtures/dialects/redshift/redshift_create_model.yml b/test/fixtures/dialects/redshift/redshift_create_model.yml
index 83445ea..e0d9ae2 100644
--- a/test/fixtures/dialects/redshift/redshift_create_model.yml
+++ b/test/fixtures/dialects/redshift/redshift_create_model.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 6513c9d67d768b6b8d8fbe2e8b8ba21236c8c4fc3b0b300c1ac0dd492f38b461
+_hash: 08b1482a19515e0e55aaa7e583f133a972bc89ed1c69182d51b5ba62e9f3b3cb
 file:
 - statement:
     create_model_statement:
@@ -85,10 +85,10 @@ file:
     - keyword: EXCEPT
     - bracketed:
       - start_bracket: (
-      - raw: NUM_ROUND
+      - code: NUM_ROUND
       - single_quote: "'100'"
       - comma: ','
-      - raw: NUM_CLASS
+      - code: NUM_CLASS
       - single_quote: "'30'"
       - end_bracket: )
     - keyword: SETTINGS
@@ -192,7 +192,7 @@ file:
     - keyword: EXCEPT
     - bracketed:
         start_bracket: (
-        raw: K
+        code: K
         single_quote: "'5'"
         end_bracket: )
     - keyword: SETTINGS
diff --git a/test/fixtures/dialects/redshift/redshift_create_procedure.yml b/test/fixtures/dialects/redshift/redshift_create_procedure.yml
index b09abe6..cb55259 100644
--- a/test/fixtures/dialects/redshift/redshift_create_procedure.yml
+++ b/test/fixtures/dialects/redshift/redshift_create_procedure.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 16bdf27c12ddfed8b01f70cdd28d5ef9448e8161bb52cbbb05ac2492e515ec67
+_hash: 28b9d6a0b11c121a1f22aabba6fb4737c14511348e978c8ad2a58cf98697c1d7
 file:
 - statement:
     create_procedure_statement:
@@ -23,10 +23,11 @@ file:
         - parameter: f2
         - data_type:
             keyword: varchar
-            bracketed:
-              start_bracket: (
-              numeric_literal: '20'
-              end_bracket: )
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
+                numeric_literal: '20'
+                end_bracket: )
         - end_bracket: )
     - function_definition:
       - keyword: AS
@@ -60,19 +61,21 @@ file:
         - keyword: INOUT
         - data_type:
             keyword: varchar
-            bracketed:
-              start_bracket: (
-              numeric_literal: '256'
-              end_bracket: )
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
+                numeric_literal: '256'
+                end_bracket: )
         - comma: ','
         - parameter: out_var
         - keyword: OUT
         - data_type:
             keyword: varchar
-            bracketed:
-              start_bracket: (
-              numeric_literal: '256'
-              end_bracket: )
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
+                numeric_literal: '256'
+                end_bracket: )
         - end_bracket: )
     - function_definition:
         keyword: AS
diff --git a/test/fixtures/dialects/redshift/redshift_create_rls_policy.yml b/test/fixtures/dialects/redshift/redshift_create_rls_policy.yml
index 15b7458..78b547e 100644
--- a/test/fixtures/dialects/redshift/redshift_create_rls_policy.yml
+++ b/test/fixtures/dialects/redshift/redshift_create_rls_policy.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: a0d9fd0e05165e340cbde4a3938407ec0f549388c1fc0cc633f0a59dd35e7075
+_hash: 4a3d668d485fa7a049937fa617411a44a32ddfe24d99dfd50558f6d64e04d58f
 file:
 - statement:
     create_rls_policy_statement:
@@ -19,10 +19,11 @@ file:
           naked_identifier: catgroup
         data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '10'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '10'
+              end_bracket: )
         end_bracket: )
     - keyword: USING
     - bracketed:
@@ -49,21 +50,23 @@ file:
           naked_identifier: foo
       - data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '10'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '10'
+              end_bracket: )
       - comma: ','
       - column_reference:
           naked_identifier: bar
       - data_type:
           keyword: DECIMAL
-          bracketed:
-          - start_bracket: (
-          - numeric_literal: '10'
-          - comma: ','
-          - numeric_literal: '2'
-          - end_bracket: )
+          bracketed_arguments:
+            bracketed:
+            - start_bracket: (
+            - numeric_literal: '10'
+            - comma: ','
+            - numeric_literal: '2'
+            - end_bracket: )
       - end_bracket: )
     - keyword: AS
     - alias_expression:
diff --git a/test/fixtures/dialects/redshift/redshift_create_schema.yml b/test/fixtures/dialects/redshift/redshift_create_schema.yml
index 29800ec..f820a3a 100644
--- a/test/fixtures/dialects/redshift/redshift_create_schema.yml
+++ b/test/fixtures/dialects/redshift/redshift_create_schema.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: f22d569afdcb89d848ee18f33b4b2e6266e14a562eaafb3c38dc15add5be1c80
+_hash: 5884da012d93c06f3600f3449b67610e88369ece73a415609623e7b8d6781281
 file:
 - statement:
     create_schema_statement:
@@ -29,7 +29,7 @@ file:
     - schema_reference:
         naked_identifier: s1
     - keyword: AUTHORIZATION
-    - object_reference:
+    - role_reference:
         naked_identifier: dwuser
 - statement_terminator: ;
 - statement:
@@ -42,7 +42,7 @@ file:
     - schema_reference:
         naked_identifier: s1
     - keyword: AUTHORIZATION
-    - object_reference:
+    - role_reference:
         naked_identifier: dwuser
 - statement_terminator: ;
 - statement:
@@ -52,7 +52,7 @@ file:
     - schema_reference:
         naked_identifier: s1
     - keyword: AUTHORIZATION
-    - object_reference:
+    - role_reference:
         naked_identifier: dwuser
     - keyword: QUOTA
     - numeric_literal: '100'
@@ -68,7 +68,7 @@ file:
     - schema_reference:
         naked_identifier: s1
     - keyword: AUTHORIZATION
-    - object_reference:
+    - role_reference:
         naked_identifier: dwuser
     - keyword: QUOTA
     - numeric_literal: '100'
@@ -81,7 +81,7 @@ file:
     - schema_reference:
         naked_identifier: s1
     - keyword: AUTHORIZATION
-    - object_reference:
+    - role_reference:
         naked_identifier: dwuser
     - keyword: QUOTA
     - numeric_literal: '5'
@@ -97,7 +97,7 @@ file:
     - schema_reference:
         naked_identifier: s1
     - keyword: AUTHORIZATION
-    - object_reference:
+    - role_reference:
         naked_identifier: dwuser
     - keyword: QUOTA
     - numeric_literal: '5'
@@ -110,7 +110,7 @@ file:
     - schema_reference:
         naked_identifier: s1
     - keyword: AUTHORIZATION
-    - object_reference:
+    - role_reference:
         naked_identifier: dwuser
     - keyword: QUOTA
     - numeric_literal: '0.1'
@@ -126,7 +126,7 @@ file:
     - schema_reference:
         naked_identifier: s1
     - keyword: AUTHORIZATION
-    - object_reference:
+    - role_reference:
         naked_identifier: dwuser
     - keyword: QUOTA
     - numeric_literal: '0.1'
@@ -139,7 +139,7 @@ file:
     - schema_reference:
         naked_identifier: s1
     - keyword: AUTHORIZATION
-    - object_reference:
+    - role_reference:
         naked_identifier: dwuser
     - keyword: QUOTA
     - keyword: UNLIMITED
@@ -154,7 +154,7 @@ file:
     - schema_reference:
         naked_identifier: s1
     - keyword: AUTHORIZATION
-    - object_reference:
+    - role_reference:
         naked_identifier: dwuser
     - keyword: QUOTA
     - keyword: UNLIMITED
@@ -164,7 +164,7 @@ file:
     - keyword: CREATE
     - keyword: SCHEMA
     - keyword: AUTHORIZATION
-    - object_reference:
+    - role_reference:
         naked_identifier: dwuser
 - statement_terminator: ;
 - statement:
@@ -172,7 +172,7 @@ file:
     - keyword: CREATE
     - keyword: SCHEMA
     - keyword: AUTHORIZATION
-    - object_reference:
+    - role_reference:
         naked_identifier: dwuser
     - keyword: QUOTA
     - numeric_literal: '100'
@@ -183,7 +183,7 @@ file:
     - keyword: CREATE
     - keyword: SCHEMA
     - keyword: AUTHORIZATION
-    - object_reference:
+    - role_reference:
         naked_identifier: dwuser
     - keyword: QUOTA
     - numeric_literal: '5'
@@ -194,7 +194,7 @@ file:
     - keyword: CREATE
     - keyword: SCHEMA
     - keyword: AUTHORIZATION
-    - object_reference:
+    - role_reference:
         naked_identifier: dwuser
     - keyword: QUOTA
     - numeric_literal: '0.1'
@@ -205,7 +205,7 @@ file:
     - keyword: CREATE
     - keyword: SCHEMA
     - keyword: AUTHORIZATION
-    - object_reference:
+    - role_reference:
         naked_identifier: dwuser
     - keyword: QUOTA
     - keyword: UNLIMITED
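In the CREATE SCHEMA fixtures the owner named after AUTHORIZATION is retyped from a generic object_reference to a role_reference. A compact check of the retyping, under the same JSON-record assumption as above:

    import json
    import sqlfluff

    tree = sqlfluff.parse("CREATE SCHEMA s1 AUTHORIZATION dwuser;", dialect="redshift")
    # 2.0.x types the owner as a role_reference.
    print('"role_reference"' in json.dumps(tree))
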
diff --git a/test/fixtures/dialects/redshift/redshift_create_table.yml b/test/fixtures/dialects/redshift/redshift_create_table.yml
index e08673e..1a76d47 100644
--- a/test/fixtures/dialects/redshift/redshift_create_table.yml
+++ b/test/fixtures/dialects/redshift/redshift_create_table.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: e4accb429e0edd7857ac7c2df421079bd7aa7a9ac5ed932491e9e071bfe6c227
+_hash: 7d3fe19e4caf58d383de1e5d252eb2b50348dfcb5fa8a3a0a44196995e2453ce
 file:
 - statement:
     create_table_statement:
@@ -25,10 +25,11 @@ file:
           naked_identifier: col2
       - data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '5'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '5'
+              end_bracket: )
       - column_constraint_segment:
         - keyword: NOT
         - keyword: 'NULL'
@@ -67,10 +68,11 @@ file:
           naked_identifier: col2
       - data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '5'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '5'
+              end_bracket: )
       - column_attribute_segment:
         - keyword: GENERATED
         - keyword: BY
@@ -118,46 +120,51 @@ file:
           naked_identifier: col2
       - data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '5'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '5'
+              end_bracket: )
       - comma: ','
       - column_reference:
           naked_identifier: col3
       - data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '5'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '5'
+              end_bracket: )
       - comma: ','
       - column_reference:
           naked_identifier: col4
       - data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '5'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '5'
+              end_bracket: )
       - comma: ','
       - column_reference:
           naked_identifier: col5
       - data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '5'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '5'
+              end_bracket: )
       - comma: ','
       - column_reference:
           naked_identifier: col6
       - data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '5'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '5'
+              end_bracket: )
       - end_bracket: )
     - table_constraint:
       - keyword: DISTKEY
@@ -200,10 +207,11 @@ file:
           naked_identifier: col2
       - data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '5'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '5'
+              end_bracket: )
       - column_constraint_segment:
           keyword: REFERENCES
           table_reference:
@@ -251,10 +259,11 @@ file:
           naked_identifier: col2
       - data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '5'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '5'
+              end_bracket: )
       - end_bracket: )
     - table_constraint:
       - keyword: DISTKEY
@@ -291,10 +300,11 @@ file:
           naked_identifier: col2
       - data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '5'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '5'
+              end_bracket: )
       - end_bracket: )
     - table_constraint:
       - keyword: DISTKEY
@@ -331,10 +341,11 @@ file:
           naked_identifier: col2
       - data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '5'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '5'
+              end_bracket: )
       - end_bracket: )
     - table_constraint:
       - keyword: DISTKEY
@@ -371,10 +382,11 @@ file:
           naked_identifier: col2
       - data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '5'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '5'
+              end_bracket: )
       - end_bracket: )
     - keyword: BACKUP
     - keyword: 'YES'
@@ -400,10 +412,11 @@ file:
           naked_identifier: col2
       - data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '5'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '5'
+              end_bracket: )
       - end_bracket: )
     - keyword: BACKUP
     - keyword: 'NO'
@@ -447,10 +460,11 @@ file:
           naked_identifier: col2
       - data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '5'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '5'
+              end_bracket: )
       - column_attribute_segment:
         - keyword: ENCODE
         - keyword: TEXT255
@@ -492,10 +506,11 @@ file:
           naked_identifier: col2
       - data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '5'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '5'
+              end_bracket: )
       - column_attribute_segment:
         - keyword: ENCODE
         - keyword: TEXT255
@@ -504,10 +519,11 @@ file:
           naked_identifier: col3
       - data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '5'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '5'
+              end_bracket: )
       - column_attribute_segment:
         - keyword: COLLATE
         - keyword: CASE_SENSITIVE
@@ -516,10 +532,11 @@ file:
           naked_identifier: col3
       - data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '5'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '5'
+              end_bracket: )
       - column_attribute_segment:
         - keyword: COLLATE
         - keyword: CASE_INSENSITIVE
@@ -552,10 +569,11 @@ file:
           naked_identifier: col3
       - data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '60'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '60'
+              end_bracket: )
       - comma: ','
       - table_constraint:
           keyword: UNIQUE
@@ -610,10 +628,11 @@ file:
           naked_identifier: col3
       - data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '60'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '60'
+              end_bracket: )
       - comma: ','
       - table_constraint:
         - keyword: PRIMARY
@@ -673,10 +692,11 @@ file:
           naked_identifier: col3
       - data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '60'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '60'
+              end_bracket: )
       - comma: ','
       - table_constraint:
         - keyword: FOREIGN
@@ -801,10 +821,11 @@ file:
           naked_identifier: col_name
         data_type:
           keyword: VARCHAR
-          bracketed:
-            start_bracket: (
-            numeric_literal: '5'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              numeric_literal: '5'
+              end_bracket: )
         end_bracket: )
 - statement_terminator: ;
 - statement:
diff --git a/test/fixtures/dialects/snowflake/snowflake_alter_role.sql b/test/fixtures/dialects/snowflake/snowflake_alter_role.sql
new file mode 100644
index 0000000..6e97ad3
--- /dev/null
+++ b/test/fixtures/dialects/snowflake/snowflake_alter_role.sql
@@ -0,0 +1,10 @@
+ALTER ROLE IF EXISTS "test_role" RENAME TO "prod_role";
+ALTER ROLE "test_role" RENAME TO "prod_role";
+ALTER ROLE IF EXISTS "test_role" SET COMMENT = 'test_comment';
+ALTER ROLE IF EXISTS "test_role" UNSET COMMENT;
+ALTER ROLE "test_role" SET COMMENT = 'test_comment';
+ALTER ROLE "test_role" UNSET COMMENT;
+ALTER ROLE IF EXISTS "test_role" SET TAG TAG1 = 'value1';
+ALTER ROLE IF EXISTS "test_role" SET TAG TAG1 = 'value1', TAG1 = 'value2', TAG1 = 'value3';
+ALTER ROLE IF EXISTS "test_role" UNSET TAG TAG1;
+ALTER ROLE IF EXISTS "test_role" UNSET TAG TAG1, TAG2, TAG3;
diff --git a/test/fixtures/dialects/snowflake/snowflake_alter_role.yml b/test/fixtures/dialects/snowflake/snowflake_alter_role.yml
new file mode 100644
index 0000000..48e4aed
--- /dev/null
+++ b/test/fixtures/dialects/snowflake/snowflake_alter_role.yml
@@ -0,0 +1,157 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 72a427cac4910bd00b0e70ccbc8fea04c0d16f25ddc293b83871cfb5d25b05d3
+file:
+- statement:
+    alter_role_statement:
+    - keyword: ALTER
+    - keyword: ROLE
+    - keyword: IF
+    - keyword: EXISTS
+    - role_reference:
+        quoted_identifier: '"test_role"'
+    - keyword: RENAME
+    - keyword: TO
+    - role_reference:
+        quoted_identifier: '"prod_role"'
+- statement_terminator: ;
+- statement:
+    alter_role_statement:
+    - keyword: ALTER
+    - keyword: ROLE
+    - role_reference:
+        quoted_identifier: '"test_role"'
+    - keyword: RENAME
+    - keyword: TO
+    - role_reference:
+        quoted_identifier: '"prod_role"'
+- statement_terminator: ;
+- statement:
+    alter_role_statement:
+    - keyword: ALTER
+    - keyword: ROLE
+    - keyword: IF
+    - keyword: EXISTS
+    - role_reference:
+        quoted_identifier: '"test_role"'
+    - keyword: SET
+    - keyword: COMMENT
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'test_comment'"
+- statement_terminator: ;
+- statement:
+    alter_role_statement:
+    - keyword: ALTER
+    - keyword: ROLE
+    - keyword: IF
+    - keyword: EXISTS
+    - role_reference:
+        quoted_identifier: '"test_role"'
+    - keyword: UNSET
+    - role_reference:
+        naked_identifier: COMMENT
+- statement_terminator: ;
+- statement:
+    alter_role_statement:
+    - keyword: ALTER
+    - keyword: ROLE
+    - role_reference:
+        quoted_identifier: '"test_role"'
+    - keyword: SET
+    - keyword: COMMENT
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'test_comment'"
+- statement_terminator: ;
+- statement:
+    alter_role_statement:
+    - keyword: ALTER
+    - keyword: ROLE
+    - role_reference:
+        quoted_identifier: '"test_role"'
+    - keyword: UNSET
+    - role_reference:
+        naked_identifier: COMMENT
+- statement_terminator: ;
+- statement:
+    alter_role_statement:
+    - keyword: ALTER
+    - keyword: ROLE
+    - keyword: IF
+    - keyword: EXISTS
+    - role_reference:
+        quoted_identifier: '"test_role"'
+    - keyword: SET
+    - tag_equals:
+        keyword: TAG
+        tag_reference:
+          naked_identifier: TAG1
+        comparison_operator:
+          raw_comparison_operator: '='
+        quoted_literal: "'value1'"
+- statement_terminator: ;
+- statement:
+    alter_role_statement:
+    - keyword: ALTER
+    - keyword: ROLE
+    - keyword: IF
+    - keyword: EXISTS
+    - role_reference:
+        quoted_identifier: '"test_role"'
+    - keyword: SET
+    - tag_equals:
+      - keyword: TAG
+      - tag_reference:
+          naked_identifier: TAG1
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - quoted_literal: "'value1'"
+      - comma: ','
+      - tag_reference:
+          naked_identifier: TAG1
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - quoted_literal: "'value2'"
+      - comma: ','
+      - tag_reference:
+          naked_identifier: TAG1
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - quoted_literal: "'value3'"
+- statement_terminator: ;
+- statement:
+    alter_role_statement:
+    - keyword: ALTER
+    - keyword: ROLE
+    - keyword: IF
+    - keyword: EXISTS
+    - role_reference:
+        quoted_identifier: '"test_role"'
+    - keyword: UNSET
+    - keyword: TAG
+    - tag_reference:
+        naked_identifier: TAG1
+- statement_terminator: ;
+- statement:
+    alter_role_statement:
+    - keyword: ALTER
+    - keyword: ROLE
+    - keyword: IF
+    - keyword: EXISTS
+    - role_reference:
+        quoted_identifier: '"test_role"'
+    - keyword: UNSET
+    - keyword: TAG
+    - tag_reference:
+        naked_identifier: TAG1
+    - comma: ','
+    - tag_reference:
+        naked_identifier: TAG2
+    - comma: ','
+    - tag_reference:
+        naked_identifier: TAG3
+- statement_terminator: ;
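The new snowflake_alter_role fixture exercises the full ALTER ROLE surface now supported by the Snowflake dialect: RENAME TO, SET/UNSET COMMENT, and SET/UNSET TAG with one or several tag assignments, each with and without IF EXISTS. Looping over a few of the fixture statements makes an easy smoke test (the simple API raises if a statement fails to parse):

    import sqlfluff

    # Taken verbatim from the fixture above.
    statements = [
        'ALTER ROLE IF EXISTS "test_role" RENAME TO "prod_role";',
        "ALTER ROLE \"test_role\" SET COMMENT = 'test_comment';",
        'ALTER ROLE IF EXISTS "test_role" UNSET TAG TAG1, TAG2, TAG3;',
    ]
    for sql in statements:
        sqlfluff.parse(sql, dialect="snowflake")
    print("all parsed")
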
diff --git a/test/fixtures/dialects/snowflake/snowflake_alter_storage_integration.sql b/test/fixtures/dialects/snowflake/snowflake_alter_storage_integration.sql
new file mode 100644
index 0000000..18606f3
--- /dev/null
+++ b/test/fixtures/dialects/snowflake/snowflake_alter_storage_integration.sql
@@ -0,0 +1,114 @@
+alter storage integration test_integration set
+    tag tag1 = 'value1';
+
+alter storage integration test_integration set
+    tag tag1 = 'value1', tag2 = 'value2';
+
+alter storage integration test_integration
+    set comment = 'test comment';
+
+alter storage integration test_integration unset
+    comment;
+
+alter storage integration test_integration unset
+    tag tag1, tag2;
+
+alter storage integration if exists test_integration unset
+    tag tag1, tag2;
+
+alter storage integration test_integration unset
+     enabled;
+
+alter storage integration test_integration unset
+    comment;
+
+alter storage integration test_integration unset
+    storage_blocked_locations;
+
+alter storage integration test_integration set
+    enabled = true;
+
+alter storage integration test_integration
+    set enabled = false
+    comment = 'test comment';
+
+alter storage integration test_integration set
+    comment = 'test comment'
+    enabled = false;
+
+alter storage integration test_integration set
+    storage_aws_role_arn = 'test_role_arn';
+
+alter storage integration test_integration set
+    storage_aws_object_acl = 'test_object_acl';
+
+alter storage integration test_integration set
+    azure_tenant_id = 'test_azure_tenant_id';
+
+alter storage integration s3_int set
+  storage_aws_role_arn = 'arn:aws:iam::001234567890:role/myrole'
+  enabled = true
+  storage_allowed_locations = (
+    's3://mybucket1', 's3://mybucket2/'
+  );
+
+alter storage integration gcs_int set
+  enabled = true
+  storage_allowed_locations = (
+    'gcs://mybucket1/path1/',
+    'gcs://mybucket2/path2/'
+  );
+
+alter storage integration azure_int set
+  enabled = true
+  azure_tenant_id = 'a123b4c5-1234-123a-a12b-1a23b45678c9'
+  storage_allowed_locations = (
+    'azure://myaccount.blob.core.windows.net/mycontainer/path1/',
+    'azure://myaccount.blob.core.windows.net/mycontainer/path2/'
+  );
+
+alter storage integration s3_int set
+  storage_aws_role_arn = 'arn:aws:iam::001234567890:role/myrole'
+  enabled = true
+  storage_allowed_locations = ('*')
+  storage_blocked_locations = (
+    's3://mybucket3/path3/', 's3://mybucket4/path4/'
+    );
+
+
+alter storage integration gcs_int set
+  enabled = true
+  storage_allowed_locations = ('*')
+  storage_blocked_locations = (
+    'gcs://mybucket3/path3/', 'gcs://mybucket4/path4/'
+    );
+
+
+alter storage integration azure_int set
+  enabled = true
+  azure_tenant_id = 'a123b4c5-1234-123a-a12b-1a23b45678c9'
+  storage_allowed_locations = ('*')
+  storage_blocked_locations = (
+    'azure://myaccount.blob.core.windows.net/mycontainer/path3/',
+    'azure://myaccount.blob.core.windows.net/mycontainer/path4/'
+    );
+
+alter storage integration azure_int set
+  enabled = true
+  comment = 'test_comment'
+  azure_tenant_id = 'a123b4c5-1234-123a-a12b-1a23b45678c9'
+  storage_allowed_locations = ('*')
+  storage_blocked_locations = (
+    'azure://myaccount.blob.core.windows.net/mycontainer/path3/',
+    'azure://myaccount.blob.core.windows.net/mycontainer/path4/'
+    );
+
+alter storage integration if exists azure_int set
+  enabled = true
+  comment = 'test_comment'
+  azure_tenant_id = 'a123b4c5-1234-123a-a12b-1a23b45678c9'
+  storage_allowed_locations = ('*')
+  storage_blocked_locations = (
+    'azure://myaccount.blob.core.windows.net/mycontainer/path3/',
+    'azure://myaccount.blob.core.windows.net/mycontainer/path4/'
+    );
diff --git a/test/fixtures/dialects/snowflake/snowflake_alter_storage_integration.yml b/test/fixtures/dialects/snowflake/snowflake_alter_storage_integration.yml
new file mode 100644
index 0000000..4b1c334
--- /dev/null
+++ b/test/fixtures/dialects/snowflake/snowflake_alter_storage_integration.yml
@@ -0,0 +1,461 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 9f3c2e27d2d0d69837f87b5cf9864a70a48856d013b05649a9170d67acfde55d
+file:
+- statement:
+    alter_storage_integration_statement:
+    - keyword: alter
+    - keyword: storage
+    - keyword: integration
+    - object_reference:
+        naked_identifier: test_integration
+    - keyword: set
+    - tag_equals:
+        keyword: tag
+        tag_reference:
+          naked_identifier: tag1
+        comparison_operator:
+          raw_comparison_operator: '='
+        quoted_literal: "'value1'"
+- statement_terminator: ;
+- statement:
+    alter_storage_integration_statement:
+    - keyword: alter
+    - keyword: storage
+    - keyword: integration
+    - object_reference:
+        naked_identifier: test_integration
+    - keyword: set
+    - tag_equals:
+      - keyword: tag
+      - tag_reference:
+          naked_identifier: tag1
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - quoted_literal: "'value1'"
+      - comma: ','
+      - tag_reference:
+          naked_identifier: tag2
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - quoted_literal: "'value2'"
+- statement_terminator: ;
+- statement:
+    alter_storage_integration_statement:
+    - keyword: alter
+    - keyword: storage
+    - keyword: integration
+    - object_reference:
+        naked_identifier: test_integration
+    - keyword: set
+    - keyword: comment
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'test comment'"
+- statement_terminator: ;
+- statement:
+    alter_storage_integration_statement:
+    - keyword: alter
+    - keyword: storage
+    - keyword: integration
+    - object_reference:
+        naked_identifier: test_integration
+    - keyword: unset
+    - keyword: comment
+- statement_terminator: ;
+- statement:
+    alter_storage_integration_statement:
+    - keyword: alter
+    - keyword: storage
+    - keyword: integration
+    - object_reference:
+        naked_identifier: test_integration
+    - keyword: unset
+    - keyword: tag
+    - tag_reference:
+        naked_identifier: tag1
+    - comma: ','
+    - tag_reference:
+        naked_identifier: tag2
+- statement_terminator: ;
+- statement:
+    alter_storage_integration_statement:
+    - keyword: alter
+    - keyword: storage
+    - keyword: integration
+    - keyword: if
+    - keyword: exists
+    - object_reference:
+        naked_identifier: test_integration
+    - keyword: unset
+    - keyword: tag
+    - tag_reference:
+        naked_identifier: tag1
+    - comma: ','
+    - tag_reference:
+        naked_identifier: tag2
+- statement_terminator: ;
+- statement:
+    alter_storage_integration_statement:
+    - keyword: alter
+    - keyword: storage
+    - keyword: integration
+    - object_reference:
+        naked_identifier: test_integration
+    - keyword: unset
+    - keyword: enabled
+- statement_terminator: ;
+- statement:
+    alter_storage_integration_statement:
+    - keyword: alter
+    - keyword: storage
+    - keyword: integration
+    - object_reference:
+        naked_identifier: test_integration
+    - keyword: unset
+    - keyword: comment
+- statement_terminator: ;
+- statement:
+    alter_storage_integration_statement:
+    - keyword: alter
+    - keyword: storage
+    - keyword: integration
+    - object_reference:
+        naked_identifier: test_integration
+    - keyword: unset
+    - keyword: storage_blocked_locations
+- statement_terminator: ;
+- statement:
+    alter_storage_integration_statement:
+    - keyword: alter
+    - keyword: storage
+    - keyword: integration
+    - object_reference:
+        naked_identifier: test_integration
+    - keyword: set
+    - keyword: enabled
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - boolean_literal: 'true'
+- statement_terminator: ;
+- statement:
+    alter_storage_integration_statement:
+    - keyword: alter
+    - keyword: storage
+    - keyword: integration
+    - object_reference:
+        naked_identifier: test_integration
+    - keyword: set
+    - keyword: enabled
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - boolean_literal: 'false'
+    - keyword: comment
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'test comment'"
+- statement_terminator: ;
+- statement:
+    alter_storage_integration_statement:
+    - keyword: alter
+    - keyword: storage
+    - keyword: integration
+    - object_reference:
+        naked_identifier: test_integration
+    - keyword: set
+    - keyword: comment
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'test comment'"
+    - keyword: enabled
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - boolean_literal: 'false'
+- statement_terminator: ;
+- statement:
+    alter_storage_integration_statement:
+    - keyword: alter
+    - keyword: storage
+    - keyword: integration
+    - object_reference:
+        naked_identifier: test_integration
+    - keyword: set
+    - keyword: storage_aws_role_arn
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'test_role_arn'"
+- statement_terminator: ;
+- statement:
+    alter_storage_integration_statement:
+    - keyword: alter
+    - keyword: storage
+    - keyword: integration
+    - object_reference:
+        naked_identifier: test_integration
+    - keyword: set
+    - keyword: storage_aws_object_acl
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'test_object_acl'"
+- statement_terminator: ;
+- statement:
+    alter_storage_integration_statement:
+    - keyword: alter
+    - keyword: storage
+    - keyword: integration
+    - object_reference:
+        naked_identifier: test_integration
+    - keyword: set
+    - keyword: azure_tenant_id
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'test_azure_tenant_id'"
+- statement_terminator: ;
+- statement:
+    alter_storage_integration_statement:
+    - keyword: alter
+    - keyword: storage
+    - keyword: integration
+    - object_reference:
+        naked_identifier: s3_int
+    - keyword: set
+    - keyword: storage_aws_role_arn
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'arn:aws:iam::001234567890:role/myrole'"
+    - keyword: enabled
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - boolean_literal: 'true'
+    - keyword: storage_allowed_locations
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - bracketed:
+      - start_bracket: (
+      - bucket_path: "'s3://mybucket1'"
+      - comma: ','
+      - bucket_path: "'s3://mybucket2/'"
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_storage_integration_statement:
+    - keyword: alter
+    - keyword: storage
+    - keyword: integration
+    - object_reference:
+        naked_identifier: gcs_int
+    - keyword: set
+    - keyword: enabled
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - boolean_literal: 'true'
+    - keyword: storage_allowed_locations
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - bracketed:
+      - start_bracket: (
+      - bucket_path: "'gcs://mybucket1/path1/'"
+      - comma: ','
+      - bucket_path: "'gcs://mybucket2/path2/'"
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_storage_integration_statement:
+    - keyword: alter
+    - keyword: storage
+    - keyword: integration
+    - object_reference:
+        naked_identifier: azure_int
+    - keyword: set
+    - keyword: enabled
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - boolean_literal: 'true'
+    - keyword: azure_tenant_id
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'a123b4c5-1234-123a-a12b-1a23b45678c9'"
+    - keyword: storage_allowed_locations
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - bracketed:
+      - start_bracket: (
+      - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path1/'"
+      - comma: ','
+      - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path2/'"
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_storage_integration_statement:
+    - keyword: alter
+    - keyword: storage
+    - keyword: integration
+    - object_reference:
+        naked_identifier: s3_int
+    - keyword: set
+    - keyword: storage_aws_role_arn
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'arn:aws:iam::001234567890:role/myrole'"
+    - keyword: enabled
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - boolean_literal: 'true'
+    - keyword: storage_allowed_locations
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - bracketed:
+        start_bracket: (
+        quoted_star: "'*'"
+        end_bracket: )
+    - keyword: storage_blocked_locations
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - bracketed:
+      - start_bracket: (
+      - bucket_path: "'s3://mybucket3/path3/'"
+      - comma: ','
+      - bucket_path: "'s3://mybucket4/path4/'"
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_storage_integration_statement:
+    - keyword: alter
+    - keyword: storage
+    - keyword: integration
+    - object_reference:
+        naked_identifier: gcs_int
+    - keyword: set
+    - keyword: enabled
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - boolean_literal: 'true'
+    - keyword: storage_allowed_locations
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - bracketed:
+        start_bracket: (
+        quoted_star: "'*'"
+        end_bracket: )
+    - keyword: storage_blocked_locations
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - bracketed:
+      - start_bracket: (
+      - bucket_path: "'gcs://mybucket3/path3/'"
+      - comma: ','
+      - bucket_path: "'gcs://mybucket4/path4/'"
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_storage_integration_statement:
+    - keyword: alter
+    - keyword: storage
+    - keyword: integration
+    - object_reference:
+        naked_identifier: azure_int
+    - keyword: set
+    - keyword: enabled
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - boolean_literal: 'true'
+    - keyword: azure_tenant_id
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'a123b4c5-1234-123a-a12b-1a23b45678c9'"
+    - keyword: storage_allowed_locations
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - bracketed:
+        start_bracket: (
+        quoted_star: "'*'"
+        end_bracket: )
+    - keyword: storage_blocked_locations
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - bracketed:
+      - start_bracket: (
+      - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path3/'"
+      - comma: ','
+      - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path4/'"
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_storage_integration_statement:
+    - keyword: alter
+    - keyword: storage
+    - keyword: integration
+    - object_reference:
+        naked_identifier: azure_int
+    - keyword: set
+    - keyword: enabled
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - boolean_literal: 'true'
+    - keyword: comment
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'test_comment'"
+    - keyword: azure_tenant_id
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'a123b4c5-1234-123a-a12b-1a23b45678c9'"
+    - keyword: storage_allowed_locations
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - bracketed:
+        start_bracket: (
+        quoted_star: "'*'"
+        end_bracket: )
+    - keyword: storage_blocked_locations
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - bracketed:
+      - start_bracket: (
+      - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path3/'"
+      - comma: ','
+      - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path4/'"
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_storage_integration_statement:
+    - keyword: alter
+    - keyword: storage
+    - keyword: integration
+    - keyword: if
+    - keyword: exists
+    - object_reference:
+        naked_identifier: azure_int
+    - keyword: set
+    - keyword: enabled
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - boolean_literal: 'true'
+    - keyword: comment
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'test_comment'"
+    - keyword: azure_tenant_id
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'a123b4c5-1234-123a-a12b-1a23b45678c9'"
+    - keyword: storage_allowed_locations
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - bracketed:
+        start_bracket: (
+        quoted_star: "'*'"
+        end_bracket: )
+    - keyword: storage_blocked_locations
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - bracketed:
+      - start_bracket: (
+      - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path3/'"
+      - comma: ','
+      - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path4/'"
+      - end_bracket: )
+- statement_terminator: ;
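ALTER STORAGE INTEGRATION is likewise new in this release. Notable details in the generated tree: property names such as enabled, azure_tenant_id and storage_allowed_locations match as keywords, bucket URLs become bucket_path tokens, and a literal '*' location is typed quoted_star. A sketch reducing one of the fixture statements:

    import sqlfluff

    sql = (
        "alter storage integration s3_int set\n"
        "  enabled = true\n"
        "  storage_allowed_locations = ('s3://mybucket1', 's3://mybucket2/');"
    )
    # A trimmed-down version of the s3_int statement in the fixture above.
    sqlfluff.parse(sql, dialect="snowflake")
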
diff --git a/test/fixtures/dialects/snowflake/snowflake_alter_table_clustering_action.yml b/test/fixtures/dialects/snowflake/snowflake_alter_table_clustering_action.yml
index eeb1402..db8d581 100644
--- a/test/fixtures/dialects/snowflake/snowflake_alter_table_clustering_action.yml
+++ b/test/fixtures/dialects/snowflake/snowflake_alter_table_clustering_action.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: d973326796043b7eb491c8672cf689d080f1b369077da228054d3acd88a1ba17
+_hash: 5462f4c98240c76d3b93e026e081026dcbd975fe32c9ed5f249a06470c14dbc5
 file:
 - statement:
     alter_table_statement:
@@ -80,7 +80,7 @@ file:
             cast_expression:
               column_reference:
                 naked_identifier: v
-              snowflake_semi_structured_expression:
+              semi_structured_expression:
               - colon: ':'
               - semi_structured_element: '"Data"'
               - colon: ':'
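The rename from snowflake_semi_structured_expression to semi_structured_expression recurs in every Snowflake fixture that accesses variant data with the colon operator (v:"Data":id and similar); only the segment's name changes, not its contents, but tooling that matches on the old key would need updating. A sketch, checking the exact JSON key to avoid substring false positives against the old prefixed name:

    import json
    import sqlfluff

    tree = sqlfluff.parse('SELECT v:"Data":id FROM t;', dialect="snowflake")
    # The un-prefixed key is what 2.0.x emits.
    print('"semi_structured_expression":' in json.dumps(tree))
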
diff --git a/test/fixtures/dialects/snowflake/snowflake_alter_table_column.sql b/test/fixtures/dialects/snowflake/snowflake_alter_table_column.sql
index cd8d066..72379ad 100644
--- a/test/fixtures/dialects/snowflake/snowflake_alter_table_column.sql
+++ b/test/fixtures/dialects/snowflake/snowflake_alter_table_column.sql
@@ -63,6 +63,11 @@ alter table empl_info modify
   , column empl_dob unset masking policy
 ;
 
+--- Set Tag
+ALTER TABLE my_table MODIFY COLUMN my_column SET TAG my_tag = 'tagged';
+
+--- Unset Tag
+ALTER TABLE my_table MODIFY COLUMN my_column UNSET TAG my_tag;
 
 -- Drop column
 ALTER TABLE empl_info DROP COLUMN my_column;
diff --git a/test/fixtures/dialects/snowflake/snowflake_alter_table_column.yml b/test/fixtures/dialects/snowflake/snowflake_alter_table_column.yml
index 0ccc337..218ea11 100644
--- a/test/fixtures/dialects/snowflake/snowflake_alter_table_column.yml
+++ b/test/fixtures/dialects/snowflake/snowflake_alter_table_column.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 22878f1f16fcd23b97eeabe2abce943e02764130cdad64ba43b7353535333d37
+_hash: a661d921f72d807ed26fa97225e83c40b6e705e975a7fe0622e259a0999323e8
 file:
 - statement:
     alter_table_statement:
@@ -32,11 +32,11 @@ file:
           naked_identifier: my_column
           data_type:
             data_type_identifier: VARCHAR
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '5000'
-              end_bracket: )
+                end_bracket: )
           column_constraint_segment:
           - keyword: NOT
           - keyword: 'NULL'
@@ -352,11 +352,11 @@ file:
       - keyword: type
       - data_type:
           data_type_identifier: varchar
-          bracketed:
-            start_bracket: (
-            expression:
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
               numeric_literal: '50'
-            end_bracket: )
+              end_bracket: )
       - comma: ','
       - keyword: column
       - column_reference:
@@ -516,6 +516,41 @@ file:
       - keyword: masking
       - keyword: policy
 - statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: my_table
+    - alter_table_table_column_action:
+      - keyword: MODIFY
+      - keyword: COLUMN
+      - column_reference:
+          naked_identifier: my_column
+      - keyword: SET
+      - keyword: TAG
+      - tag_reference:
+          naked_identifier: my_tag
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - quoted_literal: "'tagged'"
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: my_table
+    - alter_table_table_column_action:
+      - keyword: MODIFY
+      - keyword: COLUMN
+      - column_reference:
+          naked_identifier: my_column
+      - keyword: UNSET
+      - keyword: TAG
+      - tag_reference:
+          naked_identifier: my_tag
+- statement_terminator: ;
 - statement:
     alter_table_statement:
     - keyword: ALTER
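The alter-table-column fixture gains SET TAG and UNSET TAG on MODIFY COLUMN, parsed inside alter_table_table_column_action with the tag name as a tag_reference. A sketch reusing the two statements added to the fixture:

    import sqlfluff

    sqlfluff.parse(
        "ALTER TABLE my_table MODIFY COLUMN my_column SET TAG my_tag = 'tagged';",
        dialect="snowflake",
    )
    sqlfluff.parse(
        "ALTER TABLE my_table MODIFY COLUMN my_column UNSET TAG my_tag;",
        dialect="snowflake",
    )
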
diff --git a/test/fixtures/dialects/snowflake/snowflake_alter_warehouse.sql b/test/fixtures/dialects/snowflake/snowflake_alter_warehouse.sql
index 5b10073..00af8da 100644
--- a/test/fixtures/dialects/snowflake/snowflake_alter_warehouse.sql
+++ b/test/fixtures/dialects/snowflake/snowflake_alter_warehouse.sql
@@ -21,3 +21,6 @@ alter warehouse LOAD_WH SET MAX_CONCURRENCY_LEVEL = 1;
 alter warehouse LOAD_WH UNSET STATEMENT_QUEUED_TIMEOUT_IN_SECONDS;
 alter warehouse LOAD_WH UNSET WAREHOUSE_SIZE;
 alter warehouse LOAD_WH UNSET WAREHOUSE_SIZE, WAIT_FOR_COMPLETION;
+
+ALTER WAREHOUSE SET WAREHOUSE_SIZE='X-LARGE';
+alter warehouse set warehouse_size=medium
diff --git a/test/fixtures/dialects/snowflake/snowflake_alter_warehouse.yml b/test/fixtures/dialects/snowflake/snowflake_alter_warehouse.yml
index 3f7c479..36a88af 100644
--- a/test/fixtures/dialects/snowflake/snowflake_alter_warehouse.yml
+++ b/test/fixtures/dialects/snowflake/snowflake_alter_warehouse.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 4665ce4dc4718052ca9b6e69c12240d80062e18fdd955eda3471183c70e3a40e
+_hash: 6aef694b439b392aca42cbf1d8e3d55dd6b90b9c6489ff07af83528e281a1fdc
 file:
 - statement:
     alter_warehouse_statement:
@@ -271,3 +271,24 @@ file:
     - comma: ','
     - naked_identifier: WAIT_FOR_COMPLETION
 - statement_terminator: ;
+- statement:
+    alter_warehouse_statement:
+    - keyword: ALTER
+    - keyword: WAREHOUSE
+    - keyword: SET
+    - warehouse_object_properties:
+        keyword: WAREHOUSE_SIZE
+        comparison_operator:
+          raw_comparison_operator: '='
+        warehouse_size: "'X-LARGE'"
+- statement_terminator: ;
+- statement:
+    alter_warehouse_statement:
+    - keyword: alter
+    - keyword: warehouse
+    - keyword: set
+    - warehouse_object_properties:
+        keyword: warehouse_size
+        comparison_operator:
+          raw_comparison_operator: '='
+        warehouse_size: medium
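ALTER WAREHOUSE now also parses when the warehouse name is omitted (in Snowflake this form targets the session's current warehouse), and an unquoted size such as medium is accepted as a warehouse_size value. A sketch with the two new fixture lines, the second of which has no trailing semicolon, as in the fixture:

    import sqlfluff

    sqlfluff.parse("ALTER WAREHOUSE SET WAREHOUSE_SIZE='X-LARGE';", dialect="snowflake")
    sqlfluff.parse("alter warehouse set warehouse_size=medium", dialect="snowflake")
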
diff --git a/test/fixtures/dialects/snowflake/snowflake_bare_functions.sql b/test/fixtures/dialects/snowflake/snowflake_bare_functions.sql
new file mode 100644
index 0000000..f880387
--- /dev/null
+++ b/test/fixtures/dialects/snowflake/snowflake_bare_functions.sql
@@ -0,0 +1,8 @@
+SELECT
+  CURRENT_TIMESTAMP
+  , CURRENT_TIME
+  , CURRENT_DATE
+  , CURRENT_USER
+  , LOCALTIME
+  , LOCALTIMESTAMP
+;
diff --git a/test/fixtures/dialects/snowflake/snowflake_bare_functions.yml b/test/fixtures/dialects/snowflake/snowflake_bare_functions.yml
new file mode 100644
index 0000000..ae5900a
--- /dev/null
+++ b/test/fixtures/dialects/snowflake/snowflake_bare_functions.yml
@@ -0,0 +1,29 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: ab003ddfbb797cb27d30cc9c29af599f03eeae608c276a8104bb77ed8b79fc78
+file:
+  statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          bare_function: CURRENT_TIMESTAMP
+      - comma: ','
+      - select_clause_element:
+          bare_function: CURRENT_TIME
+      - comma: ','
+      - select_clause_element:
+          bare_function: CURRENT_DATE
+      - comma: ','
+      - select_clause_element:
+          bare_function: CURRENT_USER
+      - comma: ','
+      - select_clause_element:
+          bare_function: LOCALTIME
+      - comma: ','
+      - select_clause_element:
+          bare_function: LOCALTIMESTAMP
+  statement_terminator: ;
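The bare-functions fixture pins down that CURRENT_TIMESTAMP, CURRENT_DATE, CURRENT_USER, LOCALTIME and friends lex as bare_function in a select list, i.e. callable without parentheses rather than falling through to column references. Sketch:

    import json
    import sqlfluff

    tree = sqlfluff.parse("SELECT CURRENT_TIMESTAMP, LOCALTIME;", dialect="snowflake")
    # One bare_function key per select element: expect 2 here.
    print(json.dumps(tree).count('"bare_function"'))
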
diff --git a/test/fixtures/dialects/snowflake/snowflake_copy_into_table.sql b/test/fixtures/dialects/snowflake/snowflake_copy_into_table.sql
index d4c47e9..e3b8273 100644
--- a/test/fixtures/dialects/snowflake/snowflake_copy_into_table.sql
+++ b/test/fixtures/dialects/snowflake/snowflake_copy_into_table.sql
@@ -18,6 +18,11 @@ from @my_int_stage
   file_format = (format_name = myformat)
   pattern='.*sales.*[.]csv';
 
+copy into mytable
+from @my_int_stage
+  file_format = (format_name = myformat)
+  pattern=$my_var;
+
 copy into mytable;
 
 copy into mytable
diff --git a/test/fixtures/dialects/snowflake/snowflake_copy_into_table.yml b/test/fixtures/dialects/snowflake/snowflake_copy_into_table.yml
index d10f3cd..cb40717 100644
--- a/test/fixtures/dialects/snowflake/snowflake_copy_into_table.yml
+++ b/test/fixtures/dialects/snowflake/snowflake_copy_into_table.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 0db76c650de2449edceff9f0debbedd3c0fdda7271f6a29dbf2336f5e5b0c4bd
+_hash: 8c4d4e7e1e19b88fd5a9c7ea830ff7525d26b585df90d081acfbcf8a23b3a30f
 file:
 - statement:
     copy_into_table_statement:
@@ -110,6 +110,32 @@ file:
         raw_comparison_operator: '='
     - quoted_literal: "'.*sales.*[.]csv'"
 - statement_terminator: ;
+- statement:
+    copy_into_table_statement:
+    - keyword: copy
+    - keyword: into
+    - table_reference:
+        naked_identifier: mytable
+    - keyword: from
+    - storage_location:
+        stage_path: '@my_int_stage'
+    - keyword: file_format
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - file_format_segment:
+        bracketed:
+          start_bracket: (
+          keyword: format_name
+          comparison_operator:
+            raw_comparison_operator: '='
+          object_reference:
+            naked_identifier: myformat
+          end_bracket: )
+    - keyword: pattern
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - variable: $my_var
+- statement_terminator: ;
 - statement:
     copy_into_table_statement:
     - keyword: copy
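COPY INTO ... PATTERN now accepts a session variable in place of a quoted literal, parsed as a variable token ($my_var above). A sketch with the fixture statement:

    import sqlfluff

    sql = (
        "copy into mytable\n"
        "from @my_int_stage\n"
        "  file_format = (format_name = myformat)\n"
        "  pattern=$my_var;"
    )
    sqlfluff.parse(sql, dialect="snowflake")
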
diff --git a/test/fixtures/dialects/snowflake/snowflake_create_clone.sql b/test/fixtures/dialects/snowflake/snowflake_create_clone.sql
new file mode 100644
index 0000000..7db4fb5
--- /dev/null
+++ b/test/fixtures/dialects/snowflake/snowflake_create_clone.sql
@@ -0,0 +1,11 @@
+CREATE DATABASE mytestdb_clone CLONE mytestdb;
+
+CREATE SCHEMA mytestschema_clone CLONE testschema;
+
+CREATE TABLE orders_clone CLONE orders;
+
+CREATE SCHEMA mytestschema_clone_restore CLONE testschema BEFORE (TIMESTAMP => TO_TIMESTAMP(40*365*86400));
+
+CREATE TABLE orders_clone_restore CLONE orders AT (TIMESTAMP => TO_TIMESTAMP_TZ('04/05/2013 01:02:03', 'mm/dd/yyyy hh24:mi:ss'));
+
+CREATE TABLE orders_clone_restore CLONE orders BEFORE (STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726');
diff --git a/test/fixtures/dialects/snowflake/snowflake_create_clone.yml b/test/fixtures/dialects/snowflake/snowflake_create_clone.yml
new file mode 100644
index 0000000..9af3b3c
--- /dev/null
+++ b/test/fixtures/dialects/snowflake/snowflake_create_clone.yml
@@ -0,0 +1,115 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 549e4efbfe226096e1bc994ca038c5641128fd54e090debe72634e6436c9cd99
+file:
+- statement:
+    create_clone_statement:
+    - keyword: CREATE
+    - keyword: DATABASE
+    - object_reference:
+        naked_identifier: mytestdb_clone
+    - keyword: CLONE
+    - object_reference:
+        naked_identifier: mytestdb
+- statement_terminator: ;
+- statement:
+    create_clone_statement:
+    - keyword: CREATE
+    - keyword: SCHEMA
+    - object_reference:
+        naked_identifier: mytestschema_clone
+    - keyword: CLONE
+    - object_reference:
+        naked_identifier: testschema
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: orders_clone
+    - keyword: CLONE
+    - table_reference:
+        naked_identifier: orders
+- statement_terminator: ;
+- statement:
+    create_clone_statement:
+    - keyword: CREATE
+    - keyword: SCHEMA
+    - object_reference:
+        naked_identifier: mytestschema_clone_restore
+    - keyword: CLONE
+    - object_reference:
+        naked_identifier: testschema
+    - from_before_expression:
+        keyword: BEFORE
+        bracketed:
+          start_bracket: (
+          keyword: TIMESTAMP
+          parameter_assigner: =>
+          expression:
+            function:
+              function_name:
+                function_name_identifier: TO_TIMESTAMP
+              bracketed:
+                start_bracket: (
+                expression:
+                - numeric_literal: '40'
+                - binary_operator: '*'
+                - numeric_literal: '365'
+                - binary_operator: '*'
+                - numeric_literal: '86400'
+                end_bracket: )
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_clone_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - object_reference:
+        naked_identifier: orders_clone_restore
+    - keyword: CLONE
+    - object_reference:
+        naked_identifier: orders
+    - from_at_expression:
+        keyword: AT
+        bracketed:
+          start_bracket: (
+          keyword: TIMESTAMP
+          parameter_assigner: =>
+          expression:
+            function:
+              function_name:
+                function_name_identifier: TO_TIMESTAMP_TZ
+              bracketed:
+              - start_bracket: (
+              - expression:
+                  quoted_literal: "'04/05/2013 01:02:03'"
+              - comma: ','
+              - expression:
+                  quoted_literal: "'mm/dd/yyyy hh24:mi:ss'"
+              - end_bracket: )
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_clone_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - object_reference:
+        naked_identifier: orders_clone_restore
+    - keyword: CLONE
+    - object_reference:
+        naked_identifier: orders
+    - from_before_expression:
+        keyword: BEFORE
+        bracketed:
+          start_bracket: (
+          keyword: STATEMENT
+          parameter_assigner: =>
+          expression:
+            quoted_literal: "'8e5d0ca9-005e-44e6-b858-a8f5b37c5726'"
+          end_bracket: )
+- statement_terminator: ;
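
Editor's note on the fixture workflow the generated-file headers above describe: every .sql file under test/fixtures/dialects/ is parsed by SQLFluff and serialized to a sibling .yml parse record, and the _hash field must match a hash the test suite computes, so hand-edited YAML fails the tests. As a rough sketch (assuming sqlfluff >= 2.0 is installed), the simple Python API returns the same kind of nested record these fixtures store:

# Sketch only: reproduce the kind of parse record stored in the .yml
# fixtures. The header comment and _hash field are added by
# test/generate_parse_fixture_yml.py, not by this call.
import sqlfluff

sql = "CREATE DATABASE mytestdb_clone CLONE mytestdb;"
record = sqlfluff.parse(sql, dialect="snowflake")
print(record)  # nested dicts/lists with a top-level 'file' key
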
diff --git a/test/fixtures/dialects/snowflake/snowflake_create_external_table.yml b/test/fixtures/dialects/snowflake/snowflake_create_external_table.yml
index e61c5dd..83a0a71 100644
--- a/test/fixtures/dialects/snowflake/snowflake_create_external_table.yml
+++ b/test/fixtures/dialects/snowflake/snowflake_create_external_table.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: af66c03d2426536302e47edcc891a5fdf0e17bbfd2c5486e5682142a3d157399
+_hash: 4365dc3e6ab7cd269f5d84bafa6c9298f3eb5e5d2e2727235ab88d604bcc102a
 file:
 - statement:
     create_external_table_statement:
@@ -96,7 +96,7 @@ file:
                 cast_expression:
                   column_reference:
                     naked_identifier: VALUE
-                  snowflake_semi_structured_expression:
+                  semi_structured_expression:
                     colon: ':'
                     semi_structured_element: c1
                   casting_operator: '::'
@@ -121,7 +121,7 @@ file:
                 cast_expression:
                   column_reference:
                     naked_identifier: VALUE
-                  snowflake_semi_structured_expression:
+                  semi_structured_expression:
                     colon: ':'
                     semi_structured_element: c2
                   casting_operator: '::'
@@ -146,7 +146,7 @@ file:
                 cast_expression:
                   column_reference:
                     naked_identifier: VALUE
-                  snowflake_semi_structured_expression:
+                  semi_structured_expression:
                     colon: ':'
                     semi_structured_element: c3
                   casting_operator: '::'
@@ -171,7 +171,7 @@ file:
                 cast_expression:
                   column_reference:
                     naked_identifier: VALUE
-                  snowflake_semi_structured_expression:
+                  semi_structured_expression:
                     colon: ':'
                     semi_structured_element: c4
                   casting_operator: '::'
@@ -192,7 +192,7 @@ file:
             cast_expression:
               column_reference:
                 naked_identifier: VALUE
-              snowflake_semi_structured_expression:
+              semi_structured_expression:
                 colon: ':'
                 semi_structured_element: c5
               casting_operator: '::'
@@ -214,7 +214,7 @@ file:
                 cast_expression:
                   column_reference:
                     naked_identifier: VALUE
-                  snowflake_semi_structured_expression:
+                  semi_structured_expression:
                     colon: ':'
                     semi_structured_element: c6
                   casting_operator: '::'
@@ -239,7 +239,7 @@ file:
                 cast_expression:
                   column_reference:
                     naked_identifier: VALUE
-                  snowflake_semi_structured_expression:
+                  semi_structured_expression:
                     colon: ':'
                     semi_structured_element: c7
                   casting_operator: '::'
@@ -264,7 +264,7 @@ file:
                 cast_expression:
                   column_reference:
                     naked_identifier: VALUE
-                  snowflake_semi_structured_expression:
+                  semi_structured_expression:
                     colon: ':'
                     semi_structured_element: c8
                   casting_operator: '::'
@@ -285,7 +285,7 @@ file:
             cast_expression:
               column_reference:
                 naked_identifier: VALUE
-              snowflake_semi_structured_expression:
+              semi_structured_expression:
                 colon: ':'
                 semi_structured_element: c9
               casting_operator: '::'
@@ -307,7 +307,7 @@ file:
                 cast_expression:
                   column_reference:
                     naked_identifier: VALUE
-                  snowflake_semi_structured_expression:
+                  semi_structured_expression:
                     colon: ':'
                     semi_structured_element: c10
                   casting_operator: '::'
@@ -332,7 +332,7 @@ file:
                 cast_expression:
                   column_reference:
                     naked_identifier: VALUE
-                  snowflake_semi_structured_expression:
+                  semi_structured_expression:
                     colon: ':'
                     semi_structured_element: c11
                   casting_operator: '::'
@@ -357,7 +357,7 @@ file:
                 cast_expression:
                   column_reference:
                     naked_identifier: VALUE
-                  snowflake_semi_structured_expression:
+                  semi_structured_expression:
                     colon: ':'
                     semi_structured_element: c12
                   casting_operator: '::'
@@ -378,7 +378,7 @@ file:
             cast_expression:
               column_reference:
                 naked_identifier: VALUE
-              snowflake_semi_structured_expression:
+              semi_structured_expression:
                 colon: ':'
                 semi_structured_element: c13
               casting_operator: '::'
@@ -396,7 +396,7 @@ file:
             cast_expression:
               column_reference:
                 naked_identifier: VALUE
-              snowflake_semi_structured_expression:
+              semi_structured_expression:
                 colon: ':'
                 semi_structured_element: c14
               casting_operator: '::'
@@ -418,7 +418,7 @@ file:
                 cast_expression:
                   column_reference:
                     naked_identifier: VALUE
-                  snowflake_semi_structured_expression:
+                  semi_structured_expression:
                     colon: ':'
                     semi_structured_element: c15
                   casting_operator: '::'
@@ -443,7 +443,7 @@ file:
                 cast_expression:
                   column_reference:
                     naked_identifier: VALUE
-                  snowflake_semi_structured_expression:
+                  semi_structured_expression:
                     colon: ':'
                     semi_structured_element: c16
                   casting_operator: '::'
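
The repeated one-line hunks above all make the same change: the Snowflake-specific snowflake_semi_structured_expression segment was renamed to the dialect-neutral semi_structured_expression in 2.x, so these fixtures are regenerated with a new _hash but an otherwise unchanged tree. A minimal sketch (same sqlfluff >= 2.0 assumption; t is a placeholder table name, and parsing needs no live database) for checking which segment name a parse record contains:

# Sketch: depth-first search of the nested dict/list record returned by
# sqlfluff.parse for a given segment name.
import sqlfluff

def contains_segment(node, name):
    if isinstance(node, dict):
        return name in node or any(contains_segment(v, name) for v in node.values())
    if isinstance(node, list):
        return any(contains_segment(v, name) for v in node)
    return False

record = sqlfluff.parse("SELECT value:id::varchar FROM t;", dialect="snowflake")
print(contains_segment(record, "semi_structured_expression"))            # expected True on 2.0.5
print(contains_segment(record, "snowflake_semi_structured_expression"))  # expected False
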
diff --git a/test/fixtures/dialects/snowflake/snowflake_create_function.sql b/test/fixtures/dialects/snowflake/snowflake_create_function.sql
index 853757e..edfe4a8 100644
--- a/test/fixtures/dialects/snowflake/snowflake_create_function.sql
+++ b/test/fixtures/dialects/snowflake/snowflake_create_function.sql
@@ -42,3 +42,56 @@ CREATE FUNCTION IF NOT EXISTS simple_table_function ()
     UNION ALL
     SELECT 3, 4
   $$;
+
+create function my_decrement_udf(i numeric(9, 0))
+    returns numeric
+    language java
+    imports = ('@~/my_decrement_udf_package_dir/my_decrement_udf_jar.jar')
+    handler = 'my_decrement_udf_package.my_decrement_udf_class.my_decrement_udf_method'
+    ;
+
+create or replace function echo_varchar(x varchar)
+returns varchar
+language java
+called on null input
+handler='TestFunc.echoVarchar'
+target_path='@~/testfunc.jar'
+as
+'class TestFunc {
+  public static String echoVarchar(String x) {
+    return x;
+  }
+}';
+
+create or replace function py_udf()
+  returns variant
+  language python
+  runtime_version = '3.8'
+  packages = ('numpy','pandas','xgboost==1.5.0')
+  handler = 'udf'
+as $$
+import numpy as np
+import pandas as pd
+import xgboost as xgb
+def udf():
+    return [np.__version__, pd.__version__, xgb.__version__]
+$$;
+
+create or replace function dream(i int)
+  returns variant
+  language python
+  runtime_version = '3.8'
+  handler = 'sleepy.snore'
+  imports = ('@my_stage/sleepy.py')
+;
+
+create or replace function addone(i int)
+returns int
+language python
+runtime_version = '3.8'
+handler = 'addone_py'
+as
+$$
+def addone_py(i):
+  return i+1
+$$;
diff --git a/test/fixtures/dialects/snowflake/snowflake_create_function.yml b/test/fixtures/dialects/snowflake/snowflake_create_function.yml
index 3049937..06071c8 100644
--- a/test/fixtures/dialects/snowflake/snowflake_create_function.yml
+++ b/test/fixtures/dialects/snowflake/snowflake_create_function.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 3794c727e6ea51d7748a9aef8ef801977031285882a50454ca7145a7746b11d3
+_hash: 0630997eb3e8b4bfb534a6524c14c7cab5a4bdd141bda2448a39df6e994d8b1d
 file:
 - statement:
     create_function_statement:
@@ -144,3 +144,184 @@ file:
     - keyword: AS
     - udf_body: "$$\n    SELECT 1, 2\n    UNION ALL\n    SELECT 3, 4\n  $$"
 - statement_terminator: ;
+- statement:
+    create_function_statement:
+    - keyword: create
+    - keyword: function
+    - function_name:
+        function_name_identifier: my_decrement_udf
+    - function_parameter_list:
+        bracketed:
+          start_bracket: (
+          parameter: i
+          data_type:
+            data_type_identifier: numeric
+            bracketed_arguments:
+              bracketed:
+              - start_bracket: (
+              - numeric_literal: '9'
+              - comma: ','
+              - numeric_literal: '0'
+              - end_bracket: )
+          end_bracket: )
+    - keyword: returns
+    - data_type:
+        data_type_identifier: numeric
+    - keyword: language
+    - keyword: java
+    - keyword: imports
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - bracketed:
+        start_bracket: (
+        quoted_literal: "'@~/my_decrement_udf_package_dir/my_decrement_udf_jar.jar'"
+        end_bracket: )
+    - keyword: handler
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'my_decrement_udf_package.my_decrement_udf_class.my_decrement_udf_method'"
+- statement_terminator: ;
+- statement:
+    create_function_statement:
+    - keyword: create
+    - keyword: or
+    - keyword: replace
+    - keyword: function
+    - function_name:
+        function_name_identifier: echo_varchar
+    - function_parameter_list:
+        bracketed:
+          start_bracket: (
+          parameter: x
+          data_type:
+            data_type_identifier: varchar
+          end_bracket: )
+    - keyword: returns
+    - data_type:
+        data_type_identifier: varchar
+    - keyword: language
+    - keyword: java
+    - keyword: called
+    - keyword: 'on'
+    - keyword: 'null'
+    - keyword: input
+    - keyword: handler
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'TestFunc.echoVarchar'"
+    - keyword: target_path
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'@~/testfunc.jar'"
+    - keyword: as
+    - udf_body: "'class TestFunc {\n  public static String echoVarchar(String x) {\n\
+        \    return x;\n  }\n}'"
+- statement_terminator: ;
+- statement:
+    create_function_statement:
+    - keyword: create
+    - keyword: or
+    - keyword: replace
+    - keyword: function
+    - function_name:
+        function_name_identifier: py_udf
+    - function_parameter_list:
+        bracketed:
+          start_bracket: (
+          end_bracket: )
+    - keyword: returns
+    - data_type:
+        data_type_identifier: variant
+    - keyword: language
+    - keyword: python
+    - keyword: runtime_version
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'3.8'"
+    - keyword: packages
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - bracketed:
+      - start_bracket: (
+      - quoted_literal: "'numpy'"
+      - comma: ','
+      - quoted_literal: "'pandas'"
+      - comma: ','
+      - quoted_literal: "'xgboost==1.5.0'"
+      - end_bracket: )
+    - keyword: handler
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'udf'"
+    - keyword: as
+    - udf_body: "$$\nimport numpy as np\nimport pandas as pd\nimport xgboost as xgb\n\
+        def udf():\n    return [np.__version__, pd.__version__, xgb.__version__]\n\
+        $$"
+- statement_terminator: ;
+- statement:
+    create_function_statement:
+    - keyword: create
+    - keyword: or
+    - keyword: replace
+    - keyword: function
+    - function_name:
+        function_name_identifier: dream
+    - function_parameter_list:
+        bracketed:
+          start_bracket: (
+          parameter: i
+          data_type:
+            data_type_identifier: int
+          end_bracket: )
+    - keyword: returns
+    - data_type:
+        data_type_identifier: variant
+    - keyword: language
+    - keyword: python
+    - keyword: runtime_version
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'3.8'"
+    - keyword: handler
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'sleepy.snore'"
+    - keyword: imports
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - bracketed:
+        start_bracket: (
+        quoted_literal: "'@my_stage/sleepy.py'"
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_function_statement:
+    - keyword: create
+    - keyword: or
+    - keyword: replace
+    - keyword: function
+    - function_name:
+        function_name_identifier: addone
+    - function_parameter_list:
+        bracketed:
+          start_bracket: (
+          parameter: i
+          data_type:
+            data_type_identifier: int
+          end_bracket: )
+    - keyword: returns
+    - data_type:
+        data_type_identifier: int
+    - keyword: language
+    - keyword: python
+    - keyword: runtime_version
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'3.8'"
+    - keyword: handler
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'addone_py'"
+    - keyword: as
+    - udf_body: "$$\ndef addone_py(i):\n  return i+1\n$$"
+- statement_terminator: ;
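
The new create_function fixtures above exercise Snowflake's Java and Python UDF clauses (language, runtime_version, packages, imports, handler, target_path, called on null input, and $$-quoted bodies). Since the simple API raises when a statement cannot be parsed, a minimal sketch for verifying one of the new forms, reusing the addone fixture statement verbatim:

# Sketch: sqlfluff.parse raises APIParsingError (a ValueError subclass in
# the simple API) on unparsable input, so a clean return means the
# snowflake dialect accepted the statement.
import sqlfluff

sql = """create or replace function addone(i int)
returns int
language python
runtime_version = '3.8'
handler = 'addone_py'
as
$$
def addone_py(i):
  return i+1
$$;
"""
try:
    sqlfluff.parse(sql, dialect="snowflake")
    print("parsed OK")
except ValueError as err:
    print(f"parse failed: {err}")
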
diff --git a/test/fixtures/dialects/snowflake/snowflake_create_notification_integration.yml b/test/fixtures/dialects/snowflake/snowflake_create_notification_integration.yml
index 1354d9d..ae93e50 100644
--- a/test/fixtures/dialects/snowflake/snowflake_create_notification_integration.yml
+++ b/test/fixtures/dialects/snowflake/snowflake_create_notification_integration.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 93ae46885aa1d2a3a2dadfaf15930633e76855483852508af8b77a7d7b187d86
+_hash: 3c33cb37933f7f7c8f6d7e34ed3788b87248be615d88b29b6dcd5e297b8ccd6f
 file:
 - statement:
     create_statement:
@@ -21,20 +21,18 @@ file:
     - comparison_operator:
         raw_comparison_operator: '='
     - keyword: queue
-    - notification_integration_parameters:
-      - keyword: notification_provider
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - keyword: gcp_pubsub
+    - keyword: notification_provider
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - keyword: gcp_pubsub
     - keyword: enabled
     - comparison_operator:
         raw_comparison_operator: '='
     - boolean_literal: 'true'
-    - notification_integration_parameters:
-        keyword: gcp_pubsub_subscription_name
-        comparison_operator:
-          raw_comparison_operator: '='
-        quoted_literal: "'projects/project-1234/subscriptions/sub2'"
+    - keyword: gcp_pubsub_subscription_name
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'projects/project-1234/subscriptions/sub2'"
 - statement_terminator: ;
 - statement:
     create_statement:
@@ -51,19 +49,18 @@ file:
     - comparison_operator:
         raw_comparison_operator: '='
     - keyword: queue
-    - storage_notification_parameters:
-      - keyword: notification_provider
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - keyword: azure_event_grid
-      - keyword: azure_storage_queue_primary_uri
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - quoted_literal: "'https://myqueue.queue.core.windows.net/mystoragequeue'"
-      - keyword: azure_tenant_id
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - quoted_literal: "'a123bcde-1234-5678-abc1-9abc12345678'"
+    - keyword: notification_provider
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - keyword: azure_event_grid
+    - keyword: azure_storage_queue_primary_uri
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'https://myqueue.queue.core.windows.net/mystoragequeue'"
+    - keyword: azure_tenant_id
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'a123bcde-1234-5678-abc1-9abc12345678'"
 - statement_terminator: ;
 - statement:
     create_statement:
@@ -80,24 +77,22 @@ file:
     - comparison_operator:
         raw_comparison_operator: '='
     - keyword: queue
-    - notification_integration_parameters:
-      - keyword: notification_provider
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - keyword: aws_sns
+    - keyword: notification_provider
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - keyword: aws_sns
     - keyword: direction
     - comparison_operator:
         raw_comparison_operator: '='
     - keyword: outbound
-    - notification_integration_parameters:
-      - keyword: aws_sns_topic_arn
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - quoted_literal: "'arn:aws:sns:us-east-2:111122223333:sns_topic'"
-      - keyword: aws_sns_role_arn
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - quoted_literal: "'arn:aws:iam::111122223333:role/error_sns_role'"
+    - keyword: aws_sns_topic_arn
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'arn:aws:sns:us-east-2:111122223333:sns_topic'"
+    - keyword: aws_sns_role_arn
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'arn:aws:iam::111122223333:role/error_sns_role'"
 - statement_terminator: ;
 - statement:
     create_statement:
@@ -114,20 +109,18 @@ file:
     - comparison_operator:
         raw_comparison_operator: '='
     - keyword: outbound
-    - notification_integration_parameters:
-      - keyword: notification_provider
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - keyword: gcp_pubsub
+    - keyword: notification_provider
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - keyword: gcp_pubsub
     - keyword: enabled
     - comparison_operator:
         raw_comparison_operator: '='
     - boolean_literal: 'true'
-    - notification_integration_parameters:
-        keyword: gcp_pubsub_topic_name
-        comparison_operator:
-          raw_comparison_operator: '='
-        quoted_literal: "'projects/sdm-prod/topics/mytopic'"
+    - keyword: gcp_pubsub_topic_name
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'projects/sdm-prod/topics/mytopic'"
 - statement_terminator: ;
 - statement:
     create_statement:
@@ -144,22 +137,20 @@ file:
     - comparison_operator:
         raw_comparison_operator: '='
     - keyword: queue
-    - storage_notification_parameters:
-      - keyword: notification_provider
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - keyword: azure_event_grid
+    - keyword: notification_provider
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - keyword: azure_event_grid
     - keyword: direction
     - comparison_operator:
         raw_comparison_operator: '='
     - keyword: outbound
-    - storage_notification_parameters:
-      - keyword: azure_event_grid_topic_endpoint
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - quoted_literal: "'https://myaccount.region-1.eventgrid.azure.net/api/events'"
-      - keyword: azure_tenant_id
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - quoted_literal: "'mytenantid'"
+    - keyword: azure_event_grid_topic_endpoint
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'https://myaccount.region-1.eventgrid.azure.net/api/events'"
+    - keyword: azure_tenant_id
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'mytenantid'"
 - statement_terminator: ;
diff --git a/test/fixtures/dialects/snowflake/snowflake_create_procedure.yml b/test/fixtures/dialects/snowflake/snowflake_create_procedure.yml
index 6c71bb3..65970d9 100644
--- a/test/fixtures/dialects/snowflake/snowflake_create_procedure.yml
+++ b/test/fixtures/dialects/snowflake/snowflake_create_procedure.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 609766cf0f7add54c181ff475b51330286da57701948ba9686bf1091661eed85
+_hash: 69630c224e7d42bd02d11e08fa7078e7b972ee4fabbb9150d5879b170ce1ee55
 file:
 - statement:
     create_procedure_statement:
@@ -75,23 +75,26 @@ file:
         - parameter: test_table
         - data_type:
             data_type_identifier: VARCHAR
-            bracketed:
-              start_bracket: (
-              end_bracket: )
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
+                end_bracket: )
         - comma: ','
         - parameter: test_col
         - data_type:
             data_type_identifier: VARCHAR
-            bracketed:
-              start_bracket: (
-              end_bracket: )
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
+                end_bracket: )
         - end_bracket: )
     - keyword: RETURNS
     - data_type:
         data_type_identifier: VARCHAR
-        bracketed:
-          start_bracket: (
-          end_bracket: )
+        bracketed_arguments:
+          bracketed:
+            start_bracket: (
+            end_bracket: )
     - keyword: LANGUAGE
     - keyword: JAVASCRIPT
     - keyword: AS
diff --git a/test/fixtures/dialects/snowflake/snowflake_create_storage_integration.yml b/test/fixtures/dialects/snowflake/snowflake_create_storage_integration.yml
index 184c876..098fe28 100644
--- a/test/fixtures/dialects/snowflake/snowflake_create_storage_integration.yml
+++ b/test/fixtures/dialects/snowflake/snowflake_create_storage_integration.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: b2d55ad5ee705b8170c77854e66b327f5c2ee32c6bde3af3880c7d580bd9a8a8
+_hash: ad0c20e9dfd8115654ccb945b29b1cd0953917992860daa62a51dcded6c41f62
 file:
 - statement:
     create_statement:
@@ -16,15 +16,14 @@ file:
     - comparison_operator:
         raw_comparison_operator: '='
     - keyword: external_stage
-    - storage_integration_parameters:
-      - keyword: storage_provider
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - keyword: s3
-      - keyword: storage_aws_role_arn
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - quoted_literal: "'arn:aws:iam::001234567890:role/myrole'"
+    - keyword: storage_provider
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - keyword: s3
+    - keyword: storage_aws_role_arn
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'arn:aws:iam::001234567890:role/myrole'"
     - keyword: enabled
     - comparison_operator:
         raw_comparison_operator: '='
@@ -50,15 +49,14 @@ file:
     - comparison_operator:
         raw_comparison_operator: '='
     - keyword: external_stage
-    - storage_integration_parameters:
-      - keyword: storage_provider
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - keyword: s3
-      - keyword: storage_aws_role_arn
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - quoted_literal: "'arn:aws:iam::001234567890:role/myrole'"
+    - keyword: storage_provider
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - keyword: s3
+    - keyword: storage_aws_role_arn
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'arn:aws:iam::001234567890:role/myrole'"
     - keyword: enabled
     - comparison_operator:
         raw_comparison_operator: '='
@@ -84,11 +82,10 @@ file:
     - comparison_operator:
         raw_comparison_operator: '='
     - keyword: external_stage
-    - storage_integration_parameters:
-      - keyword: storage_provider
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - keyword: gcs
+    - keyword: storage_provider
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - keyword: gcs
     - keyword: enabled
     - comparison_operator:
         raw_comparison_operator: '='
@@ -114,20 +111,18 @@ file:
     - comparison_operator:
         raw_comparison_operator: '='
     - keyword: external_stage
-    - storage_integration_parameters:
-      - keyword: storage_provider
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - keyword: azure
+    - keyword: storage_provider
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - keyword: azure
     - keyword: enabled
     - comparison_operator:
         raw_comparison_operator: '='
     - boolean_literal: 'true'
-    - storage_integration_parameters:
-        keyword: azure_tenant_id
-        comparison_operator:
-          raw_comparison_operator: '='
-        quoted_literal: "'<tenant_id>'"
+    - keyword: azure_tenant_id
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'<tenant_id>'"
     - keyword: storage_allowed_locations
     - comparison_operator:
         raw_comparison_operator: '='
@@ -151,15 +146,14 @@ file:
     - comparison_operator:
         raw_comparison_operator: '='
     - keyword: external_stage
-    - storage_integration_parameters:
-      - keyword: storage_provider
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - keyword: s3
-      - keyword: storage_aws_role_arn
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - quoted_literal: "'arn:aws:iam::001234567890:role/myrole'"
+    - keyword: storage_provider
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - keyword: s3
+    - keyword: storage_aws_role_arn
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'arn:aws:iam::001234567890:role/myrole'"
     - keyword: enabled
     - comparison_operator:
         raw_comparison_operator: '='
@@ -194,11 +188,10 @@ file:
     - comparison_operator:
         raw_comparison_operator: '='
     - keyword: external_stage
-    - storage_integration_parameters:
-      - keyword: storage_provider
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - keyword: gcs
+    - keyword: storage_provider
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - keyword: gcs
     - keyword: enabled
     - comparison_operator:
         raw_comparison_operator: '='
@@ -233,20 +226,18 @@ file:
     - comparison_operator:
         raw_comparison_operator: '='
     - keyword: external_stage
-    - storage_integration_parameters:
-      - keyword: storage_provider
-      - comparison_operator:
-          raw_comparison_operator: '='
-      - keyword: azure
+    - keyword: storage_provider
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - keyword: azure
     - keyword: enabled
     - comparison_operator:
         raw_comparison_operator: '='
     - boolean_literal: 'false'
-    - storage_integration_parameters:
-        keyword: azure_tenant_id
-        comparison_operator:
-          raw_comparison_operator: '='
-        quoted_literal: "'a123b4c5-1234-123a-a12b-1a23b45678c9'"
+    - keyword: azure_tenant_id
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'a123b4c5-1234-123a-a12b-1a23b45678c9'"
     - keyword: storage_allowed_locations
     - comparison_operator:
         raw_comparison_operator: '='
diff --git a/test/fixtures/dialects/snowflake/snowflake_create_table.yml b/test/fixtures/dialects/snowflake/snowflake_create_table.yml
index 04033a2..c0207c3 100644
--- a/test/fixtures/dialects/snowflake/snowflake_create_table.yml
+++ b/test/fixtures/dialects/snowflake/snowflake_create_table.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 2f59c16d10e8bc8f466d59f6dd1c831095a7646afe8f107d0ec0288512283772
+_hash: 6253e281a173eaa117a9689da502e225a4e0257cbe8794d870bee5fc4ac9b19a
 file:
 - statement:
     create_table_statement:
@@ -23,7 +23,7 @@ file:
               cast_expression:
                 column_reference:
                   naked_identifier: VALUE
-                snowflake_semi_structured_expression:
+                semi_structured_expression:
                   colon: ':'
                   semi_structured_element: id
                 casting_operator: '::'
@@ -462,11 +462,11 @@ file:
           naked_identifier: orderstatus
           data_type:
             data_type_identifier: varchar
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '100'
-              end_bracket: )
+                end_bracket: )
           column_constraint_segment:
             keyword: default
             null_literal: 'null'
@@ -475,11 +475,11 @@ file:
           naked_identifier: price
           data_type:
             data_type_identifier: varchar
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '255'
-              end_bracket: )
+                end_bracket: )
       - end_bracket: )
     - keyword: as
     - select_statement:
@@ -490,7 +490,7 @@ file:
               cast_expression:
                 column_reference:
                   column_index_identifier_segment: $1
-                snowflake_semi_structured_expression:
+                semi_structured_expression:
                   colon: ':'
                   semi_structured_element: o_custkey
                 casting_operator: '::'
@@ -502,7 +502,7 @@ file:
               cast_expression:
                 column_reference:
                   column_index_identifier_segment: $1
-                snowflake_semi_structured_expression:
+                semi_structured_expression:
                   colon: ':'
                   semi_structured_element: o_orderdate
                 casting_operator: '::'
@@ -514,7 +514,7 @@ file:
               cast_expression:
                 column_reference:
                   column_index_identifier_segment: $1
-                snowflake_semi_structured_expression:
+                semi_structured_expression:
                   colon: ':'
                   semi_structured_element: o_orderstatus
                 casting_operator: '::'
@@ -526,7 +526,7 @@ file:
               cast_expression:
                 column_reference:
                   column_index_identifier_segment: $1
-                snowflake_semi_structured_expression:
+                semi_structured_expression:
                   colon: ':'
                   semi_structured_element: o_totalprice
                 casting_operator: '::'
@@ -857,11 +857,11 @@ file:
           quoted_identifier: '"COL1"'
           data_type:
             data_type_identifier: varchar
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '128'
-              end_bracket: )
+                end_bracket: )
           column_constraint_segment:
           - keyword: NOT
           - keyword: 'NULL'
@@ -870,11 +870,11 @@ file:
           quoted_identifier: '"COL2"'
           data_type:
             data_type_identifier: varchar
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '128'
-              end_bracket: )
+                end_bracket: )
           column_constraint_segment:
           - keyword: NOT
           - keyword: 'NULL'
diff --git a/test/fixtures/dialects/snowflake/snowflake_create_user.sql b/test/fixtures/dialects/snowflake/snowflake_create_user.sql
index 73b11ec..ae275e3 100644
--- a/test/fixtures/dialects/snowflake/snowflake_create_user.sql
+++ b/test/fixtures/dialects/snowflake/snowflake_create_user.sql
@@ -1,5 +1,24 @@
-create user user1 
-    password='abc123' 
-    default_role = myrole 
-    default_secondary_roles = ('ALL') 
+create user user1
+    password='abc123'
+    default_role = myrole
+    display_name = user1
+    login_name = my_login_name
+    first_name = User1
+    middle_name = abc
+    last_name = Test1
+    default_warehouse = my_default_warehouse
+    default_namespace = my_default_namespace
+    default_secondary_roles = ('ALL')
     must_change_password = true;
+
+create user user2
+    password='abc123'
+    default_role = 'myrole'
+    display_name = 'user 2'
+    login_name = 'test login name'
+    first_name = 'User'
+    middle_name = 'abc'
+    last_name = 'test2'
+    default_warehouse = 'my_default_warehouse'
+    default_namespace = 'my_default_namespace'
+    must_change_password = false;
diff --git a/test/fixtures/dialects/snowflake/snowflake_create_user.yml b/test/fixtures/dialects/snowflake/snowflake_create_user.yml
index 509a650..c03763f 100644
--- a/test/fixtures/dialects/snowflake/snowflake_create_user.yml
+++ b/test/fixtures/dialects/snowflake/snowflake_create_user.yml
@@ -3,9 +3,9 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 9e874fb7a01a7da4a354be907ed5b30ab5acf5f1ba5daa20a0e8ed75e200ed50
+_hash: 22283b573b8035fbf38494b02509411ee71d9b5493e78f1c4c88b09998ca16e8
 file:
-  statement:
+- statement:
     create_user_statement:
     - keyword: create
     - keyword: user
@@ -20,6 +20,41 @@ file:
         raw_comparison_operator: '='
     - object_reference:
         naked_identifier: myrole
+    - keyword: display_name
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - object_reference:
+        naked_identifier: user1
+    - keyword: login_name
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - object_reference:
+        naked_identifier: my_login_name
+    - keyword: first_name
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - object_reference:
+        naked_identifier: User1
+    - keyword: middle_name
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - object_reference:
+        naked_identifier: abc
+    - keyword: last_name
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - object_reference:
+        naked_identifier: Test1
+    - keyword: default_warehouse
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - object_reference:
+        naked_identifier: my_default_warehouse
+    - keyword: default_namespace
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - object_reference:
+        naked_identifier: my_default_namespace
     - keyword: default_secondary_roles
     - comparison_operator:
         raw_comparison_operator: '='
@@ -31,4 +66,51 @@ file:
     - comparison_operator:
         raw_comparison_operator: '='
     - boolean_literal: 'true'
-  statement_terminator: ;
+- statement_terminator: ;
+- statement:
+    create_user_statement:
+    - keyword: create
+    - keyword: user
+    - object_reference:
+        naked_identifier: user2
+    - keyword: password
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'abc123'"
+    - keyword: default_role
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'myrole'"
+    - keyword: display_name
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'user 2'"
+    - keyword: login_name
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'test login name'"
+    - keyword: first_name
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'User'"
+    - keyword: middle_name
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'abc'"
+    - keyword: last_name
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'test2'"
+    - keyword: default_warehouse
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'my_default_warehouse'"
+    - keyword: default_namespace
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'my_default_namespace'"
+    - keyword: must_change_password
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - boolean_literal: 'false'
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/snowflake/snowflake_execute_task.sql b/test/fixtures/dialects/snowflake/snowflake_execute_task.sql
new file mode 100644
index 0000000..b18258d
--- /dev/null
+++ b/test/fixtures/dialects/snowflake/snowflake_execute_task.sql
@@ -0,0 +1 @@
+EXECUTE TASK my_task;
diff --git a/test/fixtures/dialects/snowflake/snowflake_execute_task.yml b/test/fixtures/dialects/snowflake/snowflake_execute_task.yml
new file mode 100644
index 0000000..de00ff2
--- /dev/null
+++ b/test/fixtures/dialects/snowflake/snowflake_execute_task.yml
@@ -0,0 +1,13 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 0059fba6a20ac46c320a7282d451301142de29d00a8cdf9b5aaeef45fbc4b4c4
+file:
+  statement:
+    execute_task_clause:
+    - keyword: EXECUTE
+    - keyword: TASK
+    - parameter: my_task
+  statement_terminator: ;
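
EXECUTE TASK is a brand-new fixture in this release; the record above shows 2.0.x parses it via a dedicated execute_task_clause. A structure-agnostic sketch (same assumptions as above) that avoids depending on the exact nesting of the record:

# Sketch: serialize the parse record and check for the new clause by
# substring, which is robust to how the record is nested.
import json
import sqlfluff

record = sqlfluff.parse("EXECUTE TASK my_task;", dialect="snowflake")
print("execute_task_clause" in json.dumps(record))  # expected True on 2.0.5
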
diff --git a/test/fixtures/dialects/snowflake/snowflake_grant_revoke.sql b/test/fixtures/dialects/snowflake/snowflake_grant_revoke.sql
index 98fae9b..d10e7b9 100644
--- a/test/fixtures/dialects/snowflake/snowflake_grant_revoke.sql
+++ b/test/fixtures/dialects/snowflake/snowflake_grant_revoke.sql
@@ -95,3 +95,7 @@ grant create account on account to role my_role;
 grant create share on account to role my_role;
 grant create network policy on account to role my_role;
 grant create data exchange listing on account to role my_role;
+
+GRANT MANAGE ACCOUNT SUPPORT CASES ON ACCOUNT TO ROLE my_role;
+GRANT MANAGE ORGANIZATION SUPPORT CASES ON ACCOUNT TO ROLE my_role;
+GRANT MANAGE USER SUPPORT CASES ON ACCOUNT TO ROLE my_role;
diff --git a/test/fixtures/dialects/snowflake/snowflake_grant_revoke.yml b/test/fixtures/dialects/snowflake/snowflake_grant_revoke.yml
index 533b6a4..93a9c54 100644
--- a/test/fixtures/dialects/snowflake/snowflake_grant_revoke.yml
+++ b/test/fixtures/dialects/snowflake/snowflake_grant_revoke.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: ec054d3dbcbf028019afc7e9f1a2fb6d3992fe9de5b15d0d7a78b2cefe802659
+_hash: 25596156a43ae14090219ab8f410cb86b21ff7e0077e7ef54e797d975fe21c92
 file:
 - statement:
     access_statement:
@@ -1150,3 +1150,45 @@ file:
     - role_reference:
         naked_identifier: my_role
 - statement_terminator: ;
+- statement:
+    access_statement:
+    - keyword: GRANT
+    - keyword: MANAGE
+    - keyword: ACCOUNT
+    - keyword: SUPPORT
+    - keyword: CASES
+    - keyword: 'ON'
+    - keyword: ACCOUNT
+    - keyword: TO
+    - keyword: ROLE
+    - role_reference:
+        naked_identifier: my_role
+- statement_terminator: ;
+- statement:
+    access_statement:
+    - keyword: GRANT
+    - keyword: MANAGE
+    - keyword: ORGANIZATION
+    - keyword: SUPPORT
+    - keyword: CASES
+    - keyword: 'ON'
+    - keyword: ACCOUNT
+    - keyword: TO
+    - keyword: ROLE
+    - role_reference:
+        naked_identifier: my_role
+- statement_terminator: ;
+- statement:
+    access_statement:
+    - keyword: GRANT
+    - keyword: MANAGE
+    - keyword: USER
+    - keyword: SUPPORT
+    - keyword: CASES
+    - keyword: 'ON'
+    - keyword: ACCOUNT
+    - keyword: TO
+    - keyword: ROLE
+    - role_reference:
+        naked_identifier: my_role
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/snowflake/snowflake_insert.sql b/test/fixtures/dialects/snowflake/snowflake_insert.sql
index 1304232..d91b37b 100644
--- a/test/fixtures/dialects/snowflake/snowflake_insert.sql
+++ b/test/fixtures/dialects/snowflake/snowflake_insert.sql
@@ -63,3 +63,9 @@ insert all
   when c > 10 then
     into t1 (col1, col2) values (a, b)
 select a, b, c from src;
+
+INSERT INTO foo.bar
+(
+  SELECT
+    foo.bar
+);
diff --git a/test/fixtures/dialects/snowflake/snowflake_insert.yml b/test/fixtures/dialects/snowflake/snowflake_insert.yml
index 83f6c6b..5b8c35e 100644
--- a/test/fixtures/dialects/snowflake/snowflake_insert.yml
+++ b/test/fixtures/dialects/snowflake/snowflake_insert.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: c81a669cbc95ca3267e1921e9cc2f7ffc3c85658f61264472a624464eb8837e8
+_hash: a6ea753bffd94b5fd9c7c5829eca5a08b7a926134b1cf9510805c219aae77f38
 file:
 - statement:
     insert_statement:
@@ -582,3 +582,23 @@ file:
                 table_reference:
                   naked_identifier: src
 - statement_terminator: ;
+- statement:
+    insert_statement:
+    - keyword: INSERT
+    - keyword: INTO
+    - table_reference:
+      - naked_identifier: foo
+      - dot: .
+      - naked_identifier: bar
+    - bracketed:
+        start_bracket: (
+        select_statement:
+          select_clause:
+            keyword: SELECT
+            select_clause_element:
+              column_reference:
+              - naked_identifier: foo
+              - dot: .
+              - naked_identifier: bar
+        end_bracket: )
+- statement_terminator: ;
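
The insert fixture gains a case where the SELECT source is wrapped in parentheses rather than introduced by VALUES; the bracketed select_statement node above is the notable addition. A minimal check along the same lines as the earlier sketches:

# Sketch: a parenthesised SELECT as the INSERT source; parse raises if
# the snowflake dialect rejects the form.
import sqlfluff

sqlfluff.parse("INSERT INTO foo.bar (SELECT foo.bar);", dialect="snowflake")
print("parsed OK")
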
diff --git a/test/fixtures/dialects/snowflake/snowflake_json_underscore_key.yml b/test/fixtures/dialects/snowflake/snowflake_json_underscore_key.yml
index 511dc26..6fea8f3 100644
--- a/test/fixtures/dialects/snowflake/snowflake_json_underscore_key.yml
+++ b/test/fixtures/dialects/snowflake/snowflake_json_underscore_key.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 5c0620ae6d01cd1f666991b31b575a145c2778ecb93f32506da963cd1dd3b8d1
+_hash: 8a5489149e51eeeb71a5fed628312d609c54af92e23a45bd9c812272df2568f5
 file:
   statement:
     select_statement:
@@ -16,7 +16,7 @@ file:
               - naked_identifier: x
               - dot: .
               - naked_identifier: y
-              snowflake_semi_structured_expression:
+              semi_structured_expression:
                 colon: ':'
                 semi_structured_element: _z
               casting_operator: '::'
diff --git a/test/fixtures/dialects/snowflake/snowflake_merge_into.yml b/test/fixtures/dialects/snowflake/snowflake_merge_into.yml
index 4d23e6c..f824f3f 100644
--- a/test/fixtures/dialects/snowflake/snowflake_merge_into.yml
+++ b/test/fixtures/dialects/snowflake/snowflake_merge_into.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 7b602a6619c523b8402544ee5be40c4a95fb1bbfec9d9cc16c842ea40846cb50
+_hash: 4fc9381cf19925291a0cb4b9a028ac88f1518d8d63395677b1d2a1cbfdd6da1a
 file:
 - statement:
     alter_table_statement:
@@ -144,16 +144,17 @@ file:
             numeric_literal: '1'
         - keyword: then
         - merge_insert_clause:
-          - keyword: insert
-          - bracketed:
+            keyword: insert
+            bracketed:
               start_bracket: (
               column_reference:
                 naked_identifier: marked
               end_bracket: )
-          - keyword: values
-          - bracketed:
-              start_bracket: (
-              expression:
-                numeric_literal: '1'
-              end_bracket: )
+            values_clause:
+              keyword: values
+              bracketed:
+                start_bracket: (
+                expression:
+                  numeric_literal: '1'
+                end_bracket: )
 - statement_terminator: ;
diff --git a/test/fixtures/dialects/snowflake/snowflake_pivot.sql b/test/fixtures/dialects/snowflake/snowflake_pivot.sql
index ca197db..ce86765 100644
--- a/test/fixtures/dialects/snowflake/snowflake_pivot.sql
+++ b/test/fixtures/dialects/snowflake/snowflake_pivot.sql
@@ -4,3 +4,10 @@ PIVOT (min(f_val) FOR f_id IN (1, 2)) AS f (a, b);
 
 SELECT * FROM my_tbl
 UNPIVOT (val FOR col_name IN (a, b));
+
+select
+*
+from table_a
+unpivot (a for b in (col_1, col_2, col_3))
+unpivot (c for d in (col_a, col_b, col_c))
+;
diff --git a/test/fixtures/dialects/snowflake/snowflake_pivot.yml b/test/fixtures/dialects/snowflake/snowflake_pivot.yml
index 5ce8d2a..4cddc24 100644
--- a/test/fixtures/dialects/snowflake/snowflake_pivot.yml
+++ b/test/fixtures/dialects/snowflake/snowflake_pivot.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 37df5646cd5d4db6264d866fd5aa7909ccfad9e82c47235c53adc53ec510d4ca
+_hash: 57ff3afe4fe78cf0a36a3a3a561a8ebd0578fbb29eb444fdb7f84cfcd389cca3
 file:
 - statement:
     select_statement:
@@ -85,3 +85,53 @@ file:
               - end_bracket: )
             - end_bracket: )
 - statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: select
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: from
+        from_expression:
+        - from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: table_a
+        - from_unpivot_expression:
+            keyword: unpivot
+            bracketed:
+            - start_bracket: (
+            - naked_identifier: a
+            - keyword: for
+            - naked_identifier: b
+            - keyword: in
+            - bracketed:
+              - start_bracket: (
+              - naked_identifier: col_1
+              - comma: ','
+              - naked_identifier: col_2
+              - comma: ','
+              - naked_identifier: col_3
+              - end_bracket: )
+            - end_bracket: )
+        - from_unpivot_expression:
+            keyword: unpivot
+            bracketed:
+            - start_bracket: (
+            - naked_identifier: c
+            - keyword: for
+            - naked_identifier: d
+            - keyword: in
+            - bracketed:
+              - start_bracket: (
+              - naked_identifier: col_a
+              - comma: ','
+              - naked_identifier: col_b
+              - comma: ','
+              - naked_identifier: col_c
+              - end_bracket: )
+            - end_bracket: )
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/snowflake/snowflake_sample.sql b/test/fixtures/dialects/snowflake/snowflake_sample.sql
index a4d80e7..36cf910 100644
--- a/test/fixtures/dialects/snowflake/snowflake_sample.sql
+++ b/test/fixtures/dialects/snowflake/snowflake_sample.sql
@@ -1,4 +1,13 @@
 -- https://github.com/sqlfluff/sqlfluff/issues/547
 select *
 -- 20% sample
-from real_data sample (20)
\ No newline at end of file
+from real_data sample (20)
+;
+
+SET sample_size = 10;
+WITH dummy_data AS (
+    SELECT SEQ4() AS row_number
+    FROM TABLE(GENERATOR(rowcount => 1000))
+    ORDER BY row_number
+)
+SELECT * FROM dummy_data SAMPLE ($sample_size ROWS);
diff --git a/test/fixtures/dialects/snowflake/snowflake_sample.yml b/test/fixtures/dialects/snowflake/snowflake_sample.yml
index f693052..0d60f4c 100644
--- a/test/fixtures/dialects/snowflake/snowflake_sample.yml
+++ b/test/fixtures/dialects/snowflake/snowflake_sample.yml
@@ -3,9 +3,9 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 610f0f38ae8691ff03201c8adc2fed5bf086cb4cff72cbf2549432eceb3c23b8
+_hash: 8fbecdbf9ac58a9c95d293d289de060182d5029064ef777bf3ef52f139d59d91
 file:
-  statement:
+- statement:
     select_statement:
       select_clause:
         keyword: select
@@ -26,3 +26,84 @@ file:
                 start_bracket: (
                 numeric_literal: '20'
                 end_bracket: )
+- statement_terminator: ;
+- statement:
+    set_statement:
+      keyword: SET
+      variable: sample_size
+      comparison_operator:
+        raw_comparison_operator: '='
+      expression:
+        numeric_literal: '10'
+- statement_terminator: ;
+- statement:
+    with_compound_statement:
+      keyword: WITH
+      common_table_expression:
+        naked_identifier: dummy_data
+        keyword: AS
+        bracketed:
+          start_bracket: (
+          select_statement:
+            select_clause:
+              keyword: SELECT
+              select_clause_element:
+                function:
+                  function_name:
+                    function_name_identifier: SEQ4
+                  bracketed:
+                    start_bracket: (
+                    end_bracket: )
+                alias_expression:
+                  keyword: AS
+                  naked_identifier: row_number
+            from_clause:
+              keyword: FROM
+              from_expression:
+                from_expression_element:
+                  table_expression:
+                    function:
+                      function_name:
+                        function_name_identifier: TABLE
+                      bracketed:
+                        start_bracket: (
+                        expression:
+                          function:
+                            function_name:
+                              function_name_identifier: GENERATOR
+                            bracketed:
+                              start_bracket: (
+                              snowflake_keyword_expression:
+                                parameter: rowcount
+                                parameter_assigner: =>
+                                numeric_literal: '1000'
+                              end_bracket: )
+                        end_bracket: )
+            orderby_clause:
+            - keyword: ORDER
+            - keyword: BY
+            - column_reference:
+                naked_identifier: row_number
+          end_bracket: )
+      select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: dummy_data
+              sample_expression:
+                keyword: SAMPLE
+                bracketed:
+                  start_bracket: (
+                  variable: $sample_size
+                  keyword: ROWS
+                  end_bracket: )
+- statement_terminator: ;
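
The extended sample fixture exercises two Snowflake-isms at once: a session variable consumed inside SAMPLE ($sample_size ROWS), and the rowcount => 1000 named argument to GENERATOR, which the tree records as a snowflake_keyword_expression with a parameter_assigner. A short sketch in the same style (dummy_data is a placeholder; parsing is purely offline):

# Sketch: confirm the sampling clause and the session-variable reference
# both appear in the serialized parse record.
import json
import sqlfluff

sql = "SELECT * FROM dummy_data SAMPLE ($sample_size ROWS);"
dumped = json.dumps(sqlfluff.parse(sql, dialect="snowflake"))
print("sample_expression" in dumped)  # expected True on 2.0.5
print("variable" in dumped)           # the $sample_size reference
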
diff --git a/test/fixtures/dialects/snowflake/snowflake_select.sql b/test/fixtures/dialects/snowflake/snowflake_select.sql
index 7e9d0bb..98b0691 100644
--- a/test/fixtures/dialects/snowflake/snowflake_select.sql
+++ b/test/fixtures/dialects/snowflake/snowflake_select.sql
@@ -3,3 +3,5 @@ SELECT a FROM b;
 SELECT view FROM foo;
 
 SELECT view FROM case;
+
+SELECT issue FROM issue;
diff --git a/test/fixtures/dialects/snowflake/snowflake_select.yml b/test/fixtures/dialects/snowflake/snowflake_select.yml
index 20ea0e7..79033be 100644
--- a/test/fixtures/dialects/snowflake/snowflake_select.yml
+++ b/test/fixtures/dialects/snowflake/snowflake_select.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: d3371c3400788a60b3bbfd8ffc4da84dc3dc23a5c03a1ed418e6b389a1c1476c
+_hash: 1c7a854bb301d94fdd922bb3e1abd5abc7af67146dc135a575a04a6686eaa301
 file:
 - statement:
     select_statement:
@@ -50,3 +50,18 @@ file:
               table_reference:
                 naked_identifier: case
 - statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          column_reference:
+            naked_identifier: issue
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: issue
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/snowflake/snowflake_select_system_function.sql b/test/fixtures/dialects/snowflake/snowflake_select_system_function.sql
new file mode 100644
index 0000000..74df407
--- /dev/null
+++ b/test/fixtures/dialects/snowflake/snowflake_select_system_function.sql
@@ -0,0 +1,2 @@
+SELECT SYSTEM$STREAM_HAS_DATA('SCH.MY_STREAM');
+SELECT SYSTEM$USER_TASK_CANCEL_ONGOING_EXECUTIONS('MY_TASK');
diff --git a/test/fixtures/dialects/snowflake/snowflake_select_system_function.yml b/test/fixtures/dialects/snowflake/snowflake_select_system_function.yml
new file mode 100644
index 0000000..f21a623
--- /dev/null
+++ b/test/fixtures/dialects/snowflake/snowflake_select_system_function.yml
@@ -0,0 +1,29 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 00fe9093281e1147d50bf758ca517889e42697e9d726d5a48ca6cf91c025e59e
+file:
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          system_function_name: SYSTEM$STREAM_HAS_DATA
+          bracketed:
+            start_bracket: (
+            quoted_literal: "'SCH.MY_STREAM'"
+            end_bracket: )
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          system_function_name: SYSTEM$USER_TASK_CANCEL_ONGOING_EXECUTIONS
+          bracketed:
+            start_bracket: (
+            quoted_literal: "'MY_TASK'"
+            end_bracket: )
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/snowflake/snowflake_semi_structured.yml b/test/fixtures/dialects/snowflake/snowflake_semi_structured.yml
index 945b74d..2f68370 100644
--- a/test/fixtures/dialects/snowflake/snowflake_semi_structured.yml
+++ b/test/fixtures/dialects/snowflake/snowflake_semi_structured.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 7a43b2f5826601acc62ee22325fdf43e46e5e328fdaf51b66d952c81c0a78018
+_hash: bb6046b43b0d0fc5451fc777357451cd2ebbce591a7cc1d418c8e453eb99d68d
 file:
 - statement:
     select_statement:
@@ -17,7 +17,7 @@ file:
           expression:
             column_reference:
               naked_identifier: value
-            snowflake_semi_structured_expression:
+            semi_structured_expression:
               colon: ':'
               semi_structured_element: value
           alias_expression:
@@ -29,7 +29,7 @@ file:
             cast_expression:
               column_reference:
                 naked_identifier: value
-              snowflake_semi_structured_expression:
+              semi_structured_expression:
                 colon: ':'
                 semi_structured_element: id
               casting_operator: '::'
@@ -44,7 +44,7 @@ file:
             cast_expression:
               column_reference:
                 naked_identifier: value
-              snowflake_semi_structured_expression:
+              semi_structured_expression:
                 colon: ':'
                 semi_structured_element: value
               casting_operator: '::'
@@ -58,7 +58,7 @@ file:
           expression:
             column_reference:
               naked_identifier: value
-            snowflake_semi_structured_expression:
+            semi_structured_expression:
             - colon: ':'
             - semi_structured_element: thing
             - array_accessor:
@@ -76,7 +76,7 @@ file:
             cast_expression:
               column_reference:
                 naked_identifier: value
-              snowflake_semi_structured_expression:
+              semi_structured_expression:
               - colon: ':'
               - semi_structured_element: thing
               - array_accessor:
@@ -108,7 +108,7 @@ file:
               start_square_bracket: '['
               numeric_literal: '0'
               end_square_bracket: ']'
-            snowflake_semi_structured_expression:
+            semi_structured_expression:
               dot: .
               semi_structured_element: array_element_property
           alias_expression:
@@ -147,7 +147,7 @@ file:
             cast_expression:
               column_reference:
                 naked_identifier: value
-              snowflake_semi_structured_expression:
+              semi_structured_expression:
               - colon: ':'
               - semi_structured_element: point
               - colon: ':'
@@ -157,14 +157,13 @@ file:
               casting_operator: '::'
               data_type:
                 data_type_identifier: NUMBER
-                bracketed:
-                - start_bracket: (
-                - expression:
-                    numeric_literal: '10'
-                - comma: ','
-                - expression:
-                    numeric_literal: '6'
-                - end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                  - start_bracket: (
+                  - numeric_literal: '10'
+                  - comma: ','
+                  - numeric_literal: '6'
+                  - end_bracket: )
           alias_expression:
             keyword: AS
             naked_identifier: lat
@@ -174,7 +173,7 @@ file:
             cast_expression:
               column_reference:
                 naked_identifier: value
-              snowflake_semi_structured_expression:
+              semi_structured_expression:
               - colon: ':'
               - semi_structured_element: point
               - colon: ':'
@@ -184,14 +183,13 @@ file:
               casting_operator: '::'
               data_type:
                 data_type_identifier: NUMBER
-                bracketed:
-                - start_bracket: (
-                - expression:
-                    numeric_literal: '10'
-                - comma: ','
-                - expression:
-                    numeric_literal: '6'
-                - end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                  - start_bracket: (
+                  - numeric_literal: '10'
+                  - comma: ','
+                  - numeric_literal: '6'
+                  - end_bracket: )
           alias_expression:
             keyword: AS
             naked_identifier: lng
diff --git a/test/fixtures/dialects/snowflake/snowflake_semi_structured_2.yml b/test/fixtures/dialects/snowflake/snowflake_semi_structured_2.yml
index 7fdc7e2..2ab8f80 100644
--- a/test/fixtures/dialects/snowflake/snowflake_semi_structured_2.yml
+++ b/test/fixtures/dialects/snowflake/snowflake_semi_structured_2.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: a06452fea200f74d1b434f4a6590243c067f6685f3cdfa6493f60c03cee03637
+_hash: a720837903ff876f4ee7516d52de72abaa89209df48cac70b765bb2ab8a73073
 file:
   statement:
     select_statement:
@@ -14,7 +14,7 @@ file:
             cast_expression:
               column_reference:
                 naked_identifier: value
-              snowflake_semi_structured_expression:
+              semi_structured_expression:
               - colon: ':'
               - semi_structured_element: data
               - colon: ':'
@@ -31,7 +31,7 @@ file:
             cast_expression:
               column_reference:
                 naked_identifier: value
-              snowflake_semi_structured_expression:
+              semi_structured_expression:
               - colon: ':'
               - semi_structured_element: data
               - colon: ':'
diff --git a/test/fixtures/dialects/snowflake/snowflake_semi_structured_3.yml b/test/fixtures/dialects/snowflake/snowflake_semi_structured_3.yml
index 647f5f5..132b8e1 100644
--- a/test/fixtures/dialects/snowflake/snowflake_semi_structured_3.yml
+++ b/test/fixtures/dialects/snowflake/snowflake_semi_structured_3.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 0b936d0c53e60ea3e2218b58700181c23b632f40b7597f3e7e25da58e8686bfb
+_hash: 8d87ced2cb76bdc46947437b43aeacc28944904f5747df8f6bde8556a59c7527
 file:
   statement:
     select_statement:
@@ -48,7 +48,7 @@ file:
                   - dot: .
                   - naked_identifier: metadata
                 end_bracket: )
-            snowflake_semi_structured_expression:
+            semi_structured_expression:
               colon: ':'
               semi_structured_element: customer_id
           alias_expression:
diff --git a/test/fixtures/dialects/sparksql/add_jar.sql b/test/fixtures/dialects/sparksql/add_jar.sql
index 3602d0e..4ab5afc 100644
--- a/test/fixtures/dialects/sparksql/add_jar.sql
+++ b/test/fixtures/dialects/sparksql/add_jar.sql
@@ -14,5 +14,13 @@ ADD JAR "ivy://group:module:version?transitive=true";
 
 ADD JAR "ivy://group:module:version?exclude=group:module&transitive=true";
 
--- NB: Non-quoted paths are not supported in SQLFluff currently
---ADD JAR /tmp/test.jar;
+ADD JAR ivy://group:module:version?exclude=group:module&transitive=true;
+
+ADD JAR /path/to/some.jar;
+
+ADD JAR path/to/some.jar;
+
+ADD JAR ivy://path/to/some.jar;
+
+-- NB: Non-quoted paths do not currently support whitespace
+-- e.g. /path to/some.jar
diff --git a/test/fixtures/dialects/sparksql/add_jar.yml b/test/fixtures/dialects/sparksql/add_jar.yml
index 4a4520e..15fcb0e 100644
--- a/test/fixtures/dialects/sparksql/add_jar.yml
+++ b/test/fixtures/dialects/sparksql/add_jar.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: a475a17704b1a019947ed3dc476aefbd6e7175ac51ea0b22a019485c01986fa3
+_hash: b7ba37b07b7588c894e6ad680fafcb954a373185f6623e703546834c8ad6990c
 file:
 - statement:
     add_jar_statement:
@@ -54,3 +54,27 @@ file:
       file_keyword: JAR
       quoted_literal: '"ivy://group:module:version?exclude=group:module&transitive=true"'
 - statement_terminator: ;
+- statement:
+    add_jar_statement:
+      keyword: ADD
+      file_keyword: JAR
+      file_literal: ivy://group:module:version?exclude=group:module&transitive=true
+- statement_terminator: ;
+- statement:
+    add_jar_statement:
+      keyword: ADD
+      file_keyword: JAR
+      file_literal: /path/to/some.jar
+- statement_terminator: ;
+- statement:
+    add_jar_statement:
+      keyword: ADD
+      file_keyword: JAR
+      file_literal: path/to/some.jar
+- statement_terminator: ;
+- statement:
+    add_jar_statement:
+      keyword: ADD
+      file_keyword: JAR
+      file_literal: ivy://path/to/some.jar
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/sparksql/alter_table.yml b/test/fixtures/dialects/sparksql/alter_table.yml
index feca679..7f2d325 100644
--- a/test/fixtures/dialects/sparksql/alter_table.yml
+++ b/test/fixtures/dialects/sparksql/alter_table.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 47c08b741747f2d549c6f21ef99b0c776b6d47865d93bbc022ec7feb7f0a851c
+_hash: c679500066519765699ca6c686706ac93d11997dd1e5ce0f3d09ecb47289e11c
 file:
 - statement:
     alter_table_statement:
@@ -56,13 +56,15 @@ file:
     - bracketed:
       - start_bracket: (
       - column_definition:
-          naked_identifier: LastName
+          column_reference:
+            naked_identifier: LastName
           data_type:
             primitive_type:
               keyword: STRING
       - comma: ','
       - column_definition:
-          naked_identifier: DOB
+          column_reference:
+            naked_identifier: DOB
           data_type:
             primitive_type:
               keyword: TIMESTAMP
@@ -304,7 +306,8 @@ file:
         naked_identifier: Loc_Orc
     - keyword: SET
     - keyword: FILEFORMAT
-    - keyword: ORC
+    - data_source_format:
+        keyword: ORC
 - statement_terminator: ;
 - statement:
     alter_table_statement:
@@ -329,7 +332,8 @@ file:
       - end_bracket: )
     - keyword: SET
     - keyword: FILEFORMAT
-    - keyword: PARQUET
+    - data_source_format:
+        keyword: PARQUET
 - statement_terminator: ;
 - statement:
     alter_table_statement:
diff --git a/test/fixtures/dialects/sparksql/create_table_complex_datatypes.yml b/test/fixtures/dialects/sparksql/create_table_complex_datatypes.yml
index e122f16..4055bc2 100644
--- a/test/fixtures/dialects/sparksql/create_table_complex_datatypes.yml
+++ b/test/fixtures/dialects/sparksql/create_table_complex_datatypes.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 51bbf5a827b5e2ba4aefdea69bee7821d3755ed83bf2761759a9ef850ac21bee
+_hash: 9630e84f8a1e62373243524752cc6eb7ab25f89c00d9d3c3b29fa70b2f564a4c
 file:
 - statement:
     create_table_statement:
@@ -16,43 +16,47 @@ file:
       - column_definition:
           naked_identifier: a
           data_type:
-          - keyword: STRUCT
+            struct_type:
+              keyword: STRUCT
+              struct_type_schema:
+              - start_angle_bracket: <
+              - naked_identifier: b
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: STRING
+              - comma: ','
+              - naked_identifier: c
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: BOOLEAN
+              - end_angle_bracket: '>'
+      - comma: ','
+      - column_definition:
+          naked_identifier: d
+          data_type:
+          - keyword: MAP
           - start_angle_bracket: <
-          - naked_identifier: b
-          - colon: ':'
           - data_type:
               primitive_type:
                 keyword: STRING
           - comma: ','
-          - naked_identifier: c
-          - colon: ':'
           - data_type:
               primitive_type:
                 keyword: BOOLEAN
           - end_angle_bracket: '>'
       - comma: ','
-      - column_definition:
-          naked_identifier: d
-          data_type:
-            keyword: MAP
-            start_angle_bracket: <
-            primitive_type:
-              keyword: STRING
-            comma: ','
-            data_type:
-              primitive_type:
-                keyword: BOOLEAN
-            end_angle_bracket: '>'
-      - comma: ','
       - column_definition:
           naked_identifier: e
           data_type:
-            keyword: ARRAY
-            start_angle_bracket: <
-            data_type:
-              primitive_type:
-                keyword: STRING
-            end_angle_bracket: '>'
+            array_type:
+              keyword: ARRAY
+              start_angle_bracket: <
+              data_type:
+                primitive_type:
+                  keyword: STRING
+              end_angle_bracket: '>'
       - end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -66,22 +70,24 @@ file:
       - column_definition:
           naked_identifier: a
           data_type:
-          - keyword: STRUCT
-          - start_angle_bracket: <
-          - naked_identifier: b
-          - colon: ':'
-          - data_type:
-              primitive_type:
-                keyword: STRING
-          - keyword: COMMENT
-          - quoted_literal: "'struct_comment'"
-          - comma: ','
-          - naked_identifier: c
-          - colon: ':'
-          - data_type:
-              primitive_type:
-                keyword: BOOLEAN
-          - end_angle_bracket: '>'
+            struct_type:
+              keyword: STRUCT
+              struct_type_schema:
+              - start_angle_bracket: <
+              - naked_identifier: b
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: STRING
+              - keyword: COMMENT
+              - quoted_literal: "'struct_comment'"
+              - comma: ','
+              - naked_identifier: c
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: BOOLEAN
+              - end_angle_bracket: '>'
           column_constraint_segment:
             comment_clause:
               keyword: COMMENT
@@ -90,15 +96,16 @@ file:
       - column_definition:
           naked_identifier: d
           data_type:
-            keyword: MAP
-            start_angle_bracket: <
-            primitive_type:
-              keyword: STRING
-            comma: ','
-            data_type:
+          - keyword: MAP
+          - start_angle_bracket: <
+          - data_type:
+              primitive_type:
+                keyword: STRING
+          - comma: ','
+          - data_type:
               primitive_type:
                 keyword: BOOLEAN
-            end_angle_bracket: '>'
+          - end_angle_bracket: '>'
           column_constraint_segment:
             comment_clause:
               keyword: COMMENT
@@ -107,12 +114,13 @@ file:
       - column_definition:
           naked_identifier: e
           data_type:
-            keyword: ARRAY
-            start_angle_bracket: <
-            data_type:
-              primitive_type:
-                keyword: STRING
-            end_angle_bracket: '>'
+            array_type:
+              keyword: ARRAY
+              start_angle_bracket: <
+              data_type:
+                primitive_type:
+                  keyword: STRING
+              end_angle_bracket: '>'
           column_constraint_segment:
             comment_clause:
               keyword: COMMENT
@@ -130,88 +138,99 @@ file:
       - column_definition:
           naked_identifier: a
           data_type:
-          - keyword: STRUCT
+            struct_type:
+              keyword: STRUCT
+              struct_type_schema:
+              - start_angle_bracket: <
+              - naked_identifier: b
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: STRING
+              - comma: ','
+              - naked_identifier: c
+              - colon: ':'
+              - data_type:
+                - keyword: MAP
+                - start_angle_bracket: <
+                - data_type:
+                    primitive_type:
+                      keyword: STRING
+                - comma: ','
+                - data_type:
+                    primitive_type:
+                      keyword: BOOLEAN
+                - end_angle_bracket: '>'
+              - end_angle_bracket: '>'
+      - comma: ','
+      - column_definition:
+          naked_identifier: d
+          data_type:
+          - keyword: MAP
           - start_angle_bracket: <
-          - naked_identifier: b
-          - colon: ':'
           - data_type:
               primitive_type:
                 keyword: STRING
           - comma: ','
-          - naked_identifier: c
-          - colon: ':'
           - data_type:
-              keyword: MAP
-              start_angle_bracket: <
-              primitive_type:
-                keyword: STRING
-              comma: ','
-              data_type:
-                primitive_type:
-                  keyword: BOOLEAN
-              end_angle_bracket: '>'
+              struct_type:
+                keyword: STRUCT
+                struct_type_schema:
+                - start_angle_bracket: <
+                - naked_identifier: e
+                - colon: ':'
+                - data_type:
+                    primitive_type:
+                      keyword: STRING
+                - comma: ','
+                - naked_identifier: f
+                - colon: ':'
+                - data_type:
+                  - keyword: MAP
+                  - start_angle_bracket: <
+                  - data_type:
+                      primitive_type:
+                        keyword: STRING
+                  - comma: ','
+                  - data_type:
+                      primitive_type:
+                        keyword: BOOLEAN
+                  - end_angle_bracket: '>'
+                - end_angle_bracket: '>'
           - end_angle_bracket: '>'
       - comma: ','
-      - column_definition:
-          naked_identifier: d
-          data_type:
-            keyword: MAP
-            start_angle_bracket: <
-            primitive_type:
-              keyword: STRING
-            comma: ','
-            data_type:
-            - keyword: STRUCT
-            - start_angle_bracket: <
-            - naked_identifier: e
-            - colon: ':'
-            - data_type:
-                primitive_type:
-                  keyword: STRING
-            - comma: ','
-            - naked_identifier: f
-            - colon: ':'
-            - data_type:
-                keyword: MAP
-                start_angle_bracket: <
-                primitive_type:
-                  keyword: STRING
-                comma: ','
-                data_type:
-                  primitive_type:
-                    keyword: BOOLEAN
-                end_angle_bracket: '>'
-            - end_angle_bracket: '>'
-            end_angle_bracket: '>'
-      - comma: ','
       - column_definition:
           naked_identifier: g
           data_type:
-            keyword: ARRAY
-            start_angle_bracket: <
-            data_type:
-            - keyword: STRUCT
-            - start_angle_bracket: <
-            - naked_identifier: h
-            - colon: ':'
-            - data_type:
-                primitive_type:
-                  keyword: STRING
-            - comma: ','
-            - naked_identifier: i
-            - colon: ':'
-            - data_type:
-                keyword: MAP
-                start_angle_bracket: <
-                primitive_type:
-                  keyword: STRING
-                comma: ','
-                data_type:
-                  primitive_type:
-                    keyword: BOOLEAN
-                end_angle_bracket: '>'
-            - end_angle_bracket: '>'
-            end_angle_bracket: '>'
+            array_type:
+              keyword: ARRAY
+              start_angle_bracket: <
+              data_type:
+                struct_type:
+                  keyword: STRUCT
+                  struct_type_schema:
+                  - start_angle_bracket: <
+                  - naked_identifier: h
+                  - colon: ':'
+                  - data_type:
+                      primitive_type:
+                        keyword: STRING
+                  - comma: ','
+                  - naked_identifier: i
+                  - colon: ':'
+                  - data_type:
+                    - keyword: MAP
+                    - start_angle_bracket: <
+                    - data_type:
+                        primitive_type:
+                          keyword: STRING
+                    - comma: ','
+                    - data_type:
+                        primitive_type:
+                          keyword: BOOLEAN
+                    - end_angle_bracket: '>'
+                  - end_angle_bracket: '>'
+              end_angle_bracket: '>'
       - end_bracket: )
 - statement_terminator: ;
 - statement:
@@ -225,42 +244,46 @@ file:
       - column_definition:
           naked_identifier: a
           data_type:
-          - keyword: STRUCT
+            struct_type:
+              keyword: STRUCT
+              struct_type_schema:
+              - start_angle_bracket: <
+              - quoted_identifier: '`b`'
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: STRING
+              - comma: ','
+              - naked_identifier: c
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: BOOLEAN
+              - end_angle_bracket: '>'
+      - comma: ','
+      - column_definition:
+          quoted_identifier: '`d`'
+          data_type:
+          - keyword: MAP
           - start_angle_bracket: <
-          - quoted_identifier: '`b`'
-          - colon: ':'
           - data_type:
               primitive_type:
                 keyword: STRING
           - comma: ','
-          - naked_identifier: c
-          - colon: ':'
           - data_type:
               primitive_type:
                 keyword: BOOLEAN
           - end_angle_bracket: '>'
       - comma: ','
-      - column_definition:
-          quoted_identifier: '`d`'
-          data_type:
-            keyword: MAP
-            start_angle_bracket: <
-            primitive_type:
-              keyword: STRING
-            comma: ','
-            data_type:
-              primitive_type:
-                keyword: BOOLEAN
-            end_angle_bracket: '>'
-      - comma: ','
       - column_definition:
           naked_identifier: e
           data_type:
-            keyword: ARRAY
-            start_angle_bracket: <
-            data_type:
-              primitive_type:
-                keyword: STRING
-            end_angle_bracket: '>'
+            array_type:
+              keyword: ARRAY
+              start_angle_bracket: <
+              data_type:
+                primitive_type:
+                  keyword: STRING
+              end_angle_bracket: '>'
       - end_bracket: )
 - statement_terminator: ;
diff --git a/test/fixtures/dialects/sparksql/create_table_datasource.sql b/test/fixtures/dialects/sparksql/create_table_datasource.sql
index 25b2c73..90eedb8 100644
--- a/test/fixtures/dialects/sparksql/create_table_datasource.sql
+++ b/test/fixtures/dialects/sparksql/create_table_datasource.sql
@@ -39,3 +39,13 @@ CREATE TABLE student (id INT, student_name STRING, age INT)
 USING CSV
 PARTITIONED BY (age)
 CLUSTERED BY (id) INTO 4 BUCKETS;
+
+CREATE EXTERNAL TABLE IF NOT EXISTS student (id INT, student_name STRING, age INT)
+USING iceberg
+PARTITIONED BY (age);
+
+CREATE TABLE student (id INT, student_name STRING, age INT)
+USING CSV
+COMMENT "this is a comment"
+PARTITIONED BY (age)
+STORED AS PARQUET;
diff --git a/test/fixtures/dialects/sparksql/create_table_datasource.yml b/test/fixtures/dialects/sparksql/create_table_datasource.yml
index f660434..b2c35c6 100644
--- a/test/fixtures/dialects/sparksql/create_table_datasource.yml
+++ b/test/fixtures/dialects/sparksql/create_table_datasource.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: ddf0a83aecc77d186c30f8acd2b202adf10c0c4d39343501f2f7a63a52ccbbe1
+_hash: 50ba3daf8fbee551483efd319027afab8246365b1f64835f1d92b8d5e25bd25d
 file:
 - statement:
     create_table_statement:
@@ -26,8 +26,10 @@ file:
               keyword: COMMENT
               quoted_literal: '"col_comment1"'
         end_bracket: )
-    - keyword: USING
-    - keyword: PARQUET
+    - using_clause:
+        keyword: USING
+        data_source_format:
+          keyword: PARQUET
     - keyword: OPTIONS
     - bracketed:
       - start_bracket: (
@@ -142,8 +144,10 @@ file:
             primitive_type:
               keyword: INT
       - end_bracket: )
-    - keyword: USING
-    - keyword: CSV
+    - using_clause:
+        keyword: USING
+        data_source_format:
+          keyword: CSV
 - statement_terminator: ;
 - statement:
     create_table_statement:
@@ -151,8 +155,10 @@ file:
     - keyword: TABLE
     - table_reference:
         naked_identifier: student_copy
-    - keyword: USING
-    - keyword: CSV
+    - using_clause:
+        keyword: USING
+        data_source_format:
+          keyword: CSV
     - keyword: AS
     - select_statement:
         select_clause:
@@ -225,8 +231,10 @@ file:
             primitive_type:
               keyword: INT
       - end_bracket: )
-    - keyword: USING
-    - keyword: CSV
+    - using_clause:
+        keyword: USING
+        data_source_format:
+          keyword: CSV
     - keyword: COMMENT
     - quoted_literal: "'this is a comment'"
     - keyword: TBLPROPERTIES
@@ -265,8 +273,10 @@ file:
             primitive_type:
               keyword: INT
       - end_bracket: )
-    - keyword: USING
-    - keyword: CSV
+    - using_clause:
+        keyword: USING
+        data_source_format:
+          keyword: CSV
     - keyword: PARTITIONED
     - keyword: BY
     - bracketed:
@@ -285,3 +295,88 @@ file:
     - numeric_literal: '4'
     - keyword: BUCKETS
 - statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: EXTERNAL
+    - keyword: TABLE
+    - keyword: IF
+    - keyword: NOT
+    - keyword: EXISTS
+    - table_reference:
+        naked_identifier: student
+    - bracketed:
+      - start_bracket: (
+      - column_definition:
+          naked_identifier: id
+          data_type:
+            primitive_type:
+              keyword: INT
+      - comma: ','
+      - column_definition:
+          naked_identifier: student_name
+          data_type:
+            primitive_type:
+              keyword: STRING
+      - comma: ','
+      - column_definition:
+          naked_identifier: age
+          data_type:
+            primitive_type:
+              keyword: INT
+      - end_bracket: )
+    - using_clause:
+        keyword: USING
+        data_source_format:
+          keyword: iceberg
+    - keyword: PARTITIONED
+    - keyword: BY
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: age
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: student
+    - bracketed:
+      - start_bracket: (
+      - column_definition:
+          naked_identifier: id
+          data_type:
+            primitive_type:
+              keyword: INT
+      - comma: ','
+      - column_definition:
+          naked_identifier: student_name
+          data_type:
+            primitive_type:
+              keyword: STRING
+      - comma: ','
+      - column_definition:
+          naked_identifier: age
+          data_type:
+            primitive_type:
+              keyword: INT
+      - end_bracket: )
+    - using_clause:
+        keyword: USING
+        data_source_format:
+          keyword: CSV
+    - keyword: COMMENT
+    - quoted_literal: '"this is a comment"'
+    - keyword: PARTITIONED
+    - keyword: BY
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: age
+        end_bracket: )
+    - keyword: STORED
+    - keyword: AS
+    - keyword: PARQUET
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/sparksql/create_table_hiveformat.yml b/test/fixtures/dialects/sparksql/create_table_hiveformat.yml
index cc4a450..acef27c 100644
--- a/test/fixtures/dialects/sparksql/create_table_hiveformat.yml
+++ b/test/fixtures/dialects/sparksql/create_table_hiveformat.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 8381964744a2723edcf42ca3a6389fac06143cf17b41ef1794ff592b1213f03e
+_hash: ca09e9acf6052bcb32612db25cff0ca38f9bb3ae63da9e9f5aa873bbd34ae2c8
 file:
 - statement:
     create_table_statement:
@@ -370,43 +370,47 @@ file:
       - column_definition:
           naked_identifier: friends
           data_type:
-            keyword: ARRAY
-            start_angle_bracket: <
-            data_type:
-              primitive_type:
-                keyword: STRING
-            end_angle_bracket: '>'
+            array_type:
+              keyword: ARRAY
+              start_angle_bracket: <
+              data_type:
+                primitive_type:
+                  keyword: STRING
+              end_angle_bracket: '>'
       - comma: ','
       - column_definition:
           naked_identifier: children
           data_type:
-            keyword: MAP
-            start_angle_bracket: <
-            primitive_type:
-              keyword: STRING
-            comma: ','
-            data_type:
-              primitive_type:
-                keyword: INT
-            end_angle_bracket: '>'
-      - comma: ','
-      - column_definition:
-          naked_identifier: address
-          data_type:
-          - keyword: STRUCT
+          - keyword: MAP
           - start_angle_bracket: <
-          - naked_identifier: street
-          - colon: ':'
           - data_type:
               primitive_type:
                 keyword: STRING
           - comma: ','
-          - naked_identifier: city
-          - colon: ':'
           - data_type:
               primitive_type:
-                keyword: STRING
+                keyword: INT
           - end_angle_bracket: '>'
+      - comma: ','
+      - column_definition:
+          naked_identifier: address
+          data_type:
+            struct_type:
+              keyword: STRUCT
+              struct_type_schema:
+              - start_angle_bracket: <
+              - naked_identifier: street
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: STRING
+              - comma: ','
+              - naked_identifier: city
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: STRING
+              - end_angle_bracket: '>'
       - end_bracket: )
     - row_format_clause:
       - keyword: ROW
diff --git a/test/fixtures/dialects/sparksql/create_table_like.yml b/test/fixtures/dialects/sparksql/create_table_like.yml
index 6ea9164..74a6f0b 100644
--- a/test/fixtures/dialects/sparksql/create_table_like.yml
+++ b/test/fixtures/dialects/sparksql/create_table_like.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 4a9441c495d439f8c047af003862b6a20eefe224ba3cd7d57e9efffa9e4f43bb
+_hash: 0e80c3cac8e3f8c1f6caf134e0df5bbbbe78906073995922778be82d8ff71f76
 file:
 - statement:
     create_table_statement:
@@ -17,8 +17,10 @@ file:
     - keyword: LIKE
     - table_reference:
         naked_identifier: source_table_identifier
-    - keyword: USING
-    - keyword: PARQUET
+    - using_clause:
+        keyword: USING
+        data_source_format:
+          keyword: PARQUET
     - row_format_clause:
       - keyword: ROW
       - keyword: FORMAT
@@ -67,8 +69,10 @@ file:
     - keyword: LIKE
     - table_reference:
         naked_identifier: student
-    - keyword: USING
-    - keyword: CSV
+    - using_clause:
+        keyword: USING
+        data_source_format:
+          keyword: CSV
 - statement_terminator: ;
 - statement:
     create_table_statement:
diff --git a/test/fixtures/dialects/sparksql/create_table_primitive_datatypes.sql b/test/fixtures/dialects/sparksql/create_table_primitive_datatypes.sql
new file mode 100644
index 0000000..dd63ba3
--- /dev/null
+++ b/test/fixtures/dialects/sparksql/create_table_primitive_datatypes.sql
@@ -0,0 +1,2 @@
+CREATE TABLE table_identifier
+( a LONG, b INT, c SMALLINT);
diff --git a/test/fixtures/dialects/sparksql/create_table_primitive_datatypes.yml b/test/fixtures/dialects/sparksql/create_table_primitive_datatypes.yml
new file mode 100644
index 0000000..49df2e5
--- /dev/null
+++ b/test/fixtures/dialects/sparksql/create_table_primitive_datatypes.yml
@@ -0,0 +1,34 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: d8f20b8c155640694cd84362ac48a58f7ee08fb6a53bb6636bbaf51d260f107d
+file:
+  statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: table_identifier
+    - bracketed:
+      - start_bracket: (
+      - column_definition:
+          naked_identifier: a
+          data_type:
+            primitive_type:
+              keyword: LONG
+      - comma: ','
+      - column_definition:
+          naked_identifier: b
+          data_type:
+            primitive_type:
+              keyword: INT
+      - comma: ','
+      - column_definition:
+          naked_identifier: c
+          data_type:
+            primitive_type:
+              keyword: SMALLINT
+      - end_bracket: )
+  statement_terminator: ;
diff --git a/test/fixtures/dialects/sparksql/create_view.sql b/test/fixtures/dialects/sparksql/create_view.sql
index 40c034e..f35733f 100644
--- a/test/fixtures/dialects/sparksql/create_view.sql
+++ b/test/fixtures/dialects/sparksql/create_view.sql
@@ -15,3 +15,12 @@ SELECT * from experienced_employee limit 2 ;
 -- Replace the implementation of `simple_udf`
 CREATE OR REPLACE VIEW experienced_employee_rep AS
 SELECT * from experienced_employee limit 2 ;
+
+CREATE TEMPORARY VIEW pulse_article_search_data
+    USING org.apache.spark.sql.jdbc
+    OPTIONS (
+  url "jdbc:postgresql:dbserver",
+  dbtable "schema.tablename",
+  user 'username',
+  password 'password'
+)
diff --git a/test/fixtures/dialects/sparksql/create_view.yml b/test/fixtures/dialects/sparksql/create_view.yml
index 40da1ff..3704584 100644
--- a/test/fixtures/dialects/sparksql/create_view.yml
+++ b/test/fixtures/dialects/sparksql/create_view.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: b2533ee6f925cd7b21d6150acbe56fdae6a2b46b2654f08c08bf26b7efa7d16d
+_hash: 9707e977b1efba1e0a3ad720e525968b4b83816679c979340c02dbc037a3c47b
 file:
 - statement:
     create_view_statement:
@@ -139,3 +139,41 @@ file:
           keyword: limit
           numeric_literal: '2'
 - statement_terminator: ;
+- statement:
+    create_view_statement:
+    - keyword: CREATE
+    - keyword: TEMPORARY
+    - keyword: VIEW
+    - table_reference:
+        naked_identifier: pulse_article_search_data
+    - keyword: USING
+    - data_source_format:
+        object_reference:
+        - naked_identifier: org
+        - dot: .
+        - naked_identifier: apache
+        - dot: .
+        - naked_identifier: spark
+        - dot: .
+        - naked_identifier: sql
+        - dot: .
+        - naked_identifier: jdbc
+    - keyword: OPTIONS
+    - bracketed:
+      - start_bracket: (
+      - property_name_identifier:
+          properties_naked_identifier: url
+      - quoted_literal: '"jdbc:postgresql:dbserver"'
+      - comma: ','
+      - property_name_identifier:
+          properties_naked_identifier: dbtable
+      - quoted_literal: '"schema.tablename"'
+      - comma: ','
+      - property_name_identifier:
+          properties_naked_identifier: user
+      - quoted_literal: "'username'"
+      - comma: ','
+      - property_name_identifier:
+          properties_naked_identifier: password
+      - quoted_literal: "'password'"
+      - end_bracket: )
diff --git a/test/fixtures/dialects/sparksql/databricks_operator_colon_sign.yml b/test/fixtures/dialects/sparksql/databricks_operator_colon_sign.yml
index 19983bc..ffea8ee 100644
--- a/test/fixtures/dialects/sparksql/databricks_operator_colon_sign.yml
+++ b/test/fixtures/dialects/sparksql/databricks_operator_colon_sign.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 99110a6c45b5907ccf64930fcc76e4653b5c58bc49fb4fbeab1279ad6d07c818
+_hash: 677133458db91d62118a67dbd7c1bcf81c3606396175e5e7de01565c67b601ed
 file:
 - statement:
     select_statement:
@@ -54,12 +54,13 @@ file:
               data_type:
                 primitive_type:
                   keyword: DECIMAL
-                  bracketed:
-                  - start_bracket: (
-                  - numeric_literal: '5'
-                  - comma: ','
-                  - numeric_literal: '2'
-                  - end_bracket: )
+                  bracketed_arguments:
+                    bracketed:
+                    - start_bracket: (
+                    - numeric_literal: '5'
+                    - comma: ','
+                    - numeric_literal: '2'
+                    - end_bracket: )
       from_clause:
         keyword: FROM
         from_expression:
diff --git a/test/fixtures/dialects/sparksql/delta_create_table.yml b/test/fixtures/dialects/sparksql/delta_create_table.yml
index ceeda32..b4f840f 100644
--- a/test/fixtures/dialects/sparksql/delta_create_table.yml
+++ b/test/fixtures/dialects/sparksql/delta_create_table.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: fa7337a921e487f7776e8da6a78aa388f08e3e68f24b66267800800b6e757ee9
+_hash: 0383611d0a30333c17e59452d7179e43e5fa4775f31282d3dc3bf41a19bf53ad
 file:
 - statement:
     create_table_statement:
@@ -66,8 +66,10 @@ file:
             primitive_type:
               keyword: INT
       - end_bracket: )
-    - keyword: USING
-    - keyword: DELTA
+    - using_clause:
+        keyword: USING
+        data_source_format:
+          keyword: DELTA
 - statement_terminator: ;
 - statement:
     create_table_statement:
@@ -129,8 +131,10 @@ file:
             primitive_type:
               keyword: INT
       - end_bracket: )
-    - keyword: USING
-    - keyword: DELTA
+    - using_clause:
+        keyword: USING
+        data_source_format:
+          keyword: DELTA
 - statement_terminator: ;
 - statement:
     create_table_statement:
@@ -192,8 +196,10 @@ file:
             primitive_type:
               keyword: INT
       - end_bracket: )
-    - keyword: USING
-    - keyword: DELTA
+    - using_clause:
+        keyword: USING
+        data_source_format:
+          keyword: DELTA
 - statement_terminator: ;
 - statement:
     create_table_statement:
@@ -253,8 +259,10 @@ file:
             primitive_type:
               keyword: INT
       - end_bracket: )
-    - keyword: USING
-    - keyword: DELTA
+    - using_clause:
+        keyword: USING
+        data_source_format:
+          keyword: DELTA
     - keyword: PARTITIONED
     - keyword: BY
     - bracketed:
@@ -271,8 +279,10 @@ file:
       - naked_identifier: default
       - dot: .
       - naked_identifier: people10m
-    - keyword: USING
-    - keyword: DELTA
+    - using_clause:
+        keyword: USING
+        data_source_format:
+          keyword: DELTA
     - keyword: LOCATION
     - quoted_literal: "'/delta/people10m'"
 - statement_terminator: ;
@@ -359,8 +369,10 @@ file:
             primitive_type:
               keyword: INT
       - end_bracket: )
-    - keyword: USING
-    - keyword: DELTA
+    - using_clause:
+        keyword: USING
+        data_source_format:
+          keyword: DELTA
     - keyword: PARTITIONED
     - keyword: BY
     - bracketed:
diff --git a/test/fixtures/dialects/sparksql/delta_update_table_schema.yml b/test/fixtures/dialects/sparksql/delta_update_table_schema.yml
index 132f8fd..d439324 100644
--- a/test/fixtures/dialects/sparksql/delta_update_table_schema.yml
+++ b/test/fixtures/dialects/sparksql/delta_update_table_schema.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 63ea3f968b4add320516f1caca79f87260d6f9e5f221ae9d11b739b4767ef2d1
+_hash: 209c3612244aa11fa9c18b0eea34c906d0f38105c9ae65330fa8867fdf2d23dd
 file:
 - statement:
     alter_table_statement:
@@ -14,7 +14,8 @@ file:
     - keyword: ADD
     - keyword: COLUMNS
     - column_definition:
-        naked_identifier: col_name
+        column_reference:
+          naked_identifier: col_name
         data_type:
           primitive_type:
             keyword: STRING
@@ -30,7 +31,8 @@ file:
     - bracketed:
         start_bracket: (
         column_definition:
-          naked_identifier: col_name
+          column_reference:
+            naked_identifier: col_name
           data_type:
             primitive_type:
               keyword: STRING
@@ -45,13 +47,15 @@ file:
     - keyword: ADD
     - keyword: COLUMNS
     - column_definition:
-        naked_identifier: col_name
+        column_reference:
+          naked_identifier: col_name
         data_type:
           primitive_type:
             keyword: STRING
     - comma: ','
     - column_definition:
-        naked_identifier: col_name2
+        column_reference:
+          naked_identifier: col_name2
         data_type:
           primitive_type:
             keyword: INT
@@ -65,7 +69,8 @@ file:
     - keyword: ADD
     - keyword: COLUMNS
     - column_definition:
-        naked_identifier: col_name
+        column_reference:
+          naked_identifier: col_name
         data_type:
           primitive_type:
             keyword: STRING
@@ -84,7 +89,8 @@ file:
     - keyword: ADD
     - keyword: COLUMNS
     - column_definition:
-        naked_identifier: col_name
+        column_reference:
+          naked_identifier: col_name
         data_type:
           primitive_type:
             keyword: STRING
@@ -95,7 +101,8 @@ file:
     - keyword: FIRST
     - comma: ','
     - column_definition:
-        naked_identifier: col_name2
+        column_reference:
+          naked_identifier: col_name2
         data_type:
           primitive_type:
             keyword: INT
@@ -354,26 +361,28 @@ file:
       - column_definition:
           naked_identifier: col_b
           data_type:
-          - keyword: STRUCT
-          - start_angle_bracket: <
-          - naked_identifier: key2
-          - colon: ':'
-          - data_type:
-              primitive_type:
-                keyword: STRING
-          - comma: ','
-          - naked_identifier: nested
-          - colon: ':'
-          - data_type:
-              primitive_type:
-                keyword: STRING
-          - comma: ','
-          - naked_identifier: key1
-          - colon: ':'
-          - data_type:
-              primitive_type:
-                keyword: STRING
-          - end_angle_bracket: '>'
+            struct_type:
+              keyword: STRUCT
+              struct_type_schema:
+              - start_angle_bracket: <
+              - naked_identifier: key2
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: STRING
+              - comma: ','
+              - naked_identifier: nested
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: STRING
+              - comma: ','
+              - naked_identifier: key1
+              - colon: ':'
+              - data_type:
+                  primitive_type:
+                    keyword: STRING
+              - end_angle_bracket: '>'
       - comma: ','
       - column_definition:
           naked_identifier: col_a
diff --git a/test/fixtures/dialects/sparksql/describe_database.sql b/test/fixtures/dialects/sparksql/describe_database.sql
index c223545..b659053 100644
--- a/test/fixtures/dialects/sparksql/describe_database.sql
+++ b/test/fixtures/dialects/sparksql/describe_database.sql
@@ -3,3 +3,6 @@ DESCRIBE DATABASE employees;
 DESCRIBE DATABASE EXTENDED employees;
 
 DESC DATABASE deployment;
+
+-- Keywords SCHEMA and DATABASE are interchangeable.
+DESCRIBE SCHEMA employees;
diff --git a/test/fixtures/dialects/sparksql/describe_database.yml b/test/fixtures/dialects/sparksql/describe_database.yml
index bb55fd2..7d84a04 100644
--- a/test/fixtures/dialects/sparksql/describe_database.yml
+++ b/test/fixtures/dialects/sparksql/describe_database.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 62cf038f75d11eb1e92a55b7e4003d3a9bfdb8edb5cfaeb42e541d81a760d56d
+_hash: e519773c48713b10590187317f664206adf50309a956a1b182dbd5bc46b34917
 file:
 - statement:
     describe_statement:
@@ -27,3 +27,10 @@ file:
     - database_reference:
         naked_identifier: deployment
 - statement_terminator: ;
+- statement:
+    describe_statement:
+    - keyword: DESCRIBE
+    - keyword: SCHEMA
+    - database_reference:
+        naked_identifier: employees
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/sparksql/explain.yml b/test/fixtures/dialects/sparksql/explain.yml
index ffaaded..470ea02 100644
--- a/test/fixtures/dialects/sparksql/explain.yml
+++ b/test/fixtures/dialects/sparksql/explain.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: e8419b6c809320f4b5a85b9aefbd1e6867c23f2e9753dfbda7191afbd87630dd
+_hash: 857fcc5285757408c4a45ef6f3114b118d98f95080003349d3dd8da1b5a5af6e
 file:
 - statement:
     explain_statement:
@@ -195,8 +195,10 @@ file:
                 primitive_type:
                   keyword: INT
           - end_bracket: )
-        - keyword: USING
-        - keyword: CSV
+        - using_clause:
+            keyword: USING
+            data_source_format:
+              keyword: CSV
 - statement_terminator: ;
 - statement:
     explain_statement:
diff --git a/test/fixtures/dialects/sparksql/iceberg_alter_table.sql b/test/fixtures/dialects/sparksql/iceberg_alter_table.sql
new file mode 100644
index 0000000..dd5d998
--- /dev/null
+++ b/test/fixtures/dialects/sparksql/iceberg_alter_table.sql
@@ -0,0 +1,123 @@
+-- Iceberg Spark DDL Alter Statements https://iceberg.apache.org/docs/latest/spark-ddl/#alter-table
+
+ALTER TABLE prod.db.sample RENAME TO prod.db.new_name;
+
+ALTER TABLE prod.db.sample SET TBLPROPERTIES (
+    'read.split.target-size'='268435456'
+);
+
+ALTER TABLE prod.db.sample UNSET TBLPROPERTIES ('read.split.target-size');
+
+ALTER TABLE prod.db.sample SET TBLPROPERTIES (
+    'comment' = 'A table comment.'
+);
+
+ALTER TABLE prod.db.sample
+ADD COLUMNS (
+    new_column string comment 'new_column docs'
+  );
+
+-- create a struct column
+ALTER TABLE prod.db.sample
+ADD COLUMN point struct<x: double, y: double>;
+
+-- add a field to the struct
+ALTER TABLE prod.db.sample
+ADD COLUMN point.z double;
+
+-- create a nested array column of struct
+ALTER TABLE prod.db.sample
+ADD COLUMN points array<struct<x: double, y: double>>;
+
+-- add a field to the struct within an array. Using keyword 'element' to access the array's element column.
+ALTER TABLE prod.db.sample
+ADD COLUMN points.element.z double;
+
+-- create a map column of struct key and struct value
+ALTER TABLE prod.db.sample
+ADD COLUMN points map<struct<x: int>, struct<a: int>>;
+
+-- add a field to the value struct in a map. Using keyword 'value' to access the map's value column.
+ALTER TABLE prod.db.sample
+ADD COLUMN points.value.b int;
+
+ALTER TABLE prod.db.sample
+ADD COLUMN new_column bigint AFTER other_column;
+
+ALTER TABLE prod.db.sample
+ADD COLUMN nested.new_column bigint FIRST;
+
+ALTER TABLE prod.db.sample RENAME COLUMN data TO payload;
+
+ALTER TABLE prod.db.sample RENAME COLUMN location.lat TO latitude;
+
+ALTER TABLE prod.db.sample ALTER COLUMN measurement TYPE double;
+
+ALTER TABLE prod.db.sample ALTER COLUMN measurement TYPE double COMMENT 'unit is bytes per second';
+
+ALTER TABLE prod.db.sample ALTER COLUMN measurement COMMENT 'unit is kilobytes per second';
+
+ALTER TABLE prod.db.sample ALTER COLUMN col FIRST;
+
+ALTER TABLE prod.db.sample ALTER COLUMN nested.col AFTER other_col;
+
+ALTER TABLE prod.db.sample ALTER COLUMN id DROP NOT NULL;
+
+ALTER TABLE prod.db.sample DROP COLUMN id;
+
+ALTER TABLE prod.db.sample DROP COLUMN point.z;
+
+ALTER TABLE prod.db.sample ADD PARTITION FIELD catalog; -- identity transform
+
+ALTER TABLE prod.db.sample ADD PARTITION FIELD bucket(16, id);
+
+ALTER TABLE prod.db.sample ADD PARTITION FIELD truncate(4, data);
+
+ALTER TABLE prod.db.sample ADD PARTITION FIELD years(ts);
+
+-- use optional AS keyword to specify a custom name for the partition field
+ALTER TABLE prod.db.sample ADD PARTITION FIELD bucket(16, id) AS shard;
+
+ALTER TABLE prod.db.sample DROP PARTITION FIELD catalog;
+
+ALTER TABLE prod.db.sample DROP PARTITION FIELD bucket(16, id);
+
+ALTER TABLE prod.db.sample DROP PARTITION FIELD truncate(4, data);
+
+ALTER TABLE prod.db.sample DROP PARTITION FIELD years(ts);
+
+ALTER TABLE prod.db.sample DROP PARTITION FIELD shard;
+
+ALTER TABLE prod.db.sample REPLACE PARTITION FIELD ts_day WITH days(ts);
+
+-- use optional AS keyword to specify a custom name for the new partition field
+ALTER TABLE prod.db.sample REPLACE PARTITION FIELD ts_day WITH days(ts) AS day_of_ts;
+
+ALTER TABLE prod.db.sample WRITE ORDERED BY category, id;
+
+-- use optional ASC/DESC keyword to specify sort order of each field (default ASC)
+ALTER TABLE prod.db.sample WRITE ORDERED BY category ASC, id DESC;
+
+-- use optional NULLS FIRST/NULLS LAST keyword to specify null order of each field (default FIRST)
+ALTER TABLE prod.db.sample WRITE ORDERED BY category ASC NULLS LAST, id DESC NULLS FIRST;
+
+ALTER TABLE prod.db.sample WRITE LOCALLY ORDERED BY category, id;
+
+ALTER TABLE prod.db.sample WRITE DISTRIBUTED BY PARTITION;
+
+ALTER TABLE prod.db.sample WRITE DISTRIBUTED BY PARTITION LOCALLY ORDERED BY category, id;
+
+-- single column
+ALTER TABLE prod.db.sample SET IDENTIFIER FIELDS id;
+
+-- multiple columns
+ALTER TABLE prod.db.sample SET IDENTIFIER FIELDS id, data;
+
+-- single column
+ALTER TABLE prod.db.sample DROP IDENTIFIER FIELDS id;
+
+-- multiple columns
+ALTER TABLE prod.db.sample DROP IDENTIFIER FIELDS id, data
+
+
+
diff --git a/test/fixtures/dialects/sparksql/iceberg_alter_table.yml b/test/fixtures/dialects/sparksql/iceberg_alter_table.yml
new file mode 100644
index 0000000..793b56d
--- /dev/null
+++ b/test/fixtures/dialects/sparksql/iceberg_alter_table.yml
@@ -0,0 +1,967 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 1aaa78d9b62ca8de52d21da4d3dc2db40daecfe19b36c8d82db0a94f922a48b3
+file:
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: RENAME
+    - keyword: TO
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: new_name
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: SET
+    - keyword: TBLPROPERTIES
+    - bracketed:
+        start_bracket: (
+        property_name_identifier:
+          quoted_identifier: "'read.split.target-size'"
+        comparison_operator:
+          raw_comparison_operator: '='
+        quoted_literal: "'268435456'"
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: UNSET
+    - keyword: TBLPROPERTIES
+    - bracketed:
+        start_bracket: (
+        property_name_identifier:
+          quoted_identifier: "'read.split.target-size'"
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: SET
+    - keyword: TBLPROPERTIES
+    - bracketed:
+        start_bracket: (
+        property_name_identifier:
+          quoted_identifier: "'comment'"
+        comparison_operator:
+          raw_comparison_operator: '='
+        quoted_literal: "'A table comment.'"
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: ADD
+    - keyword: COLUMNS
+    - bracketed:
+        start_bracket: (
+        column_definition:
+          column_reference:
+            naked_identifier: new_column
+          data_type:
+            primitive_type:
+              keyword: string
+          column_constraint_segment:
+            comment_clause:
+              keyword: comment
+              quoted_literal: "'new_column docs'"
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: ADD
+    - keyword: COLUMN
+    - column_definition:
+        column_reference:
+          naked_identifier: point
+        data_type:
+          struct_type:
+            keyword: struct
+            struct_type_schema:
+            - start_angle_bracket: <
+            - naked_identifier: x
+            - colon: ':'
+            - data_type:
+                primitive_type:
+                  keyword: double
+            - comma: ','
+            - naked_identifier: y
+            - colon: ':'
+            - data_type:
+                primitive_type:
+                  keyword: double
+            - end_angle_bracket: '>'
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: ADD
+    - keyword: COLUMN
+    - column_definition:
+        column_reference:
+        - naked_identifier: point
+        - dot: .
+        - naked_identifier: z
+        data_type:
+          primitive_type:
+            keyword: double
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: ADD
+    - keyword: COLUMN
+    - column_definition:
+        column_reference:
+          naked_identifier: points
+        data_type:
+          array_type:
+            keyword: array
+            start_angle_bracket: <
+            data_type:
+              struct_type:
+                keyword: struct
+                struct_type_schema:
+                - start_angle_bracket: <
+                - naked_identifier: x
+                - colon: ':'
+                - data_type:
+                    primitive_type:
+                      keyword: double
+                - comma: ','
+                - naked_identifier: y
+                - colon: ':'
+                - data_type:
+                    primitive_type:
+                      keyword: double
+                - end_angle_bracket: '>'
+            end_angle_bracket: '>'
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: ADD
+    - keyword: COLUMN
+    - column_definition:
+        column_reference:
+        - naked_identifier: points
+        - dot: .
+        - naked_identifier: element
+        - dot: .
+        - naked_identifier: z
+        data_type:
+          primitive_type:
+            keyword: double
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: ADD
+    - keyword: COLUMN
+    - column_definition:
+        column_reference:
+          naked_identifier: points
+        data_type:
+        - keyword: map
+        - start_angle_bracket: <
+        - data_type:
+            struct_type:
+              keyword: struct
+              struct_type_schema:
+                start_angle_bracket: <
+                naked_identifier: x
+                colon: ':'
+                data_type:
+                  primitive_type:
+                    keyword: int
+                end_angle_bracket: '>'
+        - comma: ','
+        - data_type:
+            struct_type:
+              keyword: struct
+              struct_type_schema:
+                start_angle_bracket: <
+                naked_identifier: a
+                colon: ':'
+                data_type:
+                  primitive_type:
+                    keyword: int
+                end_angle_bracket: '>'
+        - end_angle_bracket: '>'
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: ADD
+    - keyword: COLUMN
+    - column_definition:
+        column_reference:
+        - naked_identifier: points
+        - dot: .
+        - naked_identifier: value
+        - dot: .
+        - naked_identifier: b
+        data_type:
+          primitive_type:
+            keyword: int
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: ADD
+    - keyword: COLUMN
+    - column_definition:
+        column_reference:
+          naked_identifier: new_column
+        data_type:
+          primitive_type:
+            keyword: bigint
+    - keyword: AFTER
+    - column_reference:
+        naked_identifier: other_column
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: ADD
+    - keyword: COLUMN
+    - column_definition:
+        column_reference:
+        - naked_identifier: nested
+        - dot: .
+        - naked_identifier: new_column
+        data_type:
+          primitive_type:
+            keyword: bigint
+    - keyword: FIRST
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: RENAME
+    - keyword: COLUMN
+    - column_reference:
+        naked_identifier: data
+    - keyword: TO
+    - column_reference:
+        naked_identifier: payload
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: RENAME
+    - keyword: COLUMN
+    - column_reference:
+      - naked_identifier: location
+      - dot: .
+      - naked_identifier: lat
+    - keyword: TO
+    - column_reference:
+        naked_identifier: latitude
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: ALTER
+    - keyword: COLUMN
+    - column_reference:
+        naked_identifier: measurement
+    - keyword: TYPE
+    - data_type:
+        primitive_type:
+          keyword: double
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: ALTER
+    - keyword: COLUMN
+    - column_reference:
+        naked_identifier: measurement
+    - keyword: TYPE
+    - data_type:
+        primitive_type:
+          keyword: double
+    - keyword: COMMENT
+    - quoted_literal: "'unit is bytes per second'"
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: ALTER
+    - keyword: COLUMN
+    - column_reference:
+        naked_identifier: measurement
+    - keyword: COMMENT
+    - quoted_literal: "'unit is kilobytes per second'"
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: ALTER
+    - keyword: COLUMN
+    - column_reference:
+        naked_identifier: col
+    - keyword: FIRST
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: ALTER
+    - keyword: COLUMN
+    - column_reference:
+      - naked_identifier: nested
+      - dot: .
+      - naked_identifier: col
+    - keyword: AFTER
+    - column_reference:
+        naked_identifier: other_col
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: ALTER
+    - keyword: COLUMN
+    - column_reference:
+        naked_identifier: id
+    - keyword: DROP
+    - keyword: NOT
+    - keyword: 'NULL'
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: DROP
+    - keyword: COLUMN
+    - column_reference:
+        naked_identifier: id
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: DROP
+    - keyword: COLUMN
+    - column_reference:
+      - naked_identifier: point
+      - dot: .
+      - naked_identifier: z
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: ADD
+    - keyword: PARTITION
+    - keyword: FIELD
+    - column_reference:
+        naked_identifier: catalog
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: ADD
+    - keyword: PARTITION
+    - keyword: FIELD
+    - iceberg_transformation:
+        keyword: bucket
+        bracketed:
+          start_bracket: (
+          numeric_literal: '16'
+          comma: ','
+          column_reference:
+            naked_identifier: id
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: ADD
+    - keyword: PARTITION
+    - keyword: FIELD
+    - iceberg_transformation:
+        keyword: truncate
+        bracketed:
+          start_bracket: (
+          numeric_literal: '4'
+          comma: ','
+          column_reference:
+            naked_identifier: data
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: ADD
+    - keyword: PARTITION
+    - keyword: FIELD
+    - iceberg_transformation:
+        keyword: years
+        bracketed:
+          start_bracket: (
+          column_reference:
+            naked_identifier: ts
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: ADD
+    - keyword: PARTITION
+    - keyword: FIELD
+    - iceberg_transformation:
+        keyword: bucket
+        bracketed:
+          start_bracket: (
+          numeric_literal: '16'
+          comma: ','
+          column_reference:
+            naked_identifier: id
+          end_bracket: )
+    - keyword: AS
+    - naked_identifier: shard
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: DROP
+    - keyword: PARTITION
+    - keyword: FIELD
+    - column_reference:
+        naked_identifier: catalog
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: DROP
+    - keyword: PARTITION
+    - keyword: FIELD
+    - iceberg_transformation:
+        keyword: bucket
+        bracketed:
+          start_bracket: (
+          numeric_literal: '16'
+          comma: ','
+          column_reference:
+            naked_identifier: id
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: DROP
+    - keyword: PARTITION
+    - keyword: FIELD
+    - iceberg_transformation:
+        keyword: truncate
+        bracketed:
+          start_bracket: (
+          numeric_literal: '4'
+          comma: ','
+          column_reference:
+            naked_identifier: data
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: DROP
+    - keyword: PARTITION
+    - keyword: FIELD
+    - iceberg_transformation:
+        keyword: years
+        bracketed:
+          start_bracket: (
+          column_reference:
+            naked_identifier: ts
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: DROP
+    - keyword: PARTITION
+    - keyword: FIELD
+    - column_reference:
+        naked_identifier: shard
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: REPLACE
+    - keyword: PARTITION
+    - keyword: FIELD
+    - column_reference:
+        naked_identifier: ts_day
+    - keyword: WITH
+    - iceberg_transformation:
+        keyword: days
+        bracketed:
+          start_bracket: (
+          column_reference:
+            naked_identifier: ts
+          end_bracket: )
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: REPLACE
+    - keyword: PARTITION
+    - keyword: FIELD
+    - column_reference:
+        naked_identifier: ts_day
+    - keyword: WITH
+    - iceberg_transformation:
+        keyword: days
+        bracketed:
+          start_bracket: (
+          column_reference:
+            naked_identifier: ts
+          end_bracket: )
+    - keyword: AS
+    - naked_identifier: day_of_ts
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: WRITE
+    - keyword: ORDERED
+    - keyword: BY
+    - column_reference:
+        naked_identifier: category
+    - comma: ','
+    - column_reference:
+        naked_identifier: id
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: WRITE
+    - keyword: ORDERED
+    - keyword: BY
+    - column_reference:
+        naked_identifier: category
+    - keyword: ASC
+    - comma: ','
+    - column_reference:
+        naked_identifier: id
+    - keyword: DESC
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: WRITE
+    - keyword: ORDERED
+    - keyword: BY
+    - column_reference:
+        naked_identifier: category
+    - keyword: ASC
+    - keyword: NULLS
+    - keyword: LAST
+    - comma: ','
+    - column_reference:
+        naked_identifier: id
+    - keyword: DESC
+    - keyword: NULLS
+    - keyword: FIRST
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: WRITE
+    - keyword: LOCALLY
+    - keyword: ORDERED
+    - keyword: BY
+    - column_reference:
+        naked_identifier: category
+    - comma: ','
+    - column_reference:
+        naked_identifier: id
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: WRITE
+    - keyword: DISTRIBUTED
+    - keyword: BY
+    - keyword: PARTITION
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: WRITE
+    - keyword: DISTRIBUTED
+    - keyword: BY
+    - keyword: PARTITION
+    - keyword: LOCALLY
+    - keyword: ORDERED
+    - keyword: BY
+    - column_reference:
+        naked_identifier: category
+    - comma: ','
+    - column_reference:
+        naked_identifier: id
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: SET
+    - keyword: IDENTIFIER
+    - keyword: FIELDS
+    - column_reference:
+        naked_identifier: id
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: SET
+    - keyword: IDENTIFIER
+    - keyword: FIELDS
+    - column_reference:
+        naked_identifier: id
+    - comma: ','
+    - column_reference:
+        naked_identifier: data
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: DROP
+    - keyword: IDENTIFIER
+    - keyword: FIELDS
+    - column_reference:
+        naked_identifier: id
+- statement_terminator: ;
+- statement:
+    alter_table_statement:
+    - keyword: ALTER
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - keyword: DROP
+    - keyword: IDENTIFIER
+    - keyword: FIELDS
+    - column_reference:
+        naked_identifier: id
+    - comma: ','
+    - column_reference:
+        naked_identifier: data
diff --git a/test/fixtures/dialects/sparksql/iceberg_create_table.sql b/test/fixtures/dialects/sparksql/iceberg_create_table.sql
new file mode 100644
index 0000000..1493f1f
--- /dev/null
+++ b/test/fixtures/dialects/sparksql/iceberg_create_table.sql
@@ -0,0 +1,27 @@
+-- Iceberg Spark DDL Create Table Statements https://iceberg.apache.org/docs/latest/spark-ddl/#create-table
+
+CREATE TABLE prod.db.sample (
+    id bigint COMMENT 'unique id',
+    data string)
+USING iceberg;
+
+CREATE TABLE prod.db.sample (
+    id bigint,
+    data string,
+    category string)
+USING iceberg
+PARTITIONED BY (category);
+
+CREATE TABLE prod.db.sample (
+    id bigint,
+    data string,
+    category string,
+    ts timestamp)
+USING iceberg
+PARTITIONED BY (bucket(16, id), days(ts), category);
+
+CREATE TABLE prod.db.sample
+USING iceberg
+PARTITIONED BY (part)
+TBLPROPERTIES ('key'='value');
+
diff --git a/test/fixtures/dialects/sparksql/iceberg_create_table.yml b/test/fixtures/dialects/sparksql/iceberg_create_table.yml
new file mode 100644
index 0000000..93034f4
--- /dev/null
+++ b/test/fixtures/dialects/sparksql/iceberg_create_table.yml
@@ -0,0 +1,179 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 02a96866985a7d40d12108ccd412b6e7a79ffc1f663d3eabca1f10edb8ccf66c
+file:
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - bracketed:
+      - start_bracket: (
+      - column_definition:
+          naked_identifier: id
+          data_type:
+            primitive_type:
+              keyword: bigint
+          column_constraint_segment:
+            comment_clause:
+              keyword: COMMENT
+              quoted_literal: "'unique id'"
+      - comma: ','
+      - column_definition:
+          naked_identifier: data
+          data_type:
+            primitive_type:
+              keyword: string
+      - end_bracket: )
+    - using_clause:
+        keyword: USING
+        data_source_format:
+          keyword: iceberg
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - bracketed:
+      - start_bracket: (
+      - column_definition:
+          naked_identifier: id
+          data_type:
+            primitive_type:
+              keyword: bigint
+      - comma: ','
+      - column_definition:
+          naked_identifier: data
+          data_type:
+            primitive_type:
+              keyword: string
+      - comma: ','
+      - column_definition:
+          naked_identifier: category
+          data_type:
+            primitive_type:
+              keyword: string
+      - end_bracket: )
+    - using_clause:
+        keyword: USING
+        data_source_format:
+          keyword: iceberg
+    - keyword: PARTITIONED
+    - keyword: BY
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: category
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - bracketed:
+      - start_bracket: (
+      - column_definition:
+          naked_identifier: id
+          data_type:
+            primitive_type:
+              keyword: bigint
+      - comma: ','
+      - column_definition:
+          naked_identifier: data
+          data_type:
+            primitive_type:
+              keyword: string
+      - comma: ','
+      - column_definition:
+          naked_identifier: category
+          data_type:
+            primitive_type:
+              keyword: string
+      - comma: ','
+      - column_definition:
+          naked_identifier: ts
+          data_type:
+            primitive_type:
+              keyword: timestamp
+      - end_bracket: )
+    - using_clause:
+        keyword: USING
+        data_source_format:
+          keyword: iceberg
+    - keyword: PARTITIONED
+    - keyword: BY
+    - bracketed:
+      - start_bracket: (
+      - iceberg_transformation:
+          keyword: bucket
+          bracketed:
+            start_bracket: (
+            numeric_literal: '16'
+            comma: ','
+            column_reference:
+              naked_identifier: id
+            end_bracket: )
+      - comma: ','
+      - iceberg_transformation:
+          keyword: days
+          bracketed:
+            start_bracket: (
+            column_reference:
+              naked_identifier: ts
+            end_bracket: )
+      - comma: ','
+      - column_reference:
+          naked_identifier: category
+      - end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - using_clause:
+        keyword: USING
+        data_source_format:
+          keyword: iceberg
+    - keyword: PARTITIONED
+    - keyword: BY
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: part
+        end_bracket: )
+    - keyword: TBLPROPERTIES
+    - bracketed:
+        start_bracket: (
+        property_name_identifier:
+          quoted_identifier: "'key'"
+        comparison_operator:
+          raw_comparison_operator: '='
+        quoted_literal: "'value'"
+        end_bracket: )
+- statement_terminator: ;
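
As the auto-generated headers in these YAML files note, parse fixtures are rebuilt from their SQL sources rather than edited by hand, and each carries a content hash that the tests check. A minimal sketch of that regeneration step, assuming it runs from the root of a sqlfluff source checkout:

import subprocess
import sys

# Rebuild all parse-fixture YAML files (including the new Iceberg ones)
# from their .sql sources; the generator also rewrites each _hash field.
subprocess.run(
    [sys.executable, "test/generate_parse_fixture_yml.py"],
    check=True,  # surface a non-zero exit as an exception
)
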
diff --git a/test/fixtures/dialects/sparksql/iceberg_replace_table.sql b/test/fixtures/dialects/sparksql/iceberg_replace_table.sql
new file mode 100644
index 0000000..0af5fb9
--- /dev/null
+++ b/test/fixtures/dialects/sparksql/iceberg_replace_table.sql
@@ -0,0 +1,13 @@
+-- Iceberg Spark DDL Replace Table Statements https://iceberg.apache.org/docs/latest/spark-ddl/#replace-table--as-select
+
+REPLACE TABLE prod.db.sample
+USING iceberg;
+
+REPLACE TABLE prod.db.sample
+USING iceberg
+PARTITIONED BY (part)
+TBLPROPERTIES ('key'='value');
+
+CREATE OR REPLACE TABLE prod.db.sample
+USING iceberg;
+
diff --git a/test/fixtures/dialects/sparksql/iceberg_replace_table.yml b/test/fixtures/dialects/sparksql/iceberg_replace_table.yml
new file mode 100644
index 0000000..ff3d448
--- /dev/null
+++ b/test/fixtures/dialects/sparksql/iceberg_replace_table.yml
@@ -0,0 +1,70 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 85f7ed31e58bf0a5f4c8fffee2974bf12aa4f1b9027b92050953c12486797db6
+file:
+- statement:
+    replace_table_statement:
+    - keyword: REPLACE
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - using_clause:
+        keyword: USING
+        data_source_format:
+          keyword: iceberg
+- statement_terminator: ;
+- statement:
+    replace_table_statement:
+    - keyword: REPLACE
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - using_clause:
+        keyword: USING
+        data_source_format:
+          keyword: iceberg
+    - keyword: PARTITIONED
+    - keyword: BY
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: part
+        end_bracket: )
+    - keyword: TBLPROPERTIES
+    - bracketed:
+        start_bracket: (
+        property_name_identifier:
+          quoted_identifier: "'key'"
+        comparison_operator:
+          raw_comparison_operator: '='
+        quoted_literal: "'value'"
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: OR
+    - keyword: REPLACE
+    - keyword: TABLE
+    - table_reference:
+      - naked_identifier: prod
+      - dot: .
+      - naked_identifier: db
+      - dot: .
+      - naked_identifier: sample
+    - using_clause:
+        keyword: USING
+        data_source_format:
+          keyword: iceberg
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/sparksql/insert_overwrite_directory.yml b/test/fixtures/dialects/sparksql/insert_overwrite_directory.yml
index 9088174..ac105e5 100644
--- a/test/fixtures/dialects/sparksql/insert_overwrite_directory.yml
+++ b/test/fixtures/dialects/sparksql/insert_overwrite_directory.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 25c9fd6dd10711ed57f7bed499adc04d5252f92e27b212ed9ade578c0bd5e6d1
+_hash: 1693594c7c492caa9efe94c8fdd1a42c392020ee78aea06c6b922bf4e444652a
 file:
 - statement:
     insert_overwrite_directory_statement:
@@ -12,7 +12,8 @@ file:
     - keyword: DIRECTORY
     - quoted_literal: "'/tmp/destination'"
     - keyword: USING
-    - keyword: PARQUET
+    - data_source_format:
+        keyword: PARQUET
     - keyword: OPTIONS
     - bracketed:
       - start_bracket: (
@@ -60,7 +61,8 @@ file:
     - keyword: OVERWRITE
     - keyword: DIRECTORY
     - keyword: USING
-    - keyword: PARQUET
+    - data_source_format:
+        keyword: PARQUET
     - keyword: OPTIONS
     - bracketed:
       - start_bracket: (
@@ -108,7 +110,8 @@ file:
     - keyword: OVERWRITE
     - keyword: DIRECTORY
     - keyword: USING
-    - keyword: PARQUET
+    - data_source_format:
+        keyword: PARQUET
     - keyword: OPTIONS
     - bracketed:
       - start_bracket: (
@@ -149,7 +152,8 @@ file:
     - keyword: DIRECTORY
     - quoted_literal: "'/tmp/destination'"
     - keyword: USING
-    - keyword: PARQUET
+    - data_source_format:
+        keyword: PARQUET
     - keyword: OPTIONS
     - bracketed:
       - start_bracket: (
diff --git a/test/fixtures/dialects/sparksql/numeric_literal.yml b/test/fixtures/dialects/sparksql/numeric_literal.yml
index 18dc54e..c0bc904 100644
--- a/test/fixtures/dialects/sparksql/numeric_literal.yml
+++ b/test/fixtures/dialects/sparksql/numeric_literal.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 72d4e1b192aea1c4eaf970bf27f493e205d6c131d1e6e09bd636f9f51e4a5245
+_hash: d06536672a4e28d97dd08f5456cf9201d7712426ae8ac0458d0083569d997c3e
 file:
   statement:
     select_statement:
@@ -216,18 +216,18 @@ file:
             naked_identifier: baz
         - comparison_operator:
             raw_comparison_operator: '>'
+        - sign_indicator: '-'
+        - sign_indicator: +
         - numeric_literal:
-          - sign_indicator: '-'
-          - sign_indicator: +
-          - sign_indicator: '-'
-          - numeric_literal: '1'
+            sign_indicator: '-'
+            numeric_literal: '1'
         - binary_operator: AND
         - column_reference:
             naked_identifier: baz
         - comparison_operator:
             raw_comparison_operator: '>'
+        - sign_indicator: '-'
+        - sign_indicator: +
         - numeric_literal:
-          - sign_indicator: '-'
-          - sign_indicator: +
-          - sign_indicator: '-'
-          - numeric_literal: '1'
+            sign_indicator: '-'
+            numeric_literal: '1'
diff --git a/test/fixtures/dialects/sparksql/pivot_clause.sql b/test/fixtures/dialects/sparksql/pivot_clause.sql
index cb76913..a8d6ef8 100644
--- a/test/fixtures/dialects/sparksql/pivot_clause.sql
+++ b/test/fixtures/dialects/sparksql/pivot_clause.sql
@@ -71,3 +71,15 @@ FROM person
         SUM(age) AS a, AVG(class) AS c
         FOR (name, age) IN ('John' AS c1, ('Mike', 40) AS c2)
     );
+
+
+SELECT * FROM (
+  some_table
+) PIVOT (
+  min(timestamp_ns) / 1e9 as min_timestamp_s -- this is the offending line
+
+  FOR run_id in (
+    test_run_id as test,
+    ctrl_run_id as ctrl
+  )
+);
diff --git a/test/fixtures/dialects/sparksql/pivot_clause.yml b/test/fixtures/dialects/sparksql/pivot_clause.yml
index 5704511..44c7aff 100644
--- a/test/fixtures/dialects/sparksql/pivot_clause.yml
+++ b/test/fixtures/dialects/sparksql/pivot_clause.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: cc37cc13dd4c0f8fa749d3a979221aff04f86f70c50d8f9e18bf771127fd2155
+_hash: 6fe63a40ae5b468ad5207ebec94cf11a25d2bcb96fb8437e5534cd2a96b54f92
 file:
 - statement:
     select_statement:
@@ -574,3 +574,61 @@ file:
                 - end_bracket: )
               - end_bracket: )
 - statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            bracketed:
+              start_bracket: (
+              table_expression:
+                table_reference:
+                  naked_identifier: some_table
+              end_bracket: )
+            pivot_clause:
+              keyword: PIVOT
+              bracketed:
+              - start_bracket: (
+              - expression:
+                  function:
+                    function_name:
+                      function_name_identifier: min
+                    bracketed:
+                      start_bracket: (
+                      expression:
+                        column_reference:
+                          naked_identifier: timestamp_ns
+                      end_bracket: )
+                  binary_operator: /
+                  numeric_literal: 1e9
+              - alias_expression:
+                  keyword: as
+                  naked_identifier: min_timestamp_s
+              - keyword: FOR
+              - naked_identifier: run_id
+              - keyword: in
+              - bracketed:
+                - start_bracket: (
+                - expression:
+                    column_reference:
+                      naked_identifier: test_run_id
+                - alias_expression:
+                    keyword: as
+                    naked_identifier: test
+                - comma: ','
+                - expression:
+                    column_reference:
+                      naked_identifier: ctrl_run_id
+                - alias_expression:
+                    keyword: as
+                    naked_identifier: ctrl
+                - end_bracket: )
+              - end_bracket: )
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/sparksql/select_distribute_by.sql b/test/fixtures/dialects/sparksql/select_distribute_by.sql
index 17b5fef..7aedf16 100644
--- a/test/fixtures/dialects/sparksql/select_distribute_by.sql
+++ b/test/fixtures/dialects/sparksql/select_distribute_by.sql
@@ -50,3 +50,12 @@ FROM person
 GROUP BY age
 HAVING COUNT(age) > 1
 DISTRIBUTE BY age;
+
+SELECT
+    age,
+    name
+FROM person
+GROUP BY age
+HAVING COUNT(age) > 1
+DISTRIBUTE BY age
+SORT BY age;
diff --git a/test/fixtures/dialects/sparksql/select_distribute_by.yml b/test/fixtures/dialects/sparksql/select_distribute_by.yml
index 2769f22..13905a1 100644
--- a/test/fixtures/dialects/sparksql/select_distribute_by.yml
+++ b/test/fixtures/dialects/sparksql/select_distribute_by.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 8f2f532ff7665ea2c71c0cf1909410c79e774bdeac37a31ef2c4671685c42084
+_hash: 9ce5dad4768aabdc2d0ae563cdd3b7b9db42c3608bc643d818f18cc212079131
 file:
 - statement:
     select_statement:
@@ -235,3 +235,52 @@ file:
       - column_reference:
           naked_identifier: age
 - statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          column_reference:
+            naked_identifier: age
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: name
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: person
+      groupby_clause:
+      - keyword: GROUP
+      - keyword: BY
+      - column_reference:
+          naked_identifier: age
+      having_clause:
+        keyword: HAVING
+        expression:
+          function:
+            function_name:
+              function_name_identifier: COUNT
+            bracketed:
+              start_bracket: (
+              expression:
+                column_reference:
+                  naked_identifier: age
+              end_bracket: )
+          comparison_operator:
+            raw_comparison_operator: '>'
+          numeric_literal: '1'
+      distribute_by_clause:
+      - keyword: DISTRIBUTE
+      - keyword: BY
+      - column_reference:
+          naked_identifier: age
+      sort_by_clause:
+      - keyword: SORT
+      - keyword: BY
+      - column_reference:
+          naked_identifier: age
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/sparksql/select_div.sql b/test/fixtures/dialects/sparksql/select_div.sql
new file mode 100644
index 0000000..b940f22
--- /dev/null
+++ b/test/fixtures/dialects/sparksql/select_div.sql
@@ -0,0 +1 @@
+SELECT 3 DIV 2;
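
Spark SQL's DIV operator performs integral division (3 DIV 2 evaluates to 1), and this one-line fixture pins down that the dialect now parses DIV as a binary operator, as the YAML below records. A quick check under the same simple-API assumptions as the earlier sketch:

import sqlfluff

# The accompanying YAML captures DIV as a binary_operator between the
# two numeric literals; parse() raises if the dialect rejects it.
sqlfluff.parse("SELECT 3 DIV 2;", dialect="sparksql")

# For comparison, Python's integral division gives the same value
# Spark returns for 3 DIV 2.
assert 3 // 2 == 1
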
diff --git a/test/fixtures/dialects/sparksql/select_div.yml b/test/fixtures/dialects/sparksql/select_div.yml
new file mode 100644
index 0000000..a6a98bd
--- /dev/null
+++ b/test/fixtures/dialects/sparksql/select_div.yml
@@ -0,0 +1,18 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 51cb10bd9421e08eb8915768a600cb5424ff64f19b8ecc513413f82ea2da8a15
+file:
+  statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          expression:
+          - numeric_literal: '3'
+          - binary_operator:
+              keyword: DIV
+          - numeric_literal: '2'
+  statement_terminator: ;
diff --git a/test/fixtures/dialects/sparksql/select_from_file.sql b/test/fixtures/dialects/sparksql/select_from_file.sql
index bd22b70..ec94a2f 100644
--- a/test/fixtures/dialects/sparksql/select_from_file.sql
+++ b/test/fixtures/dialects/sparksql/select_from_file.sql
@@ -41,7 +41,7 @@ SELECT
 FROM TEXT.`examples/src/main/resources/people.txt`;
 
 -- Tests for Inline Path Glob Filter
--- https://spark.apache.org/docs/latest/sql-data-sources-generic-options.html#path-global-filter  --noqa: L016
+-- https://spark.apache.org/docs/latest/sql-data-sources-generic-options.html#path-global-filter  --noqa: LT05
 -- Inline Path Filter using Asterisk (*)
 SELECT
     a,
diff --git a/test/fixtures/dialects/sparksql/select_from_lateral_view.sql b/test/fixtures/dialects/sparksql/select_from_lateral_view.sql
index 1e0472e..3b8cb14 100644
--- a/test/fixtures/dialects/sparksql/select_from_lateral_view.sql
+++ b/test/fixtures/dialects/sparksql/select_from_lateral_view.sql
@@ -35,6 +35,7 @@ SELECT
     age,
     class,
     address,
+    time,
     c_age
 FROM person
     LATERAL VIEW OUTER EXPLODE(ARRAY()) tbl_name AS c_age;
@@ -54,3 +55,11 @@ SELECT
     exploded_people.state
 FROM person AS p
     LATERAL VIEW INLINE(array_of_structs) exploded_people AS name, age, state;
+
+SELECT
+    p.id,
+    exploded_people.name,
+    exploded_people.age,
+    exploded_people.state
+FROM person AS p
+    LATERAL VIEW INLINE(array_of_structs) exploded_people;
diff --git a/test/fixtures/dialects/sparksql/select_from_lateral_view.yml b/test/fixtures/dialects/sparksql/select_from_lateral_view.yml
index 9e4042f..19b3a70 100644
--- a/test/fixtures/dialects/sparksql/select_from_lateral_view.yml
+++ b/test/fixtures/dialects/sparksql/select_from_lateral_view.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 8124d5d4d98bab9117a490802c9613bc6853607423e55c20293fc72b30691f84
+_hash: 34f23d4c7afcdc323487d4e2ab4b5569cff274159a3fb975871040e659dc898e
 file:
 - statement:
     select_statement:
@@ -65,8 +65,9 @@ file:
                       - end_bracket: )
                   end_bracket: )
             - naked_identifier: tbl_name
-            - keyword: AS
-            - naked_identifier: c_age
+            - alias_expression:
+                keyword: AS
+                naked_identifier: c_age
           - lateral_view_clause:
             - keyword: LATERAL
             - keyword: VIEW
@@ -88,8 +89,9 @@ file:
                           numeric_literal: '80'
                       - end_bracket: )
                   end_bracket: )
-            - keyword: AS
-            - naked_identifier: d_age
+            - alias_expression:
+                keyword: AS
+                naked_identifier: d_age
 - statement_terminator: ;
 - statement:
     select_statement:
@@ -138,8 +140,9 @@ file:
                           numeric_literal: '60'
                       - end_bracket: )
                   end_bracket: )
-            - keyword: AS
-            - naked_identifier: c_age
+            - alias_expression:
+                keyword: AS
+                naked_identifier: c_age
           - lateral_view_clause:
             - keyword: LATERAL
             - keyword: VIEW
@@ -161,8 +164,9 @@ file:
                           numeric_literal: '80'
                       - end_bracket: )
                   end_bracket: )
-            - keyword: AS
-            - naked_identifier: d_age
+            - alias_expression:
+                keyword: AS
+                naked_identifier: d_age
       groupby_clause:
       - keyword: GROUP
       - keyword: BY
@@ -224,8 +228,9 @@ file:
                         end_bracket: )
                   end_bracket: )
             - naked_identifier: tbl_name
-            - keyword: AS
-            - naked_identifier: c_age
+            - alias_expression:
+                keyword: AS
+                naked_identifier: c_age
 - statement_terminator: ;
 - statement:
     select_statement:
@@ -251,6 +256,10 @@ file:
           column_reference:
             naked_identifier: address
       - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: time
+      - comma: ','
       - select_clause_element:
           column_reference:
             naked_identifier: c_age
@@ -279,8 +288,9 @@ file:
                         end_bracket: )
                   end_bracket: )
             - naked_identifier: tbl_name
-            - keyword: AS
-            - naked_identifier: c_age
+            - alias_expression:
+                keyword: AS
+                naked_identifier: c_age
 - statement_terminator: ;
 - statement:
     select_statement:
@@ -310,12 +320,78 @@ file:
           - dot: .
           - naked_identifier: state
       from_clause:
-        keyword: FROM
-        from_expression:
+      - keyword: FROM
+      - from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: person
+            lateral_view_clause:
+            - keyword: LATERAL
+            - keyword: VIEW
+            - function:
+                function_name:
+                  function_name_identifier: INLINE
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    column_reference:
+                      naked_identifier: array_of_structs
+                  end_bracket: )
+            - naked_identifier: exploded_people
+            - alias_expression:
+                keyword: AS
+                naked_identifier: name
+      - comma: ','
+      - from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: age
+      - comma: ','
+      - from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: state
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          column_reference:
+          - naked_identifier: p
+          - dot: .
+          - naked_identifier: id
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+          - naked_identifier: exploded_people
+          - dot: .
+          - naked_identifier: name
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+          - naked_identifier: exploded_people
+          - dot: .
+          - naked_identifier: age
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+          - naked_identifier: exploded_people
+          - dot: .
+          - naked_identifier: state
+      from_clause:
+      - keyword: FROM
+      - from_expression:
           from_expression_element:
             table_expression:
               table_reference:
                 naked_identifier: person
+            alias_expression:
+              keyword: AS
+              naked_identifier: p
             lateral_view_clause:
             - keyword: LATERAL
             - keyword: VIEW
@@ -329,12 +405,21 @@ file:
                       naked_identifier: array_of_structs
                   end_bracket: )
             - naked_identifier: exploded_people
-            - keyword: AS
-            - naked_identifier: name
-            - comma: ','
-            - naked_identifier: age
-            - comma: ','
-            - naked_identifier: state
+            - alias_expression:
+                keyword: AS
+                naked_identifier: name
+      - comma: ','
+      - from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: age
+      - comma: ','
+      - from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: state
 - statement_terminator: ;
 - statement:
     select_statement:
@@ -386,10 +471,4 @@ file:
                       naked_identifier: array_of_structs
                   end_bracket: )
             - naked_identifier: exploded_people
-            - keyword: AS
-            - naked_identifier: name
-            - comma: ','
-            - naked_identifier: age
-            - comma: ','
-            - naked_identifier: state
 - statement_terminator: ;
diff --git a/test/fixtures/dialects/sparksql/select_from_where_clause.sql b/test/fixtures/dialects/sparksql/select_from_where_clause.sql
index 2f8cd94..5c5834e 100644
--- a/test/fixtures/dialects/sparksql/select_from_where_clause.sql
+++ b/test/fixtures/dialects/sparksql/select_from_where_clause.sql
@@ -37,3 +37,17 @@ WHERE EXISTS (
         SELECT 1 FROM person
         WHERE person.id = person.id AND person.age IS NULL
     );
+
+SELECT
+    name,
+    age
+FROM person
+WHERE person.id is distinct from person.age;
+
+SELECT
+    name,
+    age
+FROM person
+WHERE person.id is not distinct from person.age
+
+
diff --git a/test/fixtures/dialects/sparksql/select_from_where_clause.yml b/test/fixtures/dialects/sparksql/select_from_where_clause.yml
index ef053f1..44a94df 100644
--- a/test/fixtures/dialects/sparksql/select_from_where_clause.yml
+++ b/test/fixtures/dialects/sparksql/select_from_where_clause.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 59a2bc011fdb7b3367b1d7ce3761037c884ce8298414f1fd7d7de2b2e5f349b8
+_hash: f80fb596f8f8960dbd91f35c34a72819d16df7ead3a4fab481a439cd14e54ddf
 file:
 - statement:
     select_statement:
@@ -255,3 +255,69 @@ file:
                 - keyword: 'NULL'
             end_bracket: )
 - statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          column_reference:
+            naked_identifier: name
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: age
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: person
+      where_clause:
+        keyword: WHERE
+        expression:
+        - column_reference:
+          - naked_identifier: person
+          - dot: .
+          - naked_identifier: id
+        - keyword: is
+        - keyword: distinct
+        - keyword: from
+        - column_reference:
+          - naked_identifier: person
+          - dot: .
+          - naked_identifier: age
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          column_reference:
+            naked_identifier: name
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: age
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: person
+      where_clause:
+        keyword: WHERE
+        expression:
+        - column_reference:
+          - naked_identifier: person
+          - dot: .
+          - naked_identifier: id
+        - keyword: is
+        - keyword: not
+        - keyword: distinct
+        - keyword: from
+        - column_reference:
+          - naked_identifier: person
+          - dot: .
+          - naked_identifier: age
diff --git a/test/fixtures/dialects/sparksql/select_group_by.sql b/test/fixtures/dialects/sparksql/select_group_by.sql
index b6abe7f..6f2ebc7 100644
--- a/test/fixtures/dialects/sparksql/select_group_by.sql
+++ b/test/fixtures/dialects/sparksql/select_group_by.sql
@@ -47,6 +47,22 @@ FROM dealer
 GROUP BY GROUPING SETS ((city, car_model), (city), (car_model), ())
 ORDER BY city;
 
+SELECT
+    city,
+    car_model,
+    sum(quantity) AS sum_quantity
+FROM dealer
+GROUP BY city, car_model GROUPING SETS ((city, car_model), (city), (car_model), ())
+ORDER BY city;
+
+SELECT
+    city,
+    car_model,
+    sum(quantity) AS sum_quantity
+FROM dealer
+GROUP BY city, car_model, GROUPING SETS ((city, car_model), (city), (car_model), ())
+ORDER BY city;
+
 -- Group by processing with `ROLLUP` clause.
 -- Equivalent GROUP BY GROUPING SETS ((city, car_model), (city), ())
 SELECT
diff --git a/test/fixtures/dialects/sparksql/select_group_by.yml b/test/fixtures/dialects/sparksql/select_group_by.yml
index 14ca09e..cd3ac23 100644
--- a/test/fixtures/dialects/sparksql/select_group_by.yml
+++ b/test/fixtures/dialects/sparksql/select_group_by.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: dea545107ae74b4b6b065945481cf2301d6d483c6526ece7dd103b6cc8aa6eef
+_hash: 70cb58e731236ab6b3f6b06a3afef4c8ff94318169a874eb2c9567a8eceecabb
 file:
 - statement:
     select_statement:
@@ -299,6 +299,169 @@ file:
       - column_reference:
           naked_identifier: city
 - statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          column_reference:
+            naked_identifier: city
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: car_model
+      - comma: ','
+      - select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: sum
+            bracketed:
+              start_bracket: (
+              expression:
+                column_reference:
+                  naked_identifier: quantity
+              end_bracket: )
+          alias_expression:
+            keyword: AS
+            naked_identifier: sum_quantity
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: dealer
+      groupby_clause:
+      - keyword: GROUP
+      - keyword: BY
+      - column_reference:
+          naked_identifier: city
+      - comma: ','
+      - column_reference:
+          naked_identifier: car_model
+      - grouping_sets_clause:
+        - keyword: GROUPING
+        - keyword: SETS
+        - bracketed:
+            start_bracket: (
+            grouping_expression_list:
+            - bracketed:
+              - start_bracket: (
+              - expression:
+                  column_reference:
+                    naked_identifier: city
+              - comma: ','
+              - expression:
+                  column_reference:
+                    naked_identifier: car_model
+              - end_bracket: )
+            - comma: ','
+            - bracketed:
+                start_bracket: (
+                expression:
+                  column_reference:
+                    naked_identifier: city
+                end_bracket: )
+            - comma: ','
+            - bracketed:
+                start_bracket: (
+                expression:
+                  column_reference:
+                    naked_identifier: car_model
+                end_bracket: )
+            - comma: ','
+            - bracketed:
+                start_bracket: (
+                end_bracket: )
+            end_bracket: )
+      orderby_clause:
+      - keyword: ORDER
+      - keyword: BY
+      - column_reference:
+          naked_identifier: city
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          column_reference:
+            naked_identifier: city
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: car_model
+      - comma: ','
+      - select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: sum
+            bracketed:
+              start_bracket: (
+              expression:
+                column_reference:
+                  naked_identifier: quantity
+              end_bracket: )
+          alias_expression:
+            keyword: AS
+            naked_identifier: sum_quantity
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: dealer
+      groupby_clause:
+      - keyword: GROUP
+      - keyword: BY
+      - column_reference:
+          naked_identifier: city
+      - comma: ','
+      - column_reference:
+          naked_identifier: car_model
+      - comma: ','
+      - grouping_sets_clause:
+        - keyword: GROUPING
+        - keyword: SETS
+        - bracketed:
+            start_bracket: (
+            grouping_expression_list:
+            - bracketed:
+              - start_bracket: (
+              - expression:
+                  column_reference:
+                    naked_identifier: city
+              - comma: ','
+              - expression:
+                  column_reference:
+                    naked_identifier: car_model
+              - end_bracket: )
+            - comma: ','
+            - bracketed:
+                start_bracket: (
+                expression:
+                  column_reference:
+                    naked_identifier: city
+                end_bracket: )
+            - comma: ','
+            - bracketed:
+                start_bracket: (
+                expression:
+                  column_reference:
+                    naked_identifier: car_model
+                end_bracket: )
+            - comma: ','
+            - bracketed:
+                start_bracket: (
+                end_bracket: )
+            end_bracket: )
+      orderby_clause:
+      - keyword: ORDER
+      - keyword: BY
+      - column_reference:
+          naked_identifier: city
+- statement_terminator: ;
 - statement:
     select_statement:
       select_clause:
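
Note: the two statements added above exercise Spark's extended GROUP BY syntax, in which ordinary grouping expressions may precede GROUPING SETS either with or without a separating comma. Both forms, abridged from the fixture:

    SELECT city, car_model, sum(quantity) AS sum_quantity
    FROM dealer
    GROUP BY city, car_model GROUPING SETS ((city, car_model), (city), ());

    -- Comma-separated variant; per the YAML above it yields the same
    -- groupby_clause, differing only by an extra comma token.
    SELECT city, car_model, sum(quantity) AS sum_quantity
    FROM dealer
    GROUP BY city, car_model, GROUPING SETS ((city, car_model), (city), ());
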
diff --git a/test/fixtures/dialects/sparksql/select_hints.sql b/test/fixtures/dialects/sparksql/select_hints.sql
index ffce772..bf5b03c 100644
--- a/test/fixtures/dialects/sparksql/select_hints.sql
+++ b/test/fixtures/dialects/sparksql/select_hints.sql
@@ -110,3 +110,9 @@ SELECT /*+ BROADCAST(t1), MERGE(t1, t2) */
     t1.b,
     t2.c
 FROM t1 INNER JOIN t2 ON t1.key = t2.key;
+
+SELECT /*+ BROADCAST(db.t1) */
+    t1.a,
+    t1.b,
+    t2.c
+FROM t1 INNER JOIN t2 ON t1.key = t2.key;
diff --git a/test/fixtures/dialects/sparksql/select_hints.yml b/test/fixtures/dialects/sparksql/select_hints.yml
index 0c85258..79b7fa1 100644
--- a/test/fixtures/dialects/sparksql/select_hints.yml
+++ b/test/fixtures/dialects/sparksql/select_hints.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 2e4ecf246343b39a5fee5592a9c83fbe965bf86696b864abfde617fc031dc779
+_hash: 17d210004cf1b2cc4a9861681dd50739ed79ac4a0172ae16ad8a18fd885c03cd
 file:
 - statement:
     select_statement:
@@ -879,3 +879,66 @@ file:
                 - dot: .
                 - naked_identifier: key
 - statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_modifier:
+          select_hint:
+            start_hint: /*+
+            hint_function:
+              function_name:
+                function_name_identifier: BROADCAST
+              bracketed:
+                start_bracket: (
+                table_reference:
+                - naked_identifier: db
+                - dot: .
+                - naked_identifier: t1
+                end_bracket: )
+            end_hint: '*/'
+      - select_clause_element:
+          column_reference:
+          - naked_identifier: t1
+          - dot: .
+          - naked_identifier: a
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+          - naked_identifier: t1
+          - dot: .
+          - naked_identifier: b
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+          - naked_identifier: t2
+          - dot: .
+          - naked_identifier: c
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: t1
+          join_clause:
+          - keyword: INNER
+          - keyword: JOIN
+          - from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: t2
+          - join_on_condition:
+              keyword: 'ON'
+              expression:
+              - column_reference:
+                - naked_identifier: t1
+                - dot: .
+                - naked_identifier: key
+              - comparison_operator:
+                  raw_comparison_operator: '='
+              - column_reference:
+                - naked_identifier: t2
+                - dot: .
+                - naked_identifier: key
+- statement_terminator: ;
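
Note: the new hint test confirms that schema-qualified table references (db.t1) are accepted inside SparkSQL hint functions; the earlier statements in this file use only bare identifiers. A minimal sketch with names taken from the fixture — whether other table-hint functions such as MERGE accept qualified names the same way is an assumption not tested here:

    SELECT /*+ BROADCAST(db.t1) */
        t1.a,
        t2.c
    FROM t1 INNER JOIN t2 ON t1.key = t2.key;
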
diff --git a/test/fixtures/dialects/sparksql/select_lateral_view_supported_tvf.yml b/test/fixtures/dialects/sparksql/select_lateral_view_supported_tvf.yml
index 051ccfc..e9a7c4a 100644
--- a/test/fixtures/dialects/sparksql/select_lateral_view_supported_tvf.yml
+++ b/test/fixtures/dialects/sparksql/select_lateral_view_supported_tvf.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 178acfd014cf9b8b0815c26e4b85e0121265e0f2aa05a2f1146a3c8f60f91449
+_hash: 0174a44ebf919b68b3717938814bd471de9b0122e36ec569fd4c553d9fd48707
 file:
 - statement:
     select_statement:
@@ -96,8 +96,9 @@ file:
                           numeric_literal: '4'
                       - end_bracket: )
                   end_bracket: )
-            - keyword: AS
-            - naked_identifier: c2
+            - alias_expression:
+                keyword: AS
+                naked_identifier: c2
 - statement_terminator: ;
 - statement:
     select_statement:
@@ -142,8 +143,9 @@ file:
                           numeric_literal: '4'
                       - end_bracket: )
                   end_bracket: )
-            - keyword: AS
-            - naked_identifier: c2
+            - alias_expression:
+                keyword: AS
+                naked_identifier: c2
 - statement_terminator: ;
 - statement:
     select_statement:
@@ -249,8 +251,8 @@ file:
           - dot: .
           - naked_identifier: b
       from_clause:
-        keyword: FROM
-        from_expression:
+      - keyword: FROM
+      - from_expression:
           from_expression_element:
             table_expression:
               table_reference:
@@ -296,10 +298,15 @@ file:
                             - end_bracket: )
                       - end_bracket: )
                   end_bracket: )
-            - keyword: AS
-            - naked_identifier: c1
-            - comma: ','
-            - naked_identifier: c2
+            - alias_expression:
+                keyword: AS
+                naked_identifier: c1
+      - comma: ','
+      - from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: c2
 - statement_terminator: ;
 - statement:
     select_statement:
@@ -317,8 +324,8 @@ file:
           - dot: .
           - naked_identifier: b
       from_clause:
-        keyword: FROM
-        from_expression:
+      - keyword: FROM
+      - from_expression:
           from_expression_element:
             table_expression:
               table_reference:
@@ -364,10 +371,15 @@ file:
                             - end_bracket: )
                       - end_bracket: )
                   end_bracket: )
-            - keyword: AS
-            - naked_identifier: c1
-            - comma: ','
-            - naked_identifier: c2
+            - alias_expression:
+                keyword: AS
+                naked_identifier: c1
+      - comma: ','
+      - from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: c2
 - statement_terminator: ;
 - statement:
     select_statement:
@@ -460,8 +472,9 @@ file:
                           numeric_literal: '20'
                       - end_bracket: )
                   end_bracket: )
-            - keyword: AS
-            - naked_identifier: c1
+            - alias_expression:
+                keyword: AS
+                naked_identifier: c1
 - statement_terminator: ;
 - statement:
     select_statement:
@@ -506,8 +519,9 @@ file:
                           numeric_literal: '20'
                       - end_bracket: )
                   end_bracket: )
-            - keyword: AS
-            - naked_identifier: c1
+            - alias_expression:
+                keyword: AS
+                naked_identifier: c1
 - statement_terminator: ;
 - statement:
     select_statement:
@@ -548,8 +562,8 @@ file:
           - dot: .
           - naked_identifier: b
       from_clause:
-        keyword: FROM
-        from_expression:
+      - keyword: FROM
+      - from_expression:
           from_expression_element:
             table_expression:
               table_reference:
@@ -574,10 +588,15 @@ file:
                 - expression:
                     numeric_literal: '3'
                 - end_bracket: )
-            - keyword: AS
-            - naked_identifier: c1
-            - comma: ','
-            - naked_identifier: c2
+            - alias_expression:
+                keyword: AS
+                naked_identifier: c1
+      - comma: ','
+      - from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: c2
 - statement_terminator: ;
 - statement:
     select_statement:
@@ -615,8 +634,8 @@ file:
           - dot: .
           - naked_identifier: b
       from_clause:
-        keyword: FROM
-        from_expression:
+      - keyword: FROM
+      - from_expression:
           from_expression_element:
             table_expression:
               table_reference:
@@ -638,10 +657,15 @@ file:
                 - expression:
                     quoted_literal: "'b'"
                 - end_bracket: )
-            - keyword: AS
-            - naked_identifier: c1
-            - comma: ','
-            - naked_identifier: c2
+            - alias_expression:
+                keyword: AS
+                naked_identifier: c1
+      - comma: ','
+      - from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: c2
 - statement_terminator: ;
 - statement:
     select_statement:
@@ -696,6 +720,7 @@ file:
                 - expression:
                     quoted_literal: "'HOST'"
                 - end_bracket: )
-            - keyword: AS
-            - naked_identifier: c1
+            - alias_expression:
+                keyword: AS
+                naked_identifier: c1
 - statement_terminator: ;
diff --git a/test/fixtures/dialects/sparksql/select_order_by.sql b/test/fixtures/dialects/sparksql/select_order_by.sql
index b0f3030..45ef2e0 100644
--- a/test/fixtures/dialects/sparksql/select_order_by.sql
+++ b/test/fixtures/dialects/sparksql/select_order_by.sql
@@ -28,3 +28,9 @@ SELECT
     name,
     age
 FROM person ORDER BY name ASC, age DESC;
+
+-- Sort rows using complex expression.
+SELECT
+    name,
+    age
+FROM person ORDER BY SUM(age)/SUM(age) DESC;
diff --git a/test/fixtures/dialects/sparksql/select_order_by.yml b/test/fixtures/dialects/sparksql/select_order_by.yml
index 8703f26..bc07d83 100644
--- a/test/fixtures/dialects/sparksql/select_order_by.yml
+++ b/test/fixtures/dialects/sparksql/select_order_by.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: b11da34ea5ce77f3558ddbbcbb649e3306d390d2aed7d81d5b05365afae9f44a
+_hash: afe247b8f1ce7f10f95249deb215a501e36fbb4ffc6ff751bb1d93eed2b84c2d
 file:
 - statement:
     select_statement:
@@ -136,3 +136,46 @@ file:
           naked_identifier: age
       - keyword: DESC
 - statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          column_reference:
+            naked_identifier: name
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: age
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: person
+      orderby_clause:
+      - keyword: ORDER
+      - keyword: BY
+      - expression:
+        - function:
+            function_name:
+              function_name_identifier: SUM
+            bracketed:
+              start_bracket: (
+              expression:
+                column_reference:
+                  naked_identifier: age
+              end_bracket: )
+        - binary_operator: /
+        - function:
+            function_name:
+              function_name_identifier: SUM
+            bracketed:
+              start_bracket: (
+              expression:
+                column_reference:
+                  naked_identifier: age
+              end_bracket: )
+      - keyword: DESC
+- statement_terminator: ;
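
Note: the added statement shows the sparksql ORDER BY clause accepting a full expression (a binary operation over two function calls) rather than a bare column reference. The fixture's SUM(age)/SUM(age) is deliberately artificial; assuming the grammar change is general, any scalar expression in the same position should parse, e.g.:

    -- Hypothetical variant, not part of the fixture.
    SELECT name, age
    FROM person
    ORDER BY SUM(age) / COUNT(age) DESC;
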
diff --git a/test/fixtures/dialects/sparksql/window_functions.sql b/test/fixtures/dialects/sparksql/window_functions.sql
index ce69e91..ce00908 100644
--- a/test/fixtures/dialects/sparksql/window_functions.sql
+++ b/test/fixtures/dialects/sparksql/window_functions.sql
@@ -112,3 +112,15 @@ SELECT
 FROM test_ignore_null AS ignore_nulls
     WINDOW w AS (ORDER BY ignore_nulls.id)
 ORDER BY ignore_nulls.id;
+
+SELECT
+    ignore_nulls.id,
+    ignore_nulls.v,
+    LEAD(ignore_nulls.v, 0) RESPECT NULLS OVER w AS v_lead,
+    LAG(ignore_nulls.v, 0) RESPECT NULLS OVER w AS v_lag,
+    NTH_VALUE(ignore_nulls.v, 2) RESPECT NULLS OVER w AS v_nth_value,
+    FIRST_VALUE(ignore_nulls.v) RESPECT NULLS OVER w AS v_first_value,
+    LAST_VALUE(ignore_nulls.v) RESPECT NULLS OVER w AS v_last_value
+FROM test_ignore_null AS ignore_nulls
+    WINDOW w AS (ORDER BY ignore_nulls.id range between interval 6 days preceding and current row)
+ORDER BY ignore_nulls.id;
diff --git a/test/fixtures/dialects/sparksql/window_functions.yml b/test/fixtures/dialects/sparksql/window_functions.yml
index d102ce5..765687d 100644
--- a/test/fixtures/dialects/sparksql/window_functions.yml
+++ b/test/fixtures/dialects/sparksql/window_functions.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: ee5f830c8e113fe87f49dcb120b0fe14735f8fe695a632db77f4cde8a6d003bf
+_hash: 00bbbff73f8f9a420c42c4cdd5d98e0f36c435b97662e439e295403c96aae249
 file:
 - statement:
     select_statement:
@@ -936,3 +936,178 @@ file:
         - dot: .
         - naked_identifier: id
 - statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_element:
+          column_reference:
+          - naked_identifier: ignore_nulls
+          - dot: .
+          - naked_identifier: id
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+          - naked_identifier: ignore_nulls
+          - dot: .
+          - naked_identifier: v
+      - comma: ','
+      - select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: LEAD
+            bracketed:
+            - start_bracket: (
+            - expression:
+                column_reference:
+                - naked_identifier: ignore_nulls
+                - dot: .
+                - naked_identifier: v
+            - comma: ','
+            - expression:
+                numeric_literal: '0'
+            - end_bracket: )
+            over_clause:
+            - keyword: RESPECT
+            - keyword: NULLS
+            - keyword: OVER
+            - naked_identifier: w
+          alias_expression:
+            keyword: AS
+            naked_identifier: v_lead
+      - comma: ','
+      - select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: LAG
+            bracketed:
+            - start_bracket: (
+            - expression:
+                column_reference:
+                - naked_identifier: ignore_nulls
+                - dot: .
+                - naked_identifier: v
+            - comma: ','
+            - expression:
+                numeric_literal: '0'
+            - end_bracket: )
+            over_clause:
+            - keyword: RESPECT
+            - keyword: NULLS
+            - keyword: OVER
+            - naked_identifier: w
+          alias_expression:
+            keyword: AS
+            naked_identifier: v_lag
+      - comma: ','
+      - select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: NTH_VALUE
+            bracketed:
+            - start_bracket: (
+            - expression:
+                column_reference:
+                - naked_identifier: ignore_nulls
+                - dot: .
+                - naked_identifier: v
+            - comma: ','
+            - expression:
+                numeric_literal: '2'
+            - end_bracket: )
+            over_clause:
+            - keyword: RESPECT
+            - keyword: NULLS
+            - keyword: OVER
+            - naked_identifier: w
+          alias_expression:
+            keyword: AS
+            naked_identifier: v_nth_value
+      - comma: ','
+      - select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: FIRST_VALUE
+            bracketed:
+              start_bracket: (
+              expression:
+                column_reference:
+                - naked_identifier: ignore_nulls
+                - dot: .
+                - naked_identifier: v
+              end_bracket: )
+            over_clause:
+            - keyword: RESPECT
+            - keyword: NULLS
+            - keyword: OVER
+            - naked_identifier: w
+          alias_expression:
+            keyword: AS
+            naked_identifier: v_first_value
+      - comma: ','
+      - select_clause_element:
+          function:
+            function_name:
+              function_name_identifier: LAST_VALUE
+            bracketed:
+              start_bracket: (
+              expression:
+                column_reference:
+                - naked_identifier: ignore_nulls
+                - dot: .
+                - naked_identifier: v
+              end_bracket: )
+            over_clause:
+            - keyword: RESPECT
+            - keyword: NULLS
+            - keyword: OVER
+            - naked_identifier: w
+          alias_expression:
+            keyword: AS
+            naked_identifier: v_last_value
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: test_ignore_null
+            alias_expression:
+              keyword: AS
+              naked_identifier: ignore_nulls
+            named_window:
+              keyword: WINDOW
+              named_window_expression:
+                naked_identifier: w
+                keyword: AS
+                bracketed:
+                  start_bracket: (
+                  window_specification:
+                    orderby_clause:
+                    - keyword: ORDER
+                    - keyword: BY
+                    - column_reference:
+                      - naked_identifier: ignore_nulls
+                      - dot: .
+                      - naked_identifier: id
+                    frame_clause:
+                    - keyword: range
+                    - keyword: between
+                    - interval_expression:
+                        keyword: interval
+                        interval_literal:
+                          numeric_literal: '6'
+                          date_part: days
+                    - keyword: preceding
+                    - keyword: and
+                    - keyword: current
+                    - keyword: row
+                  end_bracket: )
+      orderby_clause:
+      - keyword: ORDER
+      - keyword: BY
+      - column_reference:
+        - naked_identifier: ignore_nulls
+        - dot: .
+        - naked_identifier: id
+- statement_terminator: ;
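
Note: this window-function fixture combines RESPECT NULLS ahead of OVER with a RANGE frame bounded by an interval literal, both now parsed inside a named window. A condensed sketch of the pattern, reduced to a single projection (names from the fixture):

    SELECT LEAD(ignore_nulls.v, 0) RESPECT NULLS OVER w AS v_lead
    FROM test_ignore_null AS ignore_nulls
        WINDOW w AS (ORDER BY ignore_nulls.id
                     RANGE BETWEEN INTERVAL 6 DAYS PRECEDING AND CURRENT ROW)
    ORDER BY ignore_nulls.id;
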
diff --git a/test/fixtures/dialects/sqlite/create_table.sql b/test/fixtures/dialects/sqlite/create_table.sql
new file mode 100644
index 0000000..01b4837
--- /dev/null
+++ b/test/fixtures/dialects/sqlite/create_table.sql
@@ -0,0 +1,6 @@
+CREATE TABLE users (
+    user_id INTEGER PRIMARY KEY AUTOINCREMENT,
+    username TEXT NOT NULL UNIQUE,
+    password TEXT NOT NULL,
+    email TEXT NOT NULL UNIQUE
+);
diff --git a/test/fixtures/dialects/sqlite/create_table.yml b/test/fixtures/dialects/sqlite/create_table.yml
new file mode 100644
index 0000000..8d35593
--- /dev/null
+++ b/test/fixtures/dialects/sqlite/create_table.yml
@@ -0,0 +1,53 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: d41b1c6f7c9e285362e2e75bbca5f6286c324a06946c837e5531684758e73b4c
+file:
+  statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: users
+    - bracketed:
+      - start_bracket: (
+      - column_definition:
+          naked_identifier: user_id
+          data_type:
+            data_type_identifier: INTEGER
+          column_constraint_segment:
+          - keyword: PRIMARY
+          - keyword: KEY
+          - keyword: AUTOINCREMENT
+      - comma: ','
+      - column_definition:
+        - naked_identifier: username
+        - data_type:
+            data_type_identifier: TEXT
+        - column_constraint_segment:
+          - keyword: NOT
+          - keyword: 'NULL'
+        - column_constraint_segment:
+            keyword: UNIQUE
+      - comma: ','
+      - column_definition:
+          naked_identifier: password
+          data_type:
+            data_type_identifier: TEXT
+          column_constraint_segment:
+          - keyword: NOT
+          - keyword: 'NULL'
+      - comma: ','
+      - column_definition:
+        - naked_identifier: email
+        - data_type:
+            data_type_identifier: TEXT
+        - column_constraint_segment:
+          - keyword: NOT
+          - keyword: 'NULL'
+        - column_constraint_segment:
+            keyword: UNIQUE
+      - end_bracket: )
+  statement_terminator: ;
diff --git a/test/fixtures/dialects/sqlite/create_table_constraint_default.yml b/test/fixtures/dialects/sqlite/create_table_constraint_default.yml
index 70ebe40..c391d22 100644
--- a/test/fixtures/dialects/sqlite/create_table_constraint_default.yml
+++ b/test/fixtures/dialects/sqlite/create_table_constraint_default.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 149db629af0164d27dc37ef59df84f570101734b0f14157185888321791fddb1
+_hash: b465661bdd47b953b078b1bbeed25eaceeb4bbc4751d064066e83f9b4561876f
 file:
 - statement:
     transaction_statement:
@@ -24,7 +24,7 @@ file:
         column_definition:
         - quoted_identifier: '"col"'
         - data_type:
-            keyword: TIMESTAMP
+            data_type_identifier: TIMESTAMP
         - column_constraint_segment:
           - keyword: NOT
           - keyword: 'NULL'
diff --git a/test/fixtures/dialects/sqlite/drop_trigger.sql b/test/fixtures/dialects/sqlite/drop_trigger.sql
new file mode 100644
index 0000000..782fbb1
--- /dev/null
+++ b/test/fixtures/dialects/sqlite/drop_trigger.sql
@@ -0,0 +1 @@
+DROP TRIGGER IF EXISTS MyTestTrigger;
diff --git a/test/fixtures/dialects/sqlite/drop_trigger.yml b/test/fixtures/dialects/sqlite/drop_trigger.yml
new file mode 100644
index 0000000..ba9841e
--- /dev/null
+++ b/test/fixtures/dialects/sqlite/drop_trigger.yml
@@ -0,0 +1,16 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 56de27b71563494c78c91fd576e749086a7271b6cefb3cb331727e560eaf8b23
+file:
+  statement:
+    drop_trigger:
+    - keyword: DROP
+    - keyword: TRIGGER
+    - keyword: IF
+    - keyword: EXISTS
+    - trigger_reference:
+        naked_identifier: MyTestTrigger
+  statement_terminator: ;
diff --git a/test/fixtures/dialects/sqlite/pragma.sql b/test/fixtures/dialects/sqlite/pragma.sql
new file mode 100644
index 0000000..ee2dbd8
--- /dev/null
+++ b/test/fixtures/dialects/sqlite/pragma.sql
@@ -0,0 +1,29 @@
+PRAGMA analysis_limit = 7;
+
+PRAGMA schema.application_id;
+
+PRAGMA schema.auto_vacuum = INCREMENTAL;
+
+PRAGMA automatic_index = TRUE;
+
+PRAGMA schema.cache_size = -500;
+
+PRAGMA collation_list;
+
+PRAGMA data_store_directory = 'directory-name';
+
+PRAGMA encoding = 'UTF-16be';
+
+PRAGMA schema.foreign_key_check('table-name');
+
+PRAGMA schema.journal_mode = WAL;
+
+PRAGMA schema.locking_mode = NORMAL;
+
+PRAGMA schema.secure_delete = FAST;
+
+PRAGMA schema.synchronous = 0;
+
+PRAGMA temp_store = DEFAULT;
+
+PRAGMA schema.wal_checkpoint(FULL);
diff --git a/test/fixtures/dialects/sqlite/pragma.yml b/test/fixtures/dialects/sqlite/pragma.yml
new file mode 100644
index 0000000..89d1799
--- /dev/null
+++ b/test/fixtures/dialects/sqlite/pragma.yml
@@ -0,0 +1,158 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 8e46290d567297bf2f869781593433e524a9205419e5b20bf5bd978b665737f2
+file:
+- statement:
+    pragma_statement:
+      keyword: PRAGMA
+      pragma_reference:
+        naked_identifier: analysis_limit
+      comparison_operator:
+        raw_comparison_operator: '='
+      numeric_literal: '7'
+- statement_terminator: ;
+- statement:
+    pragma_statement:
+      keyword: PRAGMA
+      pragma_reference:
+      - naked_identifier: schema
+      - dot: .
+      - naked_identifier: application_id
+- statement_terminator: ;
+- statement:
+    pragma_statement:
+    - keyword: PRAGMA
+    - pragma_reference:
+      - naked_identifier: schema
+      - dot: .
+      - naked_identifier: auto_vacuum
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - keyword: INCREMENTAL
+- statement_terminator: ;
+- statement:
+    pragma_statement:
+      keyword: PRAGMA
+      pragma_reference:
+        naked_identifier: automatic_index
+      comparison_operator:
+        raw_comparison_operator: '='
+      boolean_literal: 'TRUE'
+- statement_terminator: ;
+- statement:
+    pragma_statement:
+      keyword: PRAGMA
+      pragma_reference:
+      - naked_identifier: schema
+      - dot: .
+      - naked_identifier: cache_size
+      comparison_operator:
+        raw_comparison_operator: '='
+      numeric_literal:
+        sign_indicator: '-'
+        numeric_literal: '500'
+- statement_terminator: ;
+- statement:
+    pragma_statement:
+      keyword: PRAGMA
+      pragma_reference:
+        naked_identifier: collation_list
+- statement_terminator: ;
+- statement:
+    pragma_statement:
+      keyword: PRAGMA
+      pragma_reference:
+        naked_identifier: data_store_directory
+      comparison_operator:
+        raw_comparison_operator: '='
+      quoted_literal: "'directory-name'"
+- statement_terminator: ;
+- statement:
+    pragma_statement:
+      keyword: PRAGMA
+      pragma_reference:
+        naked_identifier: encoding
+      comparison_operator:
+        raw_comparison_operator: '='
+      quoted_literal: "'UTF-16be'"
+- statement_terminator: ;
+- statement:
+    pragma_statement:
+      keyword: PRAGMA
+      pragma_reference:
+      - naked_identifier: schema
+      - dot: .
+      - naked_identifier: foreign_key_check
+      bracketed:
+        start_bracket: (
+        quoted_literal: "'table-name'"
+        end_bracket: )
+- statement_terminator: ;
+- statement:
+    pragma_statement:
+    - keyword: PRAGMA
+    - pragma_reference:
+      - naked_identifier: schema
+      - dot: .
+      - naked_identifier: journal_mode
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - keyword: WAL
+- statement_terminator: ;
+- statement:
+    pragma_statement:
+    - keyword: PRAGMA
+    - pragma_reference:
+      - naked_identifier: schema
+      - dot: .
+      - naked_identifier: locking_mode
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - keyword: NORMAL
+- statement_terminator: ;
+- statement:
+    pragma_statement:
+    - keyword: PRAGMA
+    - pragma_reference:
+      - naked_identifier: schema
+      - dot: .
+      - naked_identifier: secure_delete
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - keyword: FAST
+- statement_terminator: ;
+- statement:
+    pragma_statement:
+      keyword: PRAGMA
+      pragma_reference:
+      - naked_identifier: schema
+      - dot: .
+      - naked_identifier: synchronous
+      comparison_operator:
+        raw_comparison_operator: '='
+      numeric_literal: '0'
+- statement_terminator: ;
+- statement:
+    pragma_statement:
+    - keyword: PRAGMA
+    - pragma_reference:
+        naked_identifier: temp_store
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - keyword: DEFAULT
+- statement_terminator: ;
+- statement:
+    pragma_statement:
+      keyword: PRAGMA
+      pragma_reference:
+      - naked_identifier: schema
+      - dot: .
+      - naked_identifier: wal_checkpoint
+      bracketed:
+        start_bracket: (
+        keyword: FULL
+        end_bracket: )
+- statement_terminator: ;
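
Note: PRAGMA statements are new to the sqlite dialect in this release. The fixture covers SQLite's three documented invocation forms, each optionally schema-qualified:

    PRAGMA collation_list;                          -- query a pragma value
    PRAGMA analysis_limit = 7;                      -- assign a pragma value
    PRAGMA schema.foreign_key_check('table-name');  -- function-call form
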
diff --git a/test/fixtures/dialects/sqlite/select.sql b/test/fixtures/dialects/sqlite/select.sql
new file mode 100644
index 0000000..2f053de
--- /dev/null
+++ b/test/fixtures/dialects/sqlite/select.sql
@@ -0,0 +1 @@
+SELECT a FROM foo LIMIT 10;
diff --git a/test/fixtures/dialects/sqlite/select.yml b/test/fixtures/dialects/sqlite/select.yml
new file mode 100644
index 0000000..a8acf24
--- /dev/null
+++ b/test/fixtures/dialects/sqlite/select.yml
@@ -0,0 +1,25 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: c78e40a9f91a3af7db4e4f450f1b101cb68caa2a6f3999040a39ebab19ba53ab
+file:
+  statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          column_reference:
+            naked_identifier: a
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: foo
+      limit_clause:
+        keyword: LIMIT
+        numeric_literal: '10'
+  statement_terminator: ;
diff --git a/test/fixtures/dialects/teradata/collect_stats.sql b/test/fixtures/dialects/teradata/collect_stats.sql
index d13fe81..c2f85f8 100644
--- a/test/fixtures/dialects/teradata/collect_stats.sql
+++ b/test/fixtures/dialects/teradata/collect_stats.sql
@@ -1,3 +1,20 @@
 COLLECT STATISTICS COLUMN ( IND_TIPO_TARJETA ) ON DB_1.TABLE_1;
 
 COLLECT STATISTICS INDEX ( COD_TARJETA, COD_EST, IND_TIPO_TARJETA, FEC_ANIO_MES ) ON DB_1.TABLE_1;
+
+COLLECT STATISTICS
+COLUMN o_orderstatus
+ON orders;
+
+COLLECT STATISTICS
+    USING SYSTEM THRESHOLD FOR CURRENT
+    COLUMN (o_orderstatus, o_orderkey)
+    ON orders;
+	 
+COLLECT STATS COLUMN ( IND_TIPO_TARJETA ) ON DB_1.TABLE_1;
+
+COLLECT STAT COLUMN ( IND_TIPO_TARJETA ) ON DB_1.TABLE_1;
+
+COLLECT STATS COLUMN IND_TIPO_TARJETA ON DB_1.TABLE_1;
+
+COLLECT STATS INDEX ( COD_TARJETA, COD_EST, IND_TIPO_TARJETA, FEC_ANIO_MES ) ON DB_1.TABLE_1;
diff --git a/test/fixtures/dialects/teradata/collect_stats.yml b/test/fixtures/dialects/teradata/collect_stats.yml
index 0572764..58ae9ee 100644
--- a/test/fixtures/dialects/teradata/collect_stats.yml
+++ b/test/fixtures/dialects/teradata/collect_stats.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 38cc97435a4c56f52a92245df291dd02dcbfef74d01c9128bb47e4b3e10c68ca
+_hash: 79c36181d2695cfe097a7f516eb4f7505073d6886f5b0f7e37b98b45374a717b
 file:
 - statement:
     collect_statistics_statement:
@@ -46,3 +46,107 @@ file:
       - dot: .
       - naked_identifier: TABLE_1
 - statement_terminator: ;
+- statement:
+    collect_statistics_statement:
+    - keyword: COLLECT
+    - keyword: STATISTICS
+    - keyword: COLUMN
+    - column_reference:
+        naked_identifier: o_orderstatus
+    - keyword: 'ON'
+    - table_reference:
+        naked_identifier: orders
+- statement_terminator: ;
+- statement:
+    collect_statistics_statement:
+    - keyword: COLLECT
+    - keyword: STATISTICS
+    - keyword: USING
+    - collect_stat_using_option_clause:
+      - keyword: SYSTEM
+      - keyword: THRESHOLD
+      - keyword: FOR
+      - keyword: CURRENT
+    - keyword: COLUMN
+    - bracketed:
+      - start_bracket: (
+      - column_reference:
+          naked_identifier: o_orderstatus
+      - comma: ','
+      - column_reference:
+          naked_identifier: o_orderkey
+      - end_bracket: )
+    - keyword: 'ON'
+    - table_reference:
+        naked_identifier: orders
+- statement_terminator: ;
+- statement:
+    collect_statistics_statement:
+    - keyword: COLLECT
+    - keyword: STATS
+    - keyword: COLUMN
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: IND_TIPO_TARJETA
+        end_bracket: )
+    - keyword: 'ON'
+    - table_reference:
+      - naked_identifier: DB_1
+      - dot: .
+      - naked_identifier: TABLE_1
+- statement_terminator: ;
+- statement:
+    collect_statistics_statement:
+    - keyword: COLLECT
+    - keyword: STAT
+    - keyword: COLUMN
+    - bracketed:
+        start_bracket: (
+        column_reference:
+          naked_identifier: IND_TIPO_TARJETA
+        end_bracket: )
+    - keyword: 'ON'
+    - table_reference:
+      - naked_identifier: DB_1
+      - dot: .
+      - naked_identifier: TABLE_1
+- statement_terminator: ;
+- statement:
+    collect_statistics_statement:
+    - keyword: COLLECT
+    - keyword: STATS
+    - keyword: COLUMN
+    - column_reference:
+        naked_identifier: IND_TIPO_TARJETA
+    - keyword: 'ON'
+    - table_reference:
+      - naked_identifier: DB_1
+      - dot: .
+      - naked_identifier: TABLE_1
+- statement_terminator: ;
+- statement:
+    collect_statistics_statement:
+    - keyword: COLLECT
+    - keyword: STATS
+    - keyword: INDEX
+    - bracketed:
+      - start_bracket: (
+      - column_reference:
+          naked_identifier: COD_TARJETA
+      - comma: ','
+      - column_reference:
+          naked_identifier: COD_EST
+      - comma: ','
+      - column_reference:
+          naked_identifier: IND_TIPO_TARJETA
+      - comma: ','
+      - column_reference:
+          naked_identifier: FEC_ANIO_MES
+      - end_bracket: )
+    - keyword: 'ON'
+    - table_reference:
+      - naked_identifier: DB_1
+      - dot: .
+      - naked_identifier: TABLE_1
+- statement_terminator: ;
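
Note: the Teradata fixture now also covers the abbreviated COLLECT STATS and COLLECT STAT spellings, COLUMN with or without a parenthesised list, and the USING ... THRESHOLD option clause. Representative variants from the fixture, all parsing to collect_statistics_statement:

    COLLECT STATS COLUMN IND_TIPO_TARJETA ON DB_1.TABLE_1;
    COLLECT STATISTICS
        USING SYSTEM THRESHOLD FOR CURRENT
        COLUMN (o_orderstatus, o_orderkey)
        ON orders;
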
diff --git a/test/fixtures/dialects/teradata/comparison_operators.sql b/test/fixtures/dialects/teradata/comparison_operators.sql
new file mode 100644
index 0000000..0b260c5
--- /dev/null
+++ b/test/fixtures/dialects/teradata/comparison_operators.sql
@@ -0,0 +1,27 @@
+SELECT * FROM MY_TABLE WHERE A >= B;
+
+SELECT * FROM MY_TABLE WHERE A GE B;
+
+SELECT * FROM MY_TABLE WHERE A <= B;
+
+SELECT * FROM MY_TABLE WHERE A LE B;
+
+SELECT * FROM MY_TABLE WHERE A = B;
+
+SELECT * FROM MY_TABLE WHERE A EQ B;
+
+SELECT * FROM MY_TABLE WHERE A <> B;
+
+SELECT * FROM MY_TABLE WHERE A ^= B;
+
+SELECT * FROM MY_TABLE WHERE A NOT= B;
+
+SELECT * FROM MY_TABLE WHERE A NE B;
+
+SELECT * FROM MY_TABLE WHERE A GT B;
+
+SELECT * FROM MY_TABLE WHERE A > B;
+
+SELECT * FROM MY_TABLE WHERE A LT B;
+
+SELECT * FROM MY_TABLE WHERE A < B;
diff --git a/test/fixtures/dialects/teradata/comparison_operators.yml b/test/fixtures/dialects/teradata/comparison_operators.yml
new file mode 100644
index 0000000..fa70794
--- /dev/null
+++ b/test/fixtures/dialects/teradata/comparison_operators.yml
@@ -0,0 +1,356 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 6fe3b44345923a0b3030d15821fb486005acb9e39850430f6d05fd6af81a7679
+file:
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: MY_TABLE
+      where_clause:
+        keyword: WHERE
+        expression:
+        - column_reference:
+            naked_identifier: A
+        - comparison_operator:
+          - raw_comparison_operator: '>'
+          - raw_comparison_operator: '='
+        - column_reference:
+            naked_identifier: B
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: MY_TABLE
+      where_clause:
+        keyword: WHERE
+        expression:
+        - column_reference:
+            naked_identifier: A
+        - comparison_operator: GE
+        - column_reference:
+            naked_identifier: B
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: MY_TABLE
+      where_clause:
+        keyword: WHERE
+        expression:
+        - column_reference:
+            naked_identifier: A
+        - comparison_operator:
+          - raw_comparison_operator: <
+          - raw_comparison_operator: '='
+        - column_reference:
+            naked_identifier: B
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: MY_TABLE
+      where_clause:
+        keyword: WHERE
+        expression:
+        - column_reference:
+            naked_identifier: A
+        - comparison_operator: LE
+        - column_reference:
+            naked_identifier: B
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: MY_TABLE
+      where_clause:
+        keyword: WHERE
+        expression:
+        - column_reference:
+            naked_identifier: A
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - column_reference:
+            naked_identifier: B
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: MY_TABLE
+      where_clause:
+        keyword: WHERE
+        expression:
+        - column_reference:
+            naked_identifier: A
+        - comparison_operator: EQ
+        - column_reference:
+            naked_identifier: B
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: MY_TABLE
+      where_clause:
+        keyword: WHERE
+        expression:
+        - column_reference:
+            naked_identifier: A
+        - comparison_operator:
+          - raw_comparison_operator: <
+          - raw_comparison_operator: '>'
+        - column_reference:
+            naked_identifier: B
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: MY_TABLE
+      where_clause:
+        keyword: WHERE
+        expression:
+        - column_reference:
+            naked_identifier: A
+        - comparison_operator:
+            binary_operator: ^
+            raw_comparison_operator: '='
+        - column_reference:
+            naked_identifier: B
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: MY_TABLE
+      where_clause:
+        keyword: WHERE
+        expression:
+        - column_reference:
+            naked_identifier: A
+        - comparison_operator:
+            keyword: NOT
+            raw_comparison_operator: '='
+        - column_reference:
+            naked_identifier: B
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: MY_TABLE
+      where_clause:
+        keyword: WHERE
+        expression:
+        - column_reference:
+            naked_identifier: A
+        - comparison_operator: NE
+        - column_reference:
+            naked_identifier: B
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: MY_TABLE
+      where_clause:
+        keyword: WHERE
+        expression:
+        - column_reference:
+            naked_identifier: A
+        - comparison_operator: GT
+        - column_reference:
+            naked_identifier: B
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: MY_TABLE
+      where_clause:
+        keyword: WHERE
+        expression:
+        - column_reference:
+            naked_identifier: A
+        - comparison_operator:
+            raw_comparison_operator: '>'
+        - column_reference:
+            naked_identifier: B
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: MY_TABLE
+      where_clause:
+        keyword: WHERE
+        expression:
+        - column_reference:
+            naked_identifier: A
+        - comparison_operator: LT
+        - column_reference:
+            naked_identifier: B
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: MY_TABLE
+      where_clause:
+        keyword: WHERE
+        expression:
+        - column_reference:
+            naked_identifier: A
+        - comparison_operator:
+            raw_comparison_operator: <
+        - column_reference:
+            naked_identifier: B
+- statement_terminator: ;
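
Note: this new fixture pairs each of Teradata's keyword comparison operators (EQ, NE, GT, GE, LT, LE), plus the NOT= and ^= spellings, with its symbolic counterpart, and the YAML shows all of them landing in comparison_operator segments. For example:

    SELECT * FROM MY_TABLE WHERE A NE B;  -- keyword spelling of A <> B
    SELECT * FROM MY_TABLE WHERE A GE B;  -- keyword spelling of A >= B
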
diff --git a/test/fixtures/dialects/teradata/create_table.yml b/test/fixtures/dialects/teradata/create_table.yml
index 0f9c58b..0b7aac9 100644
--- a/test/fixtures/dialects/teradata/create_table.yml
+++ b/test/fixtures/dialects/teradata/create_table.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 9575db20580324d3eeb9919f2fdebdb2174b8b78d41d59ccfe94dffa6e31f8e9
+_hash: 3badd344b1e26c3368e9102fa001ff4302206b72fb4594f4dda4a6611a08fd32
 file:
 - statement:
     create_table_statement:
@@ -20,11 +20,11 @@ file:
             naked_identifier: Org_Unit_Code
           data_type:
             data_type_identifier: char
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '6'
-              end_bracket: )
+                end_bracket: )
           td_column_attribute_constraint:
           - keyword: character
           - keyword: set
@@ -38,11 +38,11 @@ file:
             naked_identifier: Org_Unit_Type
           data_type:
             data_type_identifier: char
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '3'
-              end_bracket: )
+                end_bracket: )
           td_column_attribute_constraint:
           - keyword: character
           - keyword: set
@@ -56,11 +56,11 @@ file:
             naked_identifier: Entity_Code
           data_type:
             data_type_identifier: varchar
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '10'
-              end_bracket: )
+                end_bracket: )
           td_column_attribute_constraint:
             keyword: uppercase
           column_constraint_segment:
@@ -72,11 +72,11 @@ file:
             naked_identifier: Parent_Org_Unit_Code
           data_type:
             data_type_identifier: char
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '6'
-              end_bracket: )
+                end_bracket: )
           td_column_attribute_constraint:
           - keyword: character
           - keyword: set
@@ -90,11 +90,11 @@ file:
             naked_identifier: Parent_Org_Unit_Type
           data_type:
             data_type_identifier: char
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '3'
-              end_bracket: )
+                end_bracket: )
           td_column_attribute_constraint:
           - keyword: character
           - keyword: set
@@ -108,11 +108,11 @@ file:
             naked_identifier: Parent_Entity_Code
           data_type:
             data_type_identifier: varchar
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '10'
-              end_bracket: )
+                end_bracket: )
           td_column_attribute_constraint:
             keyword: uppercase
           column_constraint_segment:
diff --git a/test/fixtures/dialects/teradata/create_table_stmt.yml b/test/fixtures/dialects/teradata/create_table_stmt.yml
index 749eebe..21012a7 100644
--- a/test/fixtures/dialects/teradata/create_table_stmt.yml
+++ b/test/fixtures/dialects/teradata/create_table_stmt.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: ed3990efbc92aa72a08ed29dd6371d853cb3be43fab5668e69a1a2427b9ed575
+_hash: 21981d268a2697bb67da8fb4a6c9b4aa9ef7d9545a7b04259adea592591d3f92
 file:
   statement:
     create_table_statement:
@@ -39,11 +39,11 @@ file:
             naked_identifier: FIELD1
           data_type:
             data_type_identifier: CHAR
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '9'
-              end_bracket: )
+                end_bracket: )
         end_bracket: )
     - td_table_constraint:
       - keyword: PRIMARY
diff --git a/test/fixtures/dialects/teradata/create_table_stmt_2.yml b/test/fixtures/dialects/teradata/create_table_stmt_2.yml
index 38b768d..001d989 100644
--- a/test/fixtures/dialects/teradata/create_table_stmt_2.yml
+++ b/test/fixtures/dialects/teradata/create_table_stmt_2.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: bd33bf070f394474732e4a74bd1808082a78a7044c40b8841b064723f31f75a5
+_hash: 8b9d1e361194f1e8fa9e08200de67e0ca3b3b581d305f7577b5c9038fe0ca3ca
 file:
   statement:
     create_table_statement:
@@ -19,11 +19,11 @@ file:
             naked_identifier: CHAR_FIELD
         - data_type:
             data_type_identifier: CHAR
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '19'
-              end_bracket: )
+                end_bracket: )
         - td_column_attribute_constraint:
           - keyword: CHARACTER
           - keyword: SET
@@ -60,14 +60,13 @@ file:
             naked_identifier: DECIMAL_FIELD
           data_type:
             data_type_identifier: DECIMAL
-            bracketed:
-            - start_bracket: (
-            - expression:
-                numeric_literal: '15'
-            - comma: ','
-            - expression:
-                numeric_literal: '2'
-            - end_bracket: )
+            bracketed_arguments:
+              bracketed:
+              - start_bracket: (
+              - numeric_literal: '15'
+              - comma: ','
+              - numeric_literal: '2'
+              - end_bracket: )
           td_column_attribute_constraint:
             keyword: COMPRESS
             bracketed:
@@ -108,11 +107,11 @@ file:
             naked_identifier: TIMESTAMP_FIELD
           data_type:
             data_type_identifier: TIMESTAMP
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '6'
-              end_bracket: )
+                end_bracket: )
           column_constraint_segment:
           - keyword: NOT
           - keyword: 'NULL'
diff --git a/test/fixtures/dialects/teradata/create_table_stmt_3.yml b/test/fixtures/dialects/teradata/create_table_stmt_3.yml
index 2f56454..eec645d 100644
--- a/test/fixtures/dialects/teradata/create_table_stmt_3.yml
+++ b/test/fixtures/dialects/teradata/create_table_stmt_3.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: b7fbf02d7f6097837f16800fe669722f75a9d81baf345f63c6f04aa51a31fba0
+_hash: 2a1999394771b519f4daf4b3d620f1b4ea325dac0b388b5b6486149cadff9d39
 file:
   statement:
     create_table_statement:
@@ -19,11 +19,11 @@ file:
             naked_identifier: DES_EVENTO
         - data_type:
             data_type_identifier: VARCHAR
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '255'
-              end_bracket: )
+                end_bracket: )
         - td_column_attribute_constraint:
           - keyword: CHARACTER
           - keyword: SET
@@ -68,22 +68,22 @@ file:
             function_name_identifier: RANGE_N
           bracketed:
           - start_bracket: (
-          - raw: FEC_OPERACION
-          - raw: BETWEEN
-          - raw: DATE
+          - code: FEC_OPERACION
+          - code: BETWEEN
+          - code: DATE
           - single_quote: "'2007-01-01'"
-          - raw: AND
-          - raw: DATE
+          - code: AND
+          - code: DATE
           - single_quote: "'2022-01-01'"
-          - raw: EACH
-          - raw: INTERVAL
+          - code: EACH
+          - code: INTERVAL
           - single_quote: "'1'"
-          - raw: MONTH
+          - code: MONTH
           - comma: ','
-          - raw: 'NO'
-          - raw: RANGE
-          - raw: OR
-          - raw: UNKNOWN
+          - code: 'NO'
+          - code: RANGE
+          - code: OR
+          - code: UNKNOWN
           - end_bracket: )
       - keyword: INDEX
       - object_reference:
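
The create_table_stmt_3.yml hunk above also shows lexed tokens inside the
RANGE_N partition expression serialising as code: where 1.4.x wrote raw:,
reflecting renamed token labels in 2.x parse records. A sketch for checking
which leaf labels a parse produces, assuming parse() returns a nested
dict/list record and that this is run from the repository root:

import sqlfluff
from pathlib import Path

def leaf_labels(node, acc=None):
    # Walk the serialised parse record and collect the keys that hold
    # plain string values, i.e. the token labels seen in the YAML files.
    acc = set() if acc is None else acc
    if isinstance(node, dict):
        for key, value in node.items():
            if isinstance(value, (dict, list)):
                leaf_labels(value, acc)
            else:
                acc.add(key)
    elif isinstance(node, list):
        for item in node:
            leaf_labels(item, acc)
    return acc

sql = Path("test/fixtures/dialects/teradata/create_table_stmt_3.sql").read_text()
labels = leaf_labels(sqlfluff.parse(sql, dialect="teradata"))
print("code" in labels, "raw" in labels)  # expected on 2.x: True False
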
diff --git a/test/fixtures/dialects/teradata/create_table_stmt_4.yml b/test/fixtures/dialects/teradata/create_table_stmt_4.yml
index d6bd686..9b2d06c 100644
--- a/test/fixtures/dialects/teradata/create_table_stmt_4.yml
+++ b/test/fixtures/dialects/teradata/create_table_stmt_4.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 4b566ceea9d591b537356005ca503574fcfe0cdb0430c274aeb6af4f39dbdbdd
+_hash: bf62ad54c88397641a65ceae62c072d661544f2c0993695e44ea00582b7713ac
 file:
   statement:
     create_table_statement:
@@ -20,11 +20,11 @@ file:
             naked_identifier: Org_Unit_Code
           data_type:
             data_type_identifier: char
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '6'
-              end_bracket: )
+                end_bracket: )
           td_column_attribute_constraint:
           - keyword: character
           - keyword: set
@@ -38,11 +38,11 @@ file:
             naked_identifier: Org_Unit_Type
           data_type:
             data_type_identifier: char
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '3'
-              end_bracket: )
+                end_bracket: )
           td_column_attribute_constraint:
           - keyword: character
           - keyword: set
@@ -56,11 +56,11 @@ file:
             naked_identifier: Entity_Code
           data_type:
             data_type_identifier: varchar
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '10'
-              end_bracket: )
+                end_bracket: )
           td_column_attribute_constraint:
             keyword: uppercase
           column_constraint_segment:
@@ -72,11 +72,11 @@ file:
             naked_identifier: Parent_Org_Unit_Code
           data_type:
             data_type_identifier: char
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '6'
-              end_bracket: )
+                end_bracket: )
           td_column_attribute_constraint:
           - keyword: character
           - keyword: set
@@ -90,11 +90,11 @@ file:
             naked_identifier: Parent_Org_Unit_Type
           data_type:
             data_type_identifier: char
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '3'
-              end_bracket: )
+                end_bracket: )
           td_column_attribute_constraint:
           - keyword: character
           - keyword: set
@@ -108,11 +108,11 @@ file:
             naked_identifier: Parent_Entity_Code
           data_type:
             data_type_identifier: varchar
-            bracketed:
-              start_bracket: (
-              expression:
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
                 numeric_literal: '10'
-              end_bracket: )
+                end_bracket: )
           td_column_attribute_constraint:
             keyword: uppercase
           column_constraint_segment:
diff --git a/test/fixtures/dialects/teradata/create_table_with_data.sql b/test/fixtures/dialects/teradata/create_table_with_data.sql
index 134ba95..8b7d273 100644
--- a/test/fixtures/dialects/teradata/create_table_with_data.sql
+++ b/test/fixtures/dialects/teradata/create_table_with_data.sql
@@ -3,3 +3,11 @@ CREATE VOLATILE TABLE a AS (SELECT 'A' AS B) WITH DATA ON COMMIT PRESERVE ROWS;
 CREATE VOLATILE TABLE b AS (SELECT 'A' AS B) WITH DATA ON COMMIT DELETE ROWS;
 
 CREATE VOLATILE TABLE c AS (SELECT 'A' AS B) WITH NO DATA;
+
+CREATE VOLATILE TABLE e AS (SELECT 'A' AS B) WITH NO DATA AND STATS;
+
+CREATE VOLATILE TABLE f AS (SELECT 'A' AS B) WITH NO DATA AND NO STATS;
+
+CREATE VOLATILE TABLE g AS (SELECT 'A' AS B) WITH NO DATA AND STATISTICS;
+
+CREATE VOLATILE TABLE h AS (SELECT 'A' AS B) WITH NO DATA AND NO STATISTICS ON COMMIT PRESERVE ROWS;
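
The four statements added to create_table_with_data.sql exercise Teradata's
optional AND [NO] STATS / STATISTICS clause after WITH [NO] DATA, including
its combination with ON COMMIT PRESERVE ROWS; the regenerated YAML below
shows each extra keyword landing in td_table_constraint. A sketch that
round-trips one of the new statements (taken verbatim from the diff) through
the assumed simple API:

import sqlfluff

sql = "CREATE VOLATILE TABLE e AS (SELECT 'A' AS B) WITH NO DATA AND STATS;"
# parse() is documented to raise an APIParsingError if the statement fails
# to parse, so reaching the print confirms the new grammar branch.
record = sqlfluff.parse(sql, dialect="teradata")
print(record)
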
diff --git a/test/fixtures/dialects/teradata/create_table_with_data.yml b/test/fixtures/dialects/teradata/create_table_with_data.yml
index f3294d6..b575b9a 100644
--- a/test/fixtures/dialects/teradata/create_table_with_data.yml
+++ b/test/fixtures/dialects/teradata/create_table_with_data.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 0d145b42e1c65fac3ae8baa67e1af6c8cf92b42aea57d3485a7e3cc785e38a33
+_hash: 4d792b0bbbda0f7cb7064bdc3967ad78ae41b51e7ef3585d7c61d1f5507afa28
 file:
 - statement:
     create_table_statement:
@@ -83,3 +83,113 @@ file:
       - keyword: 'NO'
       - keyword: DATA
 - statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: VOLATILE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: e
+    - keyword: AS
+    - bracketed:
+        start_bracket: (
+        select_statement:
+          select_clause:
+            keyword: SELECT
+            select_clause_element:
+              quoted_literal: "'A'"
+              alias_expression:
+                keyword: AS
+                naked_identifier: B
+        end_bracket: )
+    - td_table_constraint:
+      - keyword: WITH
+      - keyword: 'NO'
+      - keyword: DATA
+      - keyword: AND
+      - keyword: STATS
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: VOLATILE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: f
+    - keyword: AS
+    - bracketed:
+        start_bracket: (
+        select_statement:
+          select_clause:
+            keyword: SELECT
+            select_clause_element:
+              quoted_literal: "'A'"
+              alias_expression:
+                keyword: AS
+                naked_identifier: B
+        end_bracket: )
+    - td_table_constraint:
+      - keyword: WITH
+      - keyword: 'NO'
+      - keyword: DATA
+      - keyword: AND
+      - keyword: 'NO'
+      - keyword: STATS
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: VOLATILE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: g
+    - keyword: AS
+    - bracketed:
+        start_bracket: (
+        select_statement:
+          select_clause:
+            keyword: SELECT
+            select_clause_element:
+              quoted_literal: "'A'"
+              alias_expression:
+                keyword: AS
+                naked_identifier: B
+        end_bracket: )
+    - td_table_constraint:
+      - keyword: WITH
+      - keyword: 'NO'
+      - keyword: DATA
+      - keyword: AND
+      - keyword: STATISTICS
+- statement_terminator: ;
+- statement:
+    create_table_statement:
+    - keyword: CREATE
+    - keyword: VOLATILE
+    - keyword: TABLE
+    - table_reference:
+        naked_identifier: h
+    - keyword: AS
+    - bracketed:
+        start_bracket: (
+        select_statement:
+          select_clause:
+            keyword: SELECT
+            select_clause_element:
+              quoted_literal: "'A'"
+              alias_expression:
+                keyword: AS
+                naked_identifier: B
+        end_bracket: )
+    - td_table_constraint:
+      - keyword: WITH
+      - keyword: 'NO'
+      - keyword: DATA
+      - keyword: AND
+      - keyword: 'NO'
+      - keyword: STATISTICS
+      - keyword: 'ON'
+      - keyword: COMMIT
+      - keyword: PRESERVE
+      - keyword: ROWS
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/teradata/select_top.sql b/test/fixtures/dialects/teradata/select_top.sql
new file mode 100644
index 0000000..69659b6
--- /dev/null
+++ b/test/fixtures/dialects/teradata/select_top.sql
@@ -0,0 +1,17 @@
+SELECT TOP 100 * FROM MY_TABLE;
+
+SELECT * FROM MY_TABLE;
+
+SELECT TOP 100
+    COL_A,
+    COL_B
+FROM MY_TABLE;
+
+SELECT DISTINCT *
+FROM MY_TABLE;
+
+SELECT TOP 10 PERCENT * FROM MY_TABLE;
+
+SELECT TOP 0.1 PERCENT COL_A FROM MY_TABLE;
+
+SELECT TOP 0.1 PERCENT WITH TIES COL_A, COL_B FROM MY_TABLE ORDER BY COL_B;
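
select_top.sql is a new fixture covering Teradata's TOP n [PERCENT]
[WITH TIES] select modifier alongside plain and DISTINCT selects. A quick
sketch, reusing three of the statements verbatim with the same assumed
parse() helper:

import sqlfluff

for sql in (
    "SELECT TOP 100 * FROM MY_TABLE;",
    "SELECT TOP 10 PERCENT * FROM MY_TABLE;",
    "SELECT TOP 0.1 PERCENT WITH TIES COL_A, COL_B FROM MY_TABLE ORDER BY COL_B;",
):
    # Each should parse with the TOP tokens grouped under
    # select_clause_modifier, as in the fixture YAML below.
    sqlfluff.parse(sql, dialect="teradata")
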
diff --git a/test/fixtures/dialects/teradata/select_top.yml b/test/fixtures/dialects/teradata/select_top.yml
new file mode 100644
index 0000000..fd65d22
--- /dev/null
+++ b/test/fixtures/dialects/teradata/select_top.yml
@@ -0,0 +1,156 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 115f038980ab8782a6a505e2371ca12f1916731277a75af335b7c61439e9327b
+file:
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_modifier:
+          keyword: TOP
+          expression:
+            numeric_literal: '100'
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: MY_TABLE
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: MY_TABLE
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_modifier:
+          keyword: TOP
+          expression:
+            numeric_literal: '100'
+      - select_clause_element:
+          column_reference:
+            naked_identifier: COL_A
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: COL_B
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: MY_TABLE
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_modifier:
+          keyword: DISTINCT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: MY_TABLE
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_modifier:
+        - keyword: TOP
+        - expression:
+            numeric_literal: '10'
+        - keyword: PERCENT
+        select_clause_element:
+          wildcard_expression:
+            wildcard_identifier:
+              star: '*'
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: MY_TABLE
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+        keyword: SELECT
+        select_clause_modifier:
+        - keyword: TOP
+        - expression:
+            numeric_literal: '0.1'
+        - keyword: PERCENT
+        select_clause_element:
+          column_reference:
+            naked_identifier: COL_A
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: MY_TABLE
+- statement_terminator: ;
+- statement:
+    select_statement:
+      select_clause:
+      - keyword: SELECT
+      - select_clause_modifier:
+        - keyword: TOP
+        - expression:
+            numeric_literal: '0.1'
+        - keyword: PERCENT
+        - keyword: WITH
+        - keyword: TIES
+      - select_clause_element:
+          column_reference:
+            naked_identifier: COL_A
+      - comma: ','
+      - select_clause_element:
+          column_reference:
+            naked_identifier: COL_B
+      from_clause:
+        keyword: FROM
+        from_expression:
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: MY_TABLE
+      orderby_clause:
+      - keyword: ORDER
+      - keyword: BY
+      - column_reference:
+          naked_identifier: COL_B
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/teradata/set_query_band.sql b/test/fixtures/dialects/teradata/set_query_band.sql
new file mode 100644
index 0000000..6ccca16
--- /dev/null
+++ b/test/fixtures/dialects/teradata/set_query_band.sql
@@ -0,0 +1,15 @@
+SET QUERY_BAND = 'cat=siamese;dog=akita;' 
+UPDATE FOR SESSION VOLATILE;
+
+SET QUERY_BAND = 'area=west;city=sandiego;tree=maple;flower=rose;' FOR SESSION;
+
+SET QUERY_BAND = 'city=san diego;' UPDATE FOR SESSION;
+
+SET QUERY_BAND='PROXYUSER=fred;'
+     FOR TRANSACTION;
+
+SET QUERY_BAND = NONE FOR TRANSACTION;
+
+SET QUERY_BAND=NONE FOR TRANSACTION;
+
+SET QUERY_BAND = '' FOR TRANSACTION;
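
set_query_band.sql covers the Teradata SET QUERY_BAND statement in its
string, NONE and empty-string forms, for both SESSION and TRANSACTION scope
(with the optional UPDATE and VOLATILE keywords). A sketch using one of the
NONE variants verbatim:

import sqlfluff

record = sqlfluff.parse(
    "SET QUERY_BAND = NONE FOR TRANSACTION;",
    dialect="teradata",
)
# Expect a set_query_band_statement node, as in the fixture YAML below.
print(record)
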
diff --git a/test/fixtures/dialects/teradata/set_query_band.yml b/test/fixtures/dialects/teradata/set_query_band.yml
new file mode 100644
index 0000000..347d17f
--- /dev/null
+++ b/test/fixtures/dialects/teradata/set_query_band.yml
@@ -0,0 +1,80 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 06bd5bdc0a85a0086810f6436263bb2bd81376f46011bc3e08d4228aafd1baca
+file:
+- statement:
+    set_query_band_statement:
+    - keyword: SET
+    - keyword: QUERY_BAND
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'cat=siamese;dog=akita;'"
+    - keyword: UPDATE
+    - keyword: FOR
+    - keyword: SESSION
+    - keyword: VOLATILE
+- statement_terminator: ;
+- statement:
+    set_query_band_statement:
+    - keyword: SET
+    - keyword: QUERY_BAND
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'area=west;city=sandiego;tree=maple;flower=rose;'"
+    - keyword: FOR
+    - keyword: SESSION
+- statement_terminator: ;
+- statement:
+    set_query_band_statement:
+    - keyword: SET
+    - keyword: QUERY_BAND
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'city=san diego;'"
+    - keyword: UPDATE
+    - keyword: FOR
+    - keyword: SESSION
+- statement_terminator: ;
+- statement:
+    set_query_band_statement:
+    - keyword: SET
+    - keyword: QUERY_BAND
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "'PROXYUSER=fred;'"
+    - keyword: FOR
+    - keyword: TRANSACTION
+- statement_terminator: ;
+- statement:
+    set_query_band_statement:
+    - keyword: SET
+    - keyword: QUERY_BAND
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - keyword: NONE
+    - keyword: FOR
+    - keyword: TRANSACTION
+- statement_terminator: ;
+- statement:
+    set_query_band_statement:
+    - keyword: SET
+    - keyword: QUERY_BAND
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - keyword: NONE
+    - keyword: FOR
+    - keyword: TRANSACTION
+- statement_terminator: ;
+- statement:
+    set_query_band_statement:
+    - keyword: SET
+    - keyword: QUERY_BAND
+    - comparison_operator:
+        raw_comparison_operator: '='
+    - quoted_literal: "''"
+    - keyword: FOR
+    - keyword: TRANSACTION
+- statement_terminator: ;
diff --git a/test/fixtures/dialects/tsql/alter_index.sql b/test/fixtures/dialects/tsql/alter_index.sql
new file mode 100644
index 0000000..37ab3c7
--- /dev/null
+++ b/test/fixtures/dialects/tsql/alter_index.sql
@@ -0,0 +1,75 @@
+ALTER INDEX index1 ON table1 REBUILD;
+
+ALTER INDEX ALL ON table1 REBUILD;
+
+ALTER INDEX idxcci_cci_target ON cci_target REORGANIZE WITH (COMPRESS_ALL_ROW_GROUPS = ON);
+
+ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 REORGANIZE;
+
+ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 REORGANIZE PARTITION = 0;
+
+ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 REORGANIZE WITH (COMPRESS_ALL_ROW_GROUPS = ON);
+
+ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 REORGANIZE PARTITION = 0 WITH (COMPRESS_ALL_ROW_GROUPS = ON);
+
+ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 REORGANIZE;
+
+ALTER INDEX cci_fact3ON ON fact3 REBUILD PARTITION = 12;
+
+ALTER INDEX cci_SimpleTable ON SimpleTable
+REBUILD
+WITH (DATA_COMPRESSION = COLUMNSTORE_ARCHIVE);
+
+ALTER INDEX cci_SimpleTable ON SimpleTable
+REBUILD
+WITH (DATA_COMPRESSION = COLUMNSTORE);
+
+ALTER INDEX PK_ProductPhoto_ProductPhotoID ON Production.ProductPhoto REORGANIZE WITH (LOB_COMPACTION = ON);
+
+ALTER INDEX IX_Employee_ManagerID ON HumanResources.Employee DISABLE;
+
+ALTER INDEX IX_INDEX1
+ON T1
+REBUILD
+WITH (XML_COMPRESSION = ON);
+
+ALTER INDEX ALL ON Production.Product
+REBUILD WITH (FILLFACTOR = 80, SORT_IN_TEMPDB = ON, STATISTICS_NORECOMPUTE = ON);
+
+ALTER INDEX test_idx on test_table REBUILD WITH (ONLINE = ON, MAXDOP = 1, RESUMABLE = ON);
+
+ALTER INDEX test_idx on test_table PAUSE;
+
+ALTER INDEX test_idx on test_table ABORT;
+
+ALTER INDEX test_idx on test_table
+REBUILD WITH (XML_COMPRESSION = ON ON PARTITIONS (2, 4, 6 TO 8));
+
+ALTER INDEX test_idx on test_table
+REBUILD WITH (DATA_COMPRESSION = PAGE ON PARTITIONS (3, 5));
+
+ALTER INDEX test_idx on test_table
+REBUILD WITH (DATA_COMPRESSION = NONE ON PARTITIONS (1));
+
+ALTER INDEX IX_TransactionHistory_TransactionDate ON Production.TransactionHistory
+REBUILD Partition = 5
+   WITH (ONLINE = ON (WAIT_AT_LOW_PRIORITY (MAX_DURATION = 10, ABORT_AFTER_WAIT = SELF)));
+
+
+ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 SET (ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = OFF);
+
+ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 SET (OPTIMIZE_FOR_SEQUENTIAL_KEY  = ON,
+IGNORE_DUP_KEY  = OFF, STATISTICS_NORECOMPUTE = ON);
+
+ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 SET (COMPRESSION_DELAY = 0);
+
+ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 SET (COMPRESSION_DELAY = 100 minutes);
+
+ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 RESUME;
+
+ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 RESUME WITH (MAXDOP = 100, MAX_DURATION = 500 minutes,
+WAIT_AT_LOW_PRIORITY (MAX_DURATION = 10, ABORT_AFTER_WAIT = SELF));
+
+ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 RESUME WITH (MAX_DURATION = 500);
+
+ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 RESUME WITH (WAIT_AT_LOW_PRIORITY (MAX_DURATION = 10, ABORT_AFTER_WAIT = SELF));
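
alter_index.sql is a new T-SQL fixture walking through ALTER INDEX in its
REBUILD, REORGANIZE, DISABLE, PAUSE, ABORT, RESUME and SET forms, including
WITH option lists, ON PARTITIONS clauses and nested WAIT_AT_LOW_PRIORITY
options. A sketch parsing three of the statements verbatim under the tsql
dialect:

import sqlfluff

for sql in (
    "ALTER INDEX index1 ON table1 REBUILD;",
    "ALTER INDEX ALL ON table1 REBUILD;",
    "ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 RESUME WITH (MAX_DURATION = 500);",
):
    # Success here (no APIParsingError) means the new alter_index_statement
    # grammar accepts the form; the tree shapes are in the YAML below.
    sqlfluff.parse(sql, dialect="tsql")
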
diff --git a/test/fixtures/dialects/tsql/alter_index.yml b/test/fixtures/dialects/tsql/alter_index.yml
new file mode 100644
index 0000000..88bd888
--- /dev/null
+++ b/test/fixtures/dialects/tsql/alter_index.yml
@@ -0,0 +1,623 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: ae559c6095a99da7a6eaa94388edbca341d6980ecac15a47f886246db5ce0e8e
+file:
+  batch:
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: index1
+      - keyword: 'ON'
+      - table_reference:
+          naked_identifier: table1
+      - keyword: REBUILD
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - keyword: ALL
+      - keyword: 'ON'
+      - table_reference:
+          naked_identifier: table1
+      - keyword: REBUILD
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: idxcci_cci_target
+      - keyword: 'ON'
+      - table_reference:
+          naked_identifier: cci_target
+      - keyword: REORGANIZE
+      - keyword: WITH
+      - bracketed:
+        - start_bracket: (
+        - keyword: COMPRESS_ALL_ROW_GROUPS
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'ON'
+        - end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: cci_FactInternetSales2
+      - keyword: 'ON'
+      - table_reference:
+          naked_identifier: FactInternetSales2
+      - keyword: REORGANIZE
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: cci_FactInternetSales2
+      - keyword: 'ON'
+      - table_reference:
+          naked_identifier: FactInternetSales2
+      - keyword: REORGANIZE
+      - keyword: PARTITION
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - numeric_literal: '0'
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: cci_FactInternetSales2
+      - keyword: 'ON'
+      - table_reference:
+          naked_identifier: FactInternetSales2
+      - keyword: REORGANIZE
+      - keyword: WITH
+      - bracketed:
+        - start_bracket: (
+        - keyword: COMPRESS_ALL_ROW_GROUPS
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'ON'
+        - end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: cci_FactInternetSales2
+      - keyword: 'ON'
+      - table_reference:
+          naked_identifier: FactInternetSales2
+      - keyword: REORGANIZE
+      - keyword: PARTITION
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - numeric_literal: '0'
+      - keyword: WITH
+      - bracketed:
+        - start_bracket: (
+        - keyword: COMPRESS_ALL_ROW_GROUPS
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'ON'
+        - end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: cci_FactInternetSales2
+      - keyword: 'ON'
+      - table_reference:
+          naked_identifier: FactInternetSales2
+      - keyword: REORGANIZE
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: cci_fact3ON
+      - keyword: 'ON'
+      - table_reference:
+          naked_identifier: fact3
+      - keyword: REBUILD
+      - keyword: PARTITION
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - numeric_literal: '12'
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: cci_SimpleTable
+      - keyword: 'ON'
+      - table_reference:
+          naked_identifier: SimpleTable
+      - keyword: REBUILD
+      - keyword: WITH
+      - bracketed:
+        - start_bracket: (
+        - keyword: DATA_COMPRESSION
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: COLUMNSTORE_ARCHIVE
+        - end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: cci_SimpleTable
+      - keyword: 'ON'
+      - table_reference:
+          naked_identifier: SimpleTable
+      - keyword: REBUILD
+      - keyword: WITH
+      - bracketed:
+        - start_bracket: (
+        - keyword: DATA_COMPRESSION
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: COLUMNSTORE
+        - end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: PK_ProductPhoto_ProductPhotoID
+      - keyword: 'ON'
+      - table_reference:
+        - naked_identifier: Production
+        - dot: .
+        - naked_identifier: ProductPhoto
+      - keyword: REORGANIZE
+      - keyword: WITH
+      - bracketed:
+        - start_bracket: (
+        - keyword: LOB_COMPACTION
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'ON'
+        - end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: IX_Employee_ManagerID
+      - keyword: 'ON'
+      - table_reference:
+        - naked_identifier: HumanResources
+        - dot: .
+        - naked_identifier: Employee
+      - keyword: DISABLE
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: IX_INDEX1
+      - keyword: 'ON'
+      - table_reference:
+          naked_identifier: T1
+      - keyword: REBUILD
+      - keyword: WITH
+      - bracketed:
+        - start_bracket: (
+        - keyword: XML_COMPRESSION
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'ON'
+        - end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - keyword: ALL
+      - keyword: 'ON'
+      - table_reference:
+        - naked_identifier: Production
+        - dot: .
+        - naked_identifier: Product
+      - keyword: REBUILD
+      - keyword: WITH
+      - bracketed:
+        - start_bracket: (
+        - keyword: FILLFACTOR
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - numeric_literal: '80'
+        - comma: ','
+        - keyword: SORT_IN_TEMPDB
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'ON'
+        - comma: ','
+        - keyword: STATISTICS_NORECOMPUTE
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'ON'
+        - end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: test_idx
+      - keyword: 'on'
+      - table_reference:
+          naked_identifier: test_table
+      - keyword: REBUILD
+      - keyword: WITH
+      - bracketed:
+        - start_bracket: (
+        - keyword: ONLINE
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'ON'
+        - comma: ','
+        - keyword: MAXDOP
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - numeric_literal: '1'
+        - comma: ','
+        - keyword: RESUMABLE
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'ON'
+        - end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: test_idx
+      - keyword: 'on'
+      - table_reference:
+          naked_identifier: test_table
+      - keyword: PAUSE
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: test_idx
+      - keyword: 'on'
+      - table_reference:
+          naked_identifier: test_table
+      - keyword: ABORT
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: test_idx
+      - keyword: 'on'
+      - table_reference:
+          naked_identifier: test_table
+      - keyword: REBUILD
+      - keyword: WITH
+      - bracketed:
+        - start_bracket: (
+        - keyword: XML_COMPRESSION
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'ON'
+        - keyword: 'ON'
+        - keyword: PARTITIONS
+        - bracketed:
+          - start_bracket: (
+          - numeric_literal: '2'
+          - comma: ','
+          - numeric_literal: '4'
+          - comma: ','
+          - numeric_literal: '6'
+          - keyword: TO
+          - numeric_literal: '8'
+          - end_bracket: )
+        - end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: test_idx
+      - keyword: 'on'
+      - table_reference:
+          naked_identifier: test_table
+      - keyword: REBUILD
+      - keyword: WITH
+      - bracketed:
+        - start_bracket: (
+        - keyword: DATA_COMPRESSION
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: PAGE
+        - keyword: 'ON'
+        - keyword: PARTITIONS
+        - bracketed:
+          - start_bracket: (
+          - numeric_literal: '3'
+          - comma: ','
+          - numeric_literal: '5'
+          - end_bracket: )
+        - end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: test_idx
+      - keyword: 'on'
+      - table_reference:
+          naked_identifier: test_table
+      - keyword: REBUILD
+      - keyword: WITH
+      - bracketed:
+        - start_bracket: (
+        - keyword: DATA_COMPRESSION
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: NONE
+        - keyword: 'ON'
+        - keyword: PARTITIONS
+        - bracketed:
+            start_bracket: (
+            numeric_literal: '1'
+            end_bracket: )
+        - end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: IX_TransactionHistory_TransactionDate
+      - keyword: 'ON'
+      - table_reference:
+        - naked_identifier: Production
+        - dot: .
+        - naked_identifier: TransactionHistory
+      - keyword: REBUILD
+      - keyword: Partition
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - numeric_literal: '5'
+      - keyword: WITH
+      - bracketed:
+        - start_bracket: (
+        - keyword: ONLINE
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'ON'
+        - bracketed:
+            start_bracket: (
+            keyword: WAIT_AT_LOW_PRIORITY
+            bracketed:
+            - start_bracket: (
+            - keyword: MAX_DURATION
+            - comparison_operator:
+                raw_comparison_operator: '='
+            - numeric_literal: '10'
+            - comma: ','
+            - keyword: ABORT_AFTER_WAIT
+            - comparison_operator:
+                raw_comparison_operator: '='
+            - keyword: SELF
+            - end_bracket: )
+            end_bracket: )
+        - end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: cci_FactInternetSales2
+      - keyword: 'ON'
+      - table_reference:
+          naked_identifier: FactInternetSales2
+      - keyword: SET
+      - bracketed:
+        - start_bracket: (
+        - keyword: ALLOW_ROW_LOCKS
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'ON'
+        - comma: ','
+        - keyword: ALLOW_PAGE_LOCKS
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'OFF'
+        - end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: cci_FactInternetSales2
+      - keyword: 'ON'
+      - table_reference:
+          naked_identifier: FactInternetSales2
+      - keyword: SET
+      - bracketed:
+        - start_bracket: (
+        - keyword: OPTIMIZE_FOR_SEQUENTIAL_KEY
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'ON'
+        - comma: ','
+        - keyword: IGNORE_DUP_KEY
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'OFF'
+        - comma: ','
+        - keyword: STATISTICS_NORECOMPUTE
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'ON'
+        - end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: cci_FactInternetSales2
+      - keyword: 'ON'
+      - table_reference:
+          naked_identifier: FactInternetSales2
+      - keyword: SET
+      - bracketed:
+          start_bracket: (
+          keyword: COMPRESSION_DELAY
+          comparison_operator:
+            raw_comparison_operator: '='
+          numeric_literal: '0'
+          end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: cci_FactInternetSales2
+      - keyword: 'ON'
+      - table_reference:
+          naked_identifier: FactInternetSales2
+      - keyword: SET
+      - bracketed:
+        - start_bracket: (
+        - keyword: COMPRESSION_DELAY
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - numeric_literal: '100'
+        - keyword: minutes
+        - end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: cci_FactInternetSales2
+      - keyword: 'ON'
+      - table_reference:
+          naked_identifier: FactInternetSales2
+      - keyword: RESUME
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: cci_FactInternetSales2
+      - keyword: 'ON'
+      - table_reference:
+          naked_identifier: FactInternetSales2
+      - keyword: RESUME
+      - keyword: WITH
+      - bracketed:
+        - start_bracket: (
+        - keyword: MAXDOP
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - numeric_literal: '100'
+        - comma: ','
+        - keyword: MAX_DURATION
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - numeric_literal: '500'
+        - keyword: minutes
+        - comma: ','
+        - keyword: WAIT_AT_LOW_PRIORITY
+        - bracketed:
+          - start_bracket: (
+          - keyword: MAX_DURATION
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - numeric_literal: '10'
+          - comma: ','
+          - keyword: ABORT_AFTER_WAIT
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: SELF
+          - end_bracket: )
+        - end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: cci_FactInternetSales2
+      - keyword: 'ON'
+      - table_reference:
+          naked_identifier: FactInternetSales2
+      - keyword: RESUME
+      - keyword: WITH
+      - bracketed:
+          start_bracket: (
+          keyword: MAX_DURATION
+          comparison_operator:
+            raw_comparison_operator: '='
+          numeric_literal: '500'
+          end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      alter_index_statement:
+      - keyword: ALTER
+      - keyword: INDEX
+      - object_reference:
+          naked_identifier: cci_FactInternetSales2
+      - keyword: 'ON'
+      - table_reference:
+          naked_identifier: FactInternetSales2
+      - keyword: RESUME
+      - keyword: WITH
+      - bracketed:
+          start_bracket: (
+          keyword: WAIT_AT_LOW_PRIORITY
+          bracketed:
+          - start_bracket: (
+          - keyword: MAX_DURATION
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - numeric_literal: '10'
+          - comma: ','
+          - keyword: ABORT_AFTER_WAIT
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: SELF
+          - end_bracket: )
+          end_bracket: )
+  - statement_terminator: ;
diff --git a/test/fixtures/dialects/tsql/alter_table.sql b/test/fixtures/dialects/tsql/alter_table.sql
index d89c306..22f73af 100644
--- a/test/fixtures/dialects/tsql/alter_table.sql
+++ b/test/fixtures/dialects/tsql/alter_table.sql
@@ -49,3 +49,58 @@ GO
 ALTER TABLE [Production].[ProductCostHistory]
 CHECK CONSTRAINT [FK_ProductCostHistory_Product_ProductID]
 GO
+
+ALTER TABLE my_table
+ADD my_col_1 INT
+  , my_col_2 INT
+GO
+
+ALTER TABLE TestTable SET (SYSTEM_VERSIONING = ON); GO
+ALTER TABLE TestTable SET (SYSTEM_VERSIONING = OFF); GO
+
+ALTER TABLE TestTable SET
+  (SYSTEM_VERSIONING = OFF (
+    HISTORY_TABLE = TestTableHistory
+  ));
+GO
+
+ALTER TABLE TestTable SET
+  (SYSTEM_VERSIONING = OFF (
+    HISTORY_TABLE = TestTableHistory,
+    DATA_CONSISTENCY_CHECK = ON
+  ));
+GO
+
+ALTER TABLE TestTable SET
+  (SYSTEM_VERSIONING = OFF (
+    HISTORY_TABLE = TestTableHistory,
+    DATA_CONSISTENCY_CHECK = ON,
+    HISTORY_RETENTION_PERIOD = INFINITE
+  ));
+GO
+
+ALTER TABLE TestTable SET
+  (SYSTEM_VERSIONING = OFF (
+    HISTORY_TABLE = TestTableHistory,
+    DATA_CONSISTENCY_CHECK = ON,
+    HISTORY_RETENTION_PERIOD = 1 YEAR
+  ));
+GO
+
+ALTER TABLE TestTable SET
+  (SYSTEM_VERSIONING = OFF (
+    HISTORY_TABLE = TestTableHistory,
+    DATA_CONSISTENCY_CHECK = ON,
+    HISTORY_RETENTION_PERIOD = 7 MONTHS
+  ));
+GO
+
+ALTER TABLE TestTable SET (FILESTREAM_ON = "NULL"); GO
+ALTER TABLE TestTable SET (FILESTREAM_ON = "default"); GO
+ALTER TABLE TestTable SET (FILESTREAM_ON = PartitionSchemeName); GO
+ALTER TABLE TestTable SET (DATA_DELETION = ON); GO
+ALTER TABLE TestTable SET (DATA_DELETION = OFF(FILTER_COLUMN = ColumnName)); GO
+ALTER TABLE TestTable SET (DATA_DELETION = OFF(FILTER_COLUMN = ColumnName, RETENTION_PERIOD = 1 YEAR)); GO
+ALTER TABLE TestTable SET (DATA_DELETION = OFF(FILTER_COLUMN = ColumnName, RETENTION_PERIOD = INFINITE)); GO
+ALTER TABLE TestTable SET (DATA_DELETION = OFF(FILTER_COLUMN = ColumnName, RETENTION_PERIOD = 7 YEARS)); GO
+ALTER TABLE TestTable SET (DATA_DELETION = OFF(FILTER_COLUMN = ColumnName, RETENTION_PERIOD = 7 DAYS)); GO
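
alter_table.sql gains a multi-column ADD plus the ALTER TABLE ... SET
options for SYSTEM_VERSIONING, FILESTREAM_ON and DATA_DELETION, with nested
option lists such as HISTORY_TABLE, DATA_CONSISTENCY_CHECK and retention
periods (INFINITE, 1 YEAR, 7 MONTHS/DAYS). A sketch with one of the nested
forms, condensed from the fixture onto a single line:

import sqlfluff

sql = (
    "ALTER TABLE TestTable SET (SYSTEM_VERSIONING = OFF ("
    "HISTORY_TABLE = TestTableHistory, DATA_CONSISTENCY_CHECK = ON));"
)
# The inner option list should appear as a nested bracketed segment under
# the outer SET brackets, per the regenerated fixture below.
sqlfluff.parse(sql, dialect="tsql")
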
diff --git a/test/fixtures/dialects/tsql/alter_table.yml b/test/fixtures/dialects/tsql/alter_table.yml
index d0c0b96..9f33bea 100644
--- a/test/fixtures/dialects/tsql/alter_table.yml
+++ b/test/fixtures/dialects/tsql/alter_table.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: bbbad5fb91e122e0f46727338e622d1b17156cc1f6123ecbfa43d4b7ff1067c6
+_hash: 4e240e41dde1c884915f6b1bed1d5c8af6fcaa7437f9962ef42d9b1070ca6504
 file:
 - batch:
     statement:
@@ -38,11 +38,12 @@ file:
           naked_identifier: column_b
           data_type:
             data_type_identifier: VARCHAR
-            bracketed:
-              start_bracket: (
-              expression:
-                numeric_literal: '20'
-              end_bracket: )
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
+                expression:
+                  numeric_literal: '20'
+                end_bracket: )
           column_constraint_segment:
             keyword: 'NULL'
     statement_terminator: ;
@@ -81,11 +82,12 @@ file:
         - naked_identifier: column_b
         - data_type:
             data_type_identifier: VARCHAR
-            bracketed:
-              start_bracket: (
-              expression:
-                numeric_literal: '20'
-              end_bracket: )
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
+                expression:
+                  numeric_literal: '20'
+                end_bracket: )
         - column_constraint_segment:
             keyword: 'NULL'
         - column_constraint_segment:
@@ -297,11 +299,12 @@ file:
           naked_identifier: rec_number
           data_type:
             data_type_identifier: VARCHAR
-            bracketed:
-              start_bracket: (
-              expression:
-                numeric_literal: '36'
-              end_bracket: )
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
+                expression:
+                  numeric_literal: '36'
+                end_bracket: )
 - go_statement:
     keyword: GO
 - batch:
@@ -367,3 +370,453 @@ file:
           quoted_identifier: '[FK_ProductCostHistory_Product_ProductID]'
 - go_statement:
     keyword: GO
+- batch:
+    statement:
+      alter_table_statement:
+      - keyword: ALTER
+      - keyword: TABLE
+      - table_reference:
+          naked_identifier: my_table
+      - keyword: ADD
+      - column_definition:
+          naked_identifier: my_col_1
+          data_type:
+            data_type_identifier: INT
+      - comma: ','
+      - column_definition:
+          naked_identifier: my_col_2
+          data_type:
+            data_type_identifier: INT
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      alter_table_statement:
+      - keyword: ALTER
+      - keyword: TABLE
+      - table_reference:
+          naked_identifier: TestTable
+      - keyword: SET
+      - bracketed:
+        - start_bracket: (
+        - keyword: SYSTEM_VERSIONING
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'ON'
+        - end_bracket: )
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      alter_table_statement:
+      - keyword: ALTER
+      - keyword: TABLE
+      - table_reference:
+          naked_identifier: TestTable
+      - keyword: SET
+      - bracketed:
+        - start_bracket: (
+        - keyword: SYSTEM_VERSIONING
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'OFF'
+        - end_bracket: )
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      alter_table_statement:
+      - keyword: ALTER
+      - keyword: TABLE
+      - table_reference:
+          naked_identifier: TestTable
+      - keyword: SET
+      - bracketed:
+        - start_bracket: (
+        - keyword: SYSTEM_VERSIONING
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'OFF'
+        - bracketed:
+            start_bracket: (
+            keyword: HISTORY_TABLE
+            comparison_operator:
+              raw_comparison_operator: '='
+            table_reference:
+              naked_identifier: TestTableHistory
+            end_bracket: )
+        - end_bracket: )
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      alter_table_statement:
+      - keyword: ALTER
+      - keyword: TABLE
+      - table_reference:
+          naked_identifier: TestTable
+      - keyword: SET
+      - bracketed:
+        - start_bracket: (
+        - keyword: SYSTEM_VERSIONING
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'OFF'
+        - bracketed:
+          - start_bracket: (
+          - keyword: HISTORY_TABLE
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - table_reference:
+              naked_identifier: TestTableHistory
+          - comma: ','
+          - keyword: DATA_CONSISTENCY_CHECK
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: 'ON'
+          - end_bracket: )
+        - end_bracket: )
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      alter_table_statement:
+      - keyword: ALTER
+      - keyword: TABLE
+      - table_reference:
+          naked_identifier: TestTable
+      - keyword: SET
+      - bracketed:
+        - start_bracket: (
+        - keyword: SYSTEM_VERSIONING
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'OFF'
+        - bracketed:
+          - start_bracket: (
+          - keyword: HISTORY_TABLE
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - table_reference:
+              naked_identifier: TestTableHistory
+          - comma: ','
+          - keyword: DATA_CONSISTENCY_CHECK
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: 'ON'
+          - comma: ','
+          - keyword: HISTORY_RETENTION_PERIOD
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - date_part: INFINITE
+          - end_bracket: )
+        - end_bracket: )
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      alter_table_statement:
+      - keyword: ALTER
+      - keyword: TABLE
+      - table_reference:
+          naked_identifier: TestTable
+      - keyword: SET
+      - bracketed:
+        - start_bracket: (
+        - keyword: SYSTEM_VERSIONING
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'OFF'
+        - bracketed:
+          - start_bracket: (
+          - keyword: HISTORY_TABLE
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - table_reference:
+              naked_identifier: TestTableHistory
+          - comma: ','
+          - keyword: DATA_CONSISTENCY_CHECK
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: 'ON'
+          - comma: ','
+          - keyword: HISTORY_RETENTION_PERIOD
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - numeric_literal: '1'
+          - date_part: YEAR
+          - end_bracket: )
+        - end_bracket: )
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      alter_table_statement:
+      - keyword: ALTER
+      - keyword: TABLE
+      - table_reference:
+          naked_identifier: TestTable
+      - keyword: SET
+      - bracketed:
+        - start_bracket: (
+        - keyword: SYSTEM_VERSIONING
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'OFF'
+        - bracketed:
+          - start_bracket: (
+          - keyword: HISTORY_TABLE
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - table_reference:
+              naked_identifier: TestTableHistory
+          - comma: ','
+          - keyword: DATA_CONSISTENCY_CHECK
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: 'ON'
+          - comma: ','
+          - keyword: HISTORY_RETENTION_PERIOD
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - numeric_literal: '7'
+          - date_part: MONTHS
+          - end_bracket: )
+        - end_bracket: )
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      alter_table_statement:
+      - keyword: ALTER
+      - keyword: TABLE
+      - table_reference:
+          naked_identifier: TestTable
+      - keyword: SET
+      - bracketed:
+          start_bracket: (
+          keyword: FILESTREAM_ON
+          comparison_operator:
+            raw_comparison_operator: '='
+          filegroup_name:
+            quoted_identifier: '"NULL"'
+          end_bracket: )
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      alter_table_statement:
+      - keyword: ALTER
+      - keyword: TABLE
+      - table_reference:
+          naked_identifier: TestTable
+      - keyword: SET
+      - bracketed:
+          start_bracket: (
+          keyword: FILESTREAM_ON
+          comparison_operator:
+            raw_comparison_operator: '='
+          filegroup_name:
+            quoted_identifier: '"default"'
+          end_bracket: )
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      alter_table_statement:
+      - keyword: ALTER
+      - keyword: TABLE
+      - table_reference:
+          naked_identifier: TestTable
+      - keyword: SET
+      - bracketed:
+          start_bracket: (
+          keyword: FILESTREAM_ON
+          comparison_operator:
+            raw_comparison_operator: '='
+          filegroup_name:
+            naked_identifier: PartitionSchemeName
+          end_bracket: )
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      alter_table_statement:
+      - keyword: ALTER
+      - keyword: TABLE
+      - table_reference:
+          naked_identifier: TestTable
+      - keyword: SET
+      - bracketed:
+        - start_bracket: (
+        - keyword: DATA_DELETION
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'ON'
+        - end_bracket: )
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      alter_table_statement:
+      - keyword: ALTER
+      - keyword: TABLE
+      - table_reference:
+          naked_identifier: TestTable
+      - keyword: SET
+      - bracketed:
+        - start_bracket: (
+        - keyword: DATA_DELETION
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'OFF'
+        - bracketed:
+            start_bracket: (
+            keyword: FILTER_COLUMN
+            comparison_operator:
+              raw_comparison_operator: '='
+            column_reference:
+              naked_identifier: ColumnName
+            end_bracket: )
+        - end_bracket: )
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      alter_table_statement:
+      - keyword: ALTER
+      - keyword: TABLE
+      - table_reference:
+          naked_identifier: TestTable
+      - keyword: SET
+      - bracketed:
+        - start_bracket: (
+        - keyword: DATA_DELETION
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'OFF'
+        - bracketed:
+          - start_bracket: (
+          - keyword: FILTER_COLUMN
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - column_reference:
+              naked_identifier: ColumnName
+          - comma: ','
+          - keyword: RETENTION_PERIOD
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - numeric_literal: '1'
+          - date_part: YEAR
+          - end_bracket: )
+        - end_bracket: )
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      alter_table_statement:
+      - keyword: ALTER
+      - keyword: TABLE
+      - table_reference:
+          naked_identifier: TestTable
+      - keyword: SET
+      - bracketed:
+        - start_bracket: (
+        - keyword: DATA_DELETION
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'OFF'
+        - bracketed:
+          - start_bracket: (
+          - keyword: FILTER_COLUMN
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - column_reference:
+              naked_identifier: ColumnName
+          - comma: ','
+          - keyword: RETENTION_PERIOD
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - date_part: INFINITE
+          - end_bracket: )
+        - end_bracket: )
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      alter_table_statement:
+      - keyword: ALTER
+      - keyword: TABLE
+      - table_reference:
+          naked_identifier: TestTable
+      - keyword: SET
+      - bracketed:
+        - start_bracket: (
+        - keyword: DATA_DELETION
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'OFF'
+        - bracketed:
+          - start_bracket: (
+          - keyword: FILTER_COLUMN
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - column_reference:
+              naked_identifier: ColumnName
+          - comma: ','
+          - keyword: RETENTION_PERIOD
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - numeric_literal: '7'
+          - date_part: YEARS
+          - end_bracket: )
+        - end_bracket: )
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      alter_table_statement:
+      - keyword: ALTER
+      - keyword: TABLE
+      - table_reference:
+          naked_identifier: TestTable
+      - keyword: SET
+      - bracketed:
+        - start_bracket: (
+        - keyword: DATA_DELETION
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'OFF'
+        - bracketed:
+          - start_bracket: (
+          - keyword: FILTER_COLUMN
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - column_reference:
+              naked_identifier: ColumnName
+          - comma: ','
+          - keyword: RETENTION_PERIOD
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - numeric_literal: '7'
+          - date_part: DAYS
+          - end_bracket: )
+        - end_bracket: )
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
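
The fixtures above cover the new ALTER TABLE ... SET (...) options (FILESTREAM_ON, plus DATA_DELETION with its FILTER_COLUMN and RETENTION_PERIOD variants). To try the same syntax against the tsql dialect outside the fixture suite, sqlfluff's simple Python API can be used; a minimal sketch, assuming sqlfluff 2.x is installed (`pip install sqlfluff`):

    import sqlfluff

    # One of the statements exercised by the fixture above.
    sql = (
        "ALTER TABLE TestTable SET ("
        "DATA_DELETION = OFF (FILTER_COLUMN = ColumnName, RETENTION_PERIOD = 7 DAYS)"
        ");"
    )
    # parse() returns a record of the parse tree and raises if the
    # statement cannot be parsed under the chosen dialect.
    record = sqlfluff.parse(sql, dialect="tsql")
    print(record)
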
diff --git a/test/fixtures/dialects/tsql/bulk_insert.sql b/test/fixtures/dialects/tsql/bulk_insert.sql
new file mode 100644
index 0000000..4c9466e
--- /dev/null
+++ b/test/fixtures/dialects/tsql/bulk_insert.sql
@@ -0,0 +1,13 @@
+-- Plain BULK insert
+BULK INSERT my_schema.my_table
+FROM 'data.csv';
+
+-- BULK insert with options
+BULK INSERT my_schema.my_table
+FROM 'data.csv'
+WITH (
+    BATCHSIZE = 1024,
+    CHECK_CONSTRAINTS,
+    ORDER (col1 ASC, col2 DESC),
+    FORMAT = 'CSV'
+);
diff --git a/test/fixtures/dialects/tsql/bulk_insert.yml b/test/fixtures/dialects/tsql/bulk_insert.yml
new file mode 100644
index 0000000..aa7b578
--- /dev/null
+++ b/test/fixtures/dialects/tsql/bulk_insert.yml
@@ -0,0 +1,58 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 052ffa5a983c0b3920c31222e9477343d61d42569770c2ff4e8b70e407065140
+file:
+  batch:
+  - statement:
+      bulk_insert_statement:
+      - keyword: BULK
+      - keyword: INSERT
+      - table_reference:
+        - naked_identifier: my_schema
+        - dot: .
+        - naked_identifier: my_table
+      - keyword: FROM
+      - quoted_literal: "'data.csv'"
+  - statement_terminator: ;
+  - statement:
+      bulk_insert_statement:
+      - keyword: BULK
+      - keyword: INSERT
+      - table_reference:
+        - naked_identifier: my_schema
+        - dot: .
+        - naked_identifier: my_table
+      - keyword: FROM
+      - quoted_literal: "'data.csv'"
+      - bulk_insert_with_segment:
+          keyword: WITH
+          bracketed:
+          - start_bracket: (
+          - keyword: BATCHSIZE
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - numeric_literal: '1024'
+          - comma: ','
+          - keyword: CHECK_CONSTRAINTS
+          - comma: ','
+          - keyword: ORDER
+          - bracketed:
+            - start_bracket: (
+            - column_reference:
+                naked_identifier: col1
+            - keyword: ASC
+            - comma: ','
+            - column_reference:
+                naked_identifier: col2
+            - keyword: DESC
+            - end_bracket: )
+          - comma: ','
+          - keyword: FORMAT
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - quoted_literal: "'CSV'"
+          - end_bracket: )
+  - statement_terminator: ;
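
bulk_insert.yml pins the parse tree for both forms in bulk_insert.sql. The same dialect support is visible through the simple linting API; a sketch, again assuming sqlfluff 2.x:

    import sqlfluff

    sql = (
        "BULK INSERT my_schema.my_table FROM 'data.csv' "
        "WITH (BATCHSIZE = 1024, FORMAT = 'CSV');"
    )
    # lint() returns one dict per violation (rule code, description, position);
    # an empty list means the statement both parsed and passed the active rules.
    for violation in sqlfluff.lint(sql, dialect="tsql"):
        print(violation)
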
diff --git a/test/fixtures/dialects/tsql/cast_variable.yml b/test/fixtures/dialects/tsql/cast_variable.yml
index 8e4874b..46b5ea6 100644
--- a/test/fixtures/dialects/tsql/cast_variable.yml
+++ b/test/fixtures/dialects/tsql/cast_variable.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: bc6640b52894d99c9c6bb382dcaa1de71630c65eaa51007683569191d4fa7057
+_hash: 935199c7ac89c3a42afe824da3a00b3541e19ed276476b2ef8cd50e05ab7c023
 file:
   batch:
   - statement:
@@ -69,11 +69,12 @@ file:
                 keyword: as
                 data_type:
                   data_type_identifier: datetime2
-                  bracketed:
-                    start_bracket: (
-                    expression:
-                      numeric_literal: '7'
-                    end_bracket: )
+                  bracketed_arguments:
+                    bracketed:
+                      start_bracket: (
+                      expression:
+                        numeric_literal: '7'
+                      end_bracket: )
                 end_bracket: )
             alias_expression:
               keyword: as
@@ -94,10 +95,11 @@ file:
         parameter: '@sample'
         data_type:
           data_type_identifier: nvarchar
-          bracketed:
-            start_bracket: (
-            keyword: max
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              keyword: max
+              end_bracket: )
         comparison_operator:
           raw_comparison_operator: '='
         expression:
@@ -111,8 +113,9 @@ file:
               keyword: as
               data_type:
                 data_type_identifier: nvarchar
-                bracketed:
-                  start_bracket: (
-                  keyword: max
-                  end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    keyword: max
+                    end_bracket: )
               end_bracket: )
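
The bracketed -> bracketed_arguments hunks here (and in the convert, create_function, create_table and constraint fixtures further down) all trace back to one parse-tree change in 2.x: the precision/length arguments of a data type such as nvarchar(max) or decimal(10,3) now sit under a dedicated bracketed_arguments node rather than a bare bracketed one. Code consuming the tree can target the new node type directly; a minimal sketch using the core Linter API, assuming sqlfluff 2.x:

    from sqlfluff.core import Linter

    tree = Linter(dialect="tsql").parse_string("DECLARE @sample nvarchar(max) = N'x';").tree
    # recursive_crawl yields every segment of the requested type, at any depth.
    for seg in tree.recursive_crawl("bracketed_arguments"):
        print(seg.raw)  # expected: (max)
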
diff --git a/test/fixtures/dialects/tsql/convert.yml b/test/fixtures/dialects/tsql/convert.yml
index 523a2d6..a487018 100644
--- a/test/fixtures/dialects/tsql/convert.yml
+++ b/test/fixtures/dialects/tsql/convert.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 333083802ef106d5d7512ee256d7e2e6856582381f86e757c0738a03e997edaa
+_hash: 4473a6434c9f0ce4b91fae0d3d548d74f7374294e65e6d08f3e5d5c60f2bdd62
 file:
   batch:
     statement:
@@ -18,11 +18,12 @@ file:
                 start_bracket: (
                 data_type:
                   data_type_identifier: nvarchar
-                  bracketed:
-                    start_bracket: (
-                    expression:
-                      numeric_literal: '100'
-                    end_bracket: )
+                  bracketed_arguments:
+                    bracketed:
+                      start_bracket: (
+                      expression:
+                        numeric_literal: '100'
+                      end_bracket: )
                 comma: ','
                 expression:
                   column_reference:
diff --git a/test/fixtures/dialects/tsql/create_database_scoped_credential.sql b/test/fixtures/dialects/tsql/create_database_scoped_credential.sql
new file mode 100644
index 0000000..2106d2e
--- /dev/null
+++ b/test/fixtures/dialects/tsql/create_database_scoped_credential.sql
@@ -0,0 +1,4 @@
+CREATE DATABASE SCOPED CREDENTIAL AppCred WITH IDENTITY = 'Mary5';
+
+CREATE DATABASE SCOPED CREDENTIAL AppCred WITH IDENTITY = 'Mary5',
+    SECRET = '<EnterStrongPasswordHere>';
diff --git a/test/fixtures/dialects/tsql/create_database_scoped_credential.yml b/test/fixtures/dialects/tsql/create_database_scoped_credential.yml
new file mode 100644
index 0000000..d9d96fa
--- /dev/null
+++ b/test/fixtures/dialects/tsql/create_database_scoped_credential.yml
@@ -0,0 +1,41 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 990a242afa79affe3e73f08022a569ce596765b1a3151a411810c97e5680db92
+file:
+  batch:
+  - statement:
+      create_database_scoped_credential_statement:
+      - keyword: CREATE
+      - keyword: DATABASE
+      - keyword: SCOPED
+      - keyword: CREDENTIAL
+      - object_reference:
+          naked_identifier: AppCred
+      - keyword: WITH
+      - keyword: IDENTITY
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - quoted_literal: "'Mary5'"
+  - statement_terminator: ;
+  - statement:
+      create_database_scoped_credential_statement:
+      - keyword: CREATE
+      - keyword: DATABASE
+      - keyword: SCOPED
+      - keyword: CREDENTIAL
+      - object_reference:
+          naked_identifier: AppCred
+      - keyword: WITH
+      - keyword: IDENTITY
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - quoted_literal: "'Mary5'"
+      - comma: ','
+      - keyword: SECRET
+      - comparison_operator:
+          raw_comparison_operator: '='
+      - quoted_literal: "'<EnterStrongPasswordHere>'"
+  - statement_terminator: ;
diff --git a/test/fixtures/dialects/tsql/create_external_data_source.sql b/test/fixtures/dialects/tsql/create_external_data_source.sql
new file mode 100644
index 0000000..38ea328
--- /dev/null
+++ b/test/fixtures/dialects/tsql/create_external_data_source.sql
@@ -0,0 +1,20 @@
+CREATE EXTERNAL DATA SOURCE MyOracleServer
+WITH (
+  LOCATION = 'oracle://145.145.145.145:1521',
+  CREDENTIAL = OracleProxyAccount,
+  PUSHDOWN = ON
+);
+
+CREATE EXTERNAL DATA SOURCE [OracleSalesSrvr]
+WITH (
+  LOCATION = 'oracle://145.145.145.145:1521',
+  CONNECTION_OPTIONS = 'ImpersonateUser=%CURRENT_USER',
+  CREDENTIAL = [OracleProxyCredential]
+);
+
+CREATE EXTERNAL DATA SOURCE [external_data_source_name]
+WITH (
+  LOCATION = N'oracle://XE', 
+  CREDENTIAL = [OracleCredentialTest], 
+  CONNECTION_OPTIONS = N'TNSNamesFile=C:\Temp\tnsnames.ora;ServerName=XE'
+);
\ No newline at end of file
diff --git a/test/fixtures/dialects/tsql/create_external_data_source.yml b/test/fixtures/dialects/tsql/create_external_data_source.yml
new file mode 100644
index 0000000..d9014b9
--- /dev/null
+++ b/test/fixtures/dialects/tsql/create_external_data_source.yml
@@ -0,0 +1,95 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 4ee9e14852d24196e56654034afae16e6b08f5929988bd7701cd477429ca1e3a
+file:
+  batch:
+  - statement:
+      create_external_data_source_statement:
+      - keyword: CREATE
+      - keyword: EXTERNAL
+      - keyword: DATA
+      - keyword: SOURCE
+      - object_reference:
+          naked_identifier: MyOracleServer
+      - keyword: WITH
+      - bracketed:
+        - start_bracket: (
+        - table_location_clause:
+            keyword: LOCATION
+            comparison_operator:
+              raw_comparison_operator: '='
+            quoted_literal: "'oracle://145.145.145.145:1521'"
+        - comma: ','
+        - keyword: CREDENTIAL
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - object_reference:
+            naked_identifier: OracleProxyAccount
+        - comma: ','
+        - keyword: PUSHDOWN
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: 'ON'
+        - end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      create_external_data_source_statement:
+      - keyword: CREATE
+      - keyword: EXTERNAL
+      - keyword: DATA
+      - keyword: SOURCE
+      - object_reference:
+          quoted_identifier: '[OracleSalesSrvr]'
+      - keyword: WITH
+      - bracketed:
+        - start_bracket: (
+        - table_location_clause:
+            keyword: LOCATION
+            comparison_operator:
+              raw_comparison_operator: '='
+            quoted_literal: "'oracle://145.145.145.145:1521'"
+        - comma: ','
+        - keyword: CONNECTION_OPTIONS
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - quoted_literal: "'ImpersonateUser=%CURRENT_USER'"
+        - comma: ','
+        - keyword: CREDENTIAL
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - object_reference:
+            quoted_identifier: '[OracleProxyCredential]'
+        - end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      create_external_data_source_statement:
+      - keyword: CREATE
+      - keyword: EXTERNAL
+      - keyword: DATA
+      - keyword: SOURCE
+      - object_reference:
+          quoted_identifier: '[external_data_source_name]'
+      - keyword: WITH
+      - bracketed:
+        - start_bracket: (
+        - table_location_clause:
+            keyword: LOCATION
+            comparison_operator:
+              raw_comparison_operator: '='
+            quoted_literal: "N'oracle://XE'"
+        - comma: ','
+        - keyword: CREDENTIAL
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - object_reference:
+            quoted_identifier: '[OracleCredentialTest]'
+        - comma: ','
+        - keyword: CONNECTION_OPTIONS
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - quoted_literal: "N'TNSNamesFile=C:\\Temp\\tnsnames.ora;ServerName=XE'"
+        - end_bracket: )
+  - statement_terminator: ;
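
Every generated fixture shares the same top-level shape: a _hash integrity field plus a file key holding the nested parse record. A short PyYAML sketch that sanity-checks that shape from a source checkout (path taken from this diff):

    import yaml

    with open("test/fixtures/dialects/tsql/create_external_data_source.yml") as f:
        fixture = yaml.safe_load(f)

    # Comments are dropped by the YAML loader; only the two data keys remain.
    assert set(fixture) == {"_hash", "file"}
    print(fixture["file"]["batch"][0]["statement"])
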
diff --git a/test/fixtures/dialects/tsql/create_external_file_format.sql b/test/fixtures/dialects/tsql/create_external_file_format.sql
new file mode 100644
index 0000000..5de8ebb
--- /dev/null
+++ b/test/fixtures/dialects/tsql/create_external_file_format.sql
@@ -0,0 +1,54 @@
+/*
+https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql?view=sql-server-ver16&tabs=delimited#examples
+*/
+
+CREATE EXTERNAL FILE FORMAT textdelimited1
+WITH (
+    FORMAT_TYPE = DELIMITEDTEXT,
+    FORMAT_OPTIONS (
+        FIELD_TERMINATOR = '|',
+        DATE_FORMAT = 'MM/dd/yyyy'
+    ),
+    DATA_COMPRESSION = 'org.apache.hadoop.io.compress.GzipCodec'
+);
+
+CREATE EXTERNAL FILE FORMAT skipHeader_CSV
+WITH (
+    FORMAT_TYPE = DELIMITEDTEXT,
+    FORMAT_OPTIONS (
+          FIELD_TERMINATOR = ',',
+          STRING_DELIMITER = '"',
+          FIRST_ROW = 2,
+          USE_TYPE_DEFAULT = True
+    )
+);
+
+CREATE EXTERNAL FILE FORMAT [rcfile1]
+WITH (
+    FORMAT_TYPE = RCFILE,
+    SERDE_METHOD = 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe',
+    DATA_COMPRESSION = 'org.apache.hadoop.io.compress.DefaultCodec'
+);
+
+CREATE EXTERNAL FILE FORMAT orcfile1
+WITH (
+    FORMAT_TYPE = ORC,
+    DATA_COMPRESSION = 'org.apache.hadoop.io.compress.SnappyCodec'
+);
+
+CREATE EXTERNAL FILE FORMAT parquetfile1
+WITH (
+    FORMAT_TYPE = PARQUET,
+    DATA_COMPRESSION = 'org.apache.hadoop.io.compress.SnappyCodec'
+);
+
+CREATE EXTERNAL FILE FORMAT jsonFileFormat
+WITH (
+    FORMAT_TYPE = JSON,
+    DATA_COMPRESSION = 'org.apache.hadoop.io.compress.SnappyCodec'
+);
+
+CREATE EXTERNAL FILE FORMAT DeltaFileFormat
+WITH (
+    FORMAT_TYPE = DELTA
+);
diff --git a/test/fixtures/dialects/tsql/create_external_file_format.yml b/test/fixtures/dialects/tsql/create_external_file_format.yml
new file mode 100644
index 0000000..f9aa5c2
--- /dev/null
+++ b/test/fixtures/dialects/tsql/create_external_file_format.yml
@@ -0,0 +1,208 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: c3c9eb5f60f14a124a441e27bdcdfed2cd507bd99e30502ab64ce6abf3c0d8bc
+file:
+  batch:
+  - statement:
+      create_external_file_format:
+      - keyword: CREATE
+      - keyword: EXTERNAL
+      - keyword: FILE
+      - keyword: FORMAT
+      - object_reference:
+          naked_identifier: textdelimited1
+      - keyword: WITH
+      - bracketed:
+          start_bracket: (
+          external_file_delimited_text_clause:
+          - keyword: FORMAT_TYPE
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: DELIMITEDTEXT
+          - comma: ','
+          - keyword: FORMAT_OPTIONS
+          - bracketed:
+            - start_bracket: (
+            - external_file_delimited_text_format_options_clause:
+                keyword: FIELD_TERMINATOR
+                comparison_operator:
+                  raw_comparison_operator: '='
+                quoted_literal: "'|'"
+            - comma: ','
+            - external_file_delimited_text_format_options_clause:
+                keyword: DATE_FORMAT
+                comparison_operator:
+                  raw_comparison_operator: '='
+                quoted_literal: "'MM/dd/yyyy'"
+            - end_bracket: )
+          - comma: ','
+          - keyword: DATA_COMPRESSION
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - file_compression: "'org.apache.hadoop.io.compress.GzipCodec'"
+          end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      create_external_file_format:
+      - keyword: CREATE
+      - keyword: EXTERNAL
+      - keyword: FILE
+      - keyword: FORMAT
+      - object_reference:
+          naked_identifier: skipHeader_CSV
+      - keyword: WITH
+      - bracketed:
+          start_bracket: (
+          external_file_delimited_text_clause:
+          - keyword: FORMAT_TYPE
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: DELIMITEDTEXT
+          - comma: ','
+          - keyword: FORMAT_OPTIONS
+          - bracketed:
+            - start_bracket: (
+            - external_file_delimited_text_format_options_clause:
+                keyword: FIELD_TERMINATOR
+                comparison_operator:
+                  raw_comparison_operator: '='
+                quoted_literal: "','"
+            - comma: ','
+            - external_file_delimited_text_format_options_clause:
+                keyword: STRING_DELIMITER
+                comparison_operator:
+                  raw_comparison_operator: '='
+                quoted_literal: "'\"'"
+            - comma: ','
+            - external_file_delimited_text_format_options_clause:
+                keyword: FIRST_ROW
+                comparison_operator:
+                  raw_comparison_operator: '='
+                numeric_literal: '2'
+            - comma: ','
+            - external_file_delimited_text_format_options_clause:
+                keyword: USE_TYPE_DEFAULT
+                comparison_operator:
+                  raw_comparison_operator: '='
+                boolean_literal: 'True'
+            - end_bracket: )
+          end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      create_external_file_format:
+      - keyword: CREATE
+      - keyword: EXTERNAL
+      - keyword: FILE
+      - keyword: FORMAT
+      - object_reference:
+          quoted_identifier: '[rcfile1]'
+      - keyword: WITH
+      - bracketed:
+          start_bracket: (
+          external_file_rc_clause:
+          - keyword: FORMAT_TYPE
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: RCFILE
+          - comma: ','
+          - keyword: SERDE_METHOD
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - serde_method: "'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'"
+          - comma: ','
+          - keyword: DATA_COMPRESSION
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - file_compression: "'org.apache.hadoop.io.compress.DefaultCodec'"
+          end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      create_external_file_format:
+      - keyword: CREATE
+      - keyword: EXTERNAL
+      - keyword: FILE
+      - keyword: FORMAT
+      - object_reference:
+          naked_identifier: orcfile1
+      - keyword: WITH
+      - bracketed:
+          start_bracket: (
+          external_file_orc_clause:
+          - keyword: FORMAT_TYPE
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: ORC
+          - comma: ','
+          - keyword: DATA_COMPRESSION
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - file_compression: "'org.apache.hadoop.io.compress.SnappyCodec'"
+          end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      create_external_file_format:
+      - keyword: CREATE
+      - keyword: EXTERNAL
+      - keyword: FILE
+      - keyword: FORMAT
+      - object_reference:
+          naked_identifier: parquetfile1
+      - keyword: WITH
+      - bracketed:
+          start_bracket: (
+          external_file_parquet_clause:
+          - keyword: FORMAT_TYPE
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: PARQUET
+          - comma: ','
+          - keyword: DATA_COMPRESSION
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - file_compression: "'org.apache.hadoop.io.compress.SnappyCodec'"
+          end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      create_external_file_format:
+      - keyword: CREATE
+      - keyword: EXTERNAL
+      - keyword: FILE
+      - keyword: FORMAT
+      - object_reference:
+          naked_identifier: jsonFileFormat
+      - keyword: WITH
+      - bracketed:
+          start_bracket: (
+          external_file_json_clause:
+          - keyword: FORMAT_TYPE
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: JSON
+          - comma: ','
+          - keyword: DATA_COMPRESSION
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - file_compression: "'org.apache.hadoop.io.compress.SnappyCodec'"
+          end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      create_external_file_format:
+      - keyword: CREATE
+      - keyword: EXTERNAL
+      - keyword: FILE
+      - keyword: FORMAT
+      - object_reference:
+          naked_identifier: DeltaFileFormat
+      - keyword: WITH
+      - bracketed:
+          start_bracket: (
+          external_file_delta_clause:
+          - keyword: FORMAT_TYPE
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: DELTA
+          end_bracket: )
+  - statement_terminator: ;
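
create_external_file_format.sql walks the documented FORMAT_TYPE variants (DELIMITEDTEXT, RCFILE, ORC, PARQUET, JSON, DELTA). The three compression-only variants share one shape, which lends itself to a parametrised spot check; a sketch assuming pytest and sqlfluff 2.x (the test name is hypothetical):

    import pytest
    import sqlfluff

    @pytest.mark.parametrize("fmt", ["ORC", "PARQUET", "JSON"])
    def test_external_file_format_parses(fmt):
        sql = (
            f"CREATE EXTERNAL FILE FORMAT ff WITH ("
            f"FORMAT_TYPE = {fmt}, "
            f"DATA_COMPRESSION = 'org.apache.hadoop.io.compress.SnappyCodec');"
        )
        # parse() raises if the tsql grammar rejects the statement.
        sqlfluff.parse(sql, dialect="tsql")
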
diff --git a/test/fixtures/dialects/tsql/create_external_table.sql b/test/fixtures/dialects/tsql/create_external_table.sql
new file mode 100644
index 0000000..a7616af
--- /dev/null
+++ b/test/fixtures/dialects/tsql/create_external_table.sql
@@ -0,0 +1,46 @@
+CREATE EXTERNAL TABLE schema_name.table_name
+(
+    column_name_1 VARCHAR(50),
+    column_name_2 VARCHAR(50) NULL,
+    column_name_3 VARCHAR(50) NOT NULL
+)
+WITH (
+    LOCATION = N'/path/to/folder/',
+    DATA_SOURCE = external_data_source,
+    FILE_FORMAT = parquetfileformat,
+    REJECT_TYPE = VALUE,
+    REJECT_VALUE = 0,
+    REJECTED_ROW_LOCATION = '/REJECT_Directory'
+)
+
+CREATE EXTERNAL TABLE schema_name.table_name
+(
+    column_name_1 VARCHAR(50),
+    column_name_2 VARCHAR(50) NULL,
+    column_name_3 VARCHAR(50) NOT NULL
+)
+WITH (
+    LOCATION = N'/path/to/folder/',
+    DATA_SOURCE = external_data_source,
+    FILE_FORMAT = parquetfileformat,
+    REJECT_TYPE = PERCENTAGE,
+    REJECT_VALUE = 0,
+    REJECT_SAMPLE_VALUE = 0,
+    REJECTED_ROW_LOCATION = '/REJECT_DIRECTORY'
+)
+
+CREATE EXTERNAL TABLE customers (
+    o_orderkey DECIMAL(38) NOT NULL,
+    o_custkey DECIMAL(38) NOT NULL,
+    o_orderstatus CHAR COLLATE latin1_general_bin NOT NULL,
+    o_totalprice DECIMAL(15, 2) NOT NULL,
+    o_orderdate DATETIME2(0) NOT NULL,
+    o_orderpriority CHAR(15) COLLATE latin1_general_bin NOT NULL,
+    o_clerk CHAR(15) COLLATE latin1_general_bin NOT NULL,
+    o_shippriority DECIMAL(38) NOT NULL,
+    o_comment VARCHAR(79) COLLATE latin1_general_bin NOT NULL
+)
+WITH (
+    LOCATION = 'DB1.mySchema.customer',
+    DATA_SOURCE = external_data_source_name
+);
diff --git a/test/fixtures/dialects/tsql/create_external_table.yml b/test/fixtures/dialects/tsql/create_external_table.yml
new file mode 100644
index 0000000..f24b643
--- /dev/null
+++ b/test/fixtures/dialects/tsql/create_external_table.yml
@@ -0,0 +1,347 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 674327298cc2e1ec4d6325ddb6a906d3af04bf5cc8dfb35b51693ee84ccf9a51
+file:
+  batch:
+  - statement:
+      create_external_table_statement:
+      - keyword: CREATE
+      - keyword: EXTERNAL
+      - keyword: TABLE
+      - object_reference:
+        - naked_identifier: schema_name
+        - dot: .
+        - naked_identifier: table_name
+      - bracketed:
+        - start_bracket: (
+        - column_definition:
+            naked_identifier: column_name_1
+            data_type:
+              data_type_identifier: VARCHAR
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '50'
+                  end_bracket: )
+        - comma: ','
+        - column_definition:
+            naked_identifier: column_name_2
+            data_type:
+              data_type_identifier: VARCHAR
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '50'
+                  end_bracket: )
+            column_constraint_segment:
+              keyword: 'NULL'
+        - comma: ','
+        - column_definition:
+            naked_identifier: column_name_3
+            data_type:
+              data_type_identifier: VARCHAR
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '50'
+                  end_bracket: )
+            column_constraint_segment:
+            - keyword: NOT
+            - keyword: 'NULL'
+        - end_bracket: )
+      - keyword: WITH
+      - bracketed:
+        - start_bracket: (
+        - table_location_clause:
+            keyword: LOCATION
+            comparison_operator:
+              raw_comparison_operator: '='
+            quoted_literal: "N'/path/to/folder/'"
+        - comma: ','
+        - keyword: DATA_SOURCE
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - object_reference:
+            naked_identifier: external_data_source
+        - comma: ','
+        - keyword: FILE_FORMAT
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - object_reference:
+            naked_identifier: parquetfileformat
+        - comma: ','
+        - keyword: REJECT_TYPE
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: VALUE
+        - comma: ','
+        - keyword: REJECT_VALUE
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - numeric_literal: '0'
+        - comma: ','
+        - keyword: REJECTED_ROW_LOCATION
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - quoted_literal: "'/REJECT_Directory'"
+        - end_bracket: )
+  - statement:
+      create_external_table_statement:
+      - keyword: CREATE
+      - keyword: EXTERNAL
+      - keyword: TABLE
+      - object_reference:
+        - naked_identifier: schema_name
+        - dot: .
+        - naked_identifier: table_name
+      - bracketed:
+        - start_bracket: (
+        - column_definition:
+            naked_identifier: column_name_1
+            data_type:
+              data_type_identifier: VARCHAR
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '50'
+                  end_bracket: )
+        - comma: ','
+        - column_definition:
+            naked_identifier: column_name_2
+            data_type:
+              data_type_identifier: VARCHAR
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '50'
+                  end_bracket: )
+            column_constraint_segment:
+              keyword: 'NULL'
+        - comma: ','
+        - column_definition:
+            naked_identifier: column_name_3
+            data_type:
+              data_type_identifier: VARCHAR
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '50'
+                  end_bracket: )
+            column_constraint_segment:
+            - keyword: NOT
+            - keyword: 'NULL'
+        - end_bracket: )
+      - keyword: WITH
+      - bracketed:
+        - start_bracket: (
+        - table_location_clause:
+            keyword: LOCATION
+            comparison_operator:
+              raw_comparison_operator: '='
+            quoted_literal: "N'/path/to/folder/'"
+        - comma: ','
+        - keyword: DATA_SOURCE
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - object_reference:
+            naked_identifier: external_data_source
+        - comma: ','
+        - keyword: FILE_FORMAT
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - object_reference:
+            naked_identifier: parquetfileformat
+        - comma: ','
+        - keyword: REJECT_TYPE
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - keyword: PERCENTAGE
+        - comma: ','
+        - keyword: REJECT_VALUE
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - numeric_literal: '0'
+        - comma: ','
+        - keyword: REJECT_SAMPLE_VALUE
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - numeric_literal: '0'
+        - comma: ','
+        - keyword: REJECTED_ROW_LOCATION
+        - comparison_operator:
+            raw_comparison_operator: '='
+        - quoted_literal: "'/REJECT_DIRECTORY'"
+        - end_bracket: )
+  - statement:
+      create_external_table_statement:
+      - keyword: CREATE
+      - keyword: EXTERNAL
+      - keyword: TABLE
+      - object_reference:
+          naked_identifier: customers
+      - bracketed:
+        - start_bracket: (
+        - column_definition:
+            naked_identifier: o_orderkey
+            data_type:
+              data_type_identifier: DECIMAL
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '38'
+                  end_bracket: )
+            column_constraint_segment:
+            - keyword: NOT
+            - keyword: 'NULL'
+        - comma: ','
+        - column_definition:
+            naked_identifier: o_custkey
+            data_type:
+              data_type_identifier: DECIMAL
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '38'
+                  end_bracket: )
+            column_constraint_segment:
+            - keyword: NOT
+            - keyword: 'NULL'
+        - comma: ','
+        - column_definition:
+          - naked_identifier: o_orderstatus
+          - data_type:
+              data_type_identifier: CHAR
+          - column_constraint_segment:
+              keyword: COLLATE
+              object_reference:
+                naked_identifier: latin1_general_bin
+          - column_constraint_segment:
+            - keyword: NOT
+            - keyword: 'NULL'
+        - comma: ','
+        - column_definition:
+            naked_identifier: o_totalprice
+            data_type:
+              data_type_identifier: DECIMAL
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '15'
+                - comma: ','
+                - expression:
+                    numeric_literal: '2'
+                - end_bracket: )
+            column_constraint_segment:
+            - keyword: NOT
+            - keyword: 'NULL'
+        - comma: ','
+        - column_definition:
+            naked_identifier: o_orderdate
+            data_type:
+              data_type_identifier: DATETIME2
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '0'
+                  end_bracket: )
+            column_constraint_segment:
+            - keyword: NOT
+            - keyword: 'NULL'
+        - comma: ','
+        - column_definition:
+          - naked_identifier: o_orderpriority
+          - data_type:
+              data_type_identifier: CHAR
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '15'
+                  end_bracket: )
+          - column_constraint_segment:
+              keyword: COLLATE
+              object_reference:
+                naked_identifier: latin1_general_bin
+          - column_constraint_segment:
+            - keyword: NOT
+            - keyword: 'NULL'
+        - comma: ','
+        - column_definition:
+          - naked_identifier: o_clerk
+          - data_type:
+              data_type_identifier: CHAR
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '15'
+                  end_bracket: )
+          - column_constraint_segment:
+              keyword: COLLATE
+              object_reference:
+                naked_identifier: latin1_general_bin
+          - column_constraint_segment:
+            - keyword: NOT
+            - keyword: 'NULL'
+        - comma: ','
+        - column_definition:
+            naked_identifier: o_shippriority
+            data_type:
+              data_type_identifier: DECIMAL
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '38'
+                  end_bracket: )
+            column_constraint_segment:
+            - keyword: NOT
+            - keyword: 'NULL'
+        - comma: ','
+        - column_definition:
+          - naked_identifier: o_comment
+          - data_type:
+              data_type_identifier: VARCHAR
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '79'
+                  end_bracket: )
+          - column_constraint_segment:
+              keyword: COLLATE
+              object_reference:
+                naked_identifier: latin1_general_bin
+          - column_constraint_segment:
+            - keyword: NOT
+            - keyword: 'NULL'
+        - end_bracket: )
+      - keyword: WITH
+      - bracketed:
+          start_bracket: (
+          table_location_clause:
+            keyword: LOCATION
+            comparison_operator:
+              raw_comparison_operator: '='
+            quoted_literal: "'DB1.mySchema.customer'"
+          comma: ','
+          keyword: DATA_SOURCE
+          comparison_operator:
+            raw_comparison_operator: '='
+          object_reference:
+            naked_identifier: external_data_source_name
+          end_bracket: )
+  - statement_terminator: ;
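
The YAML above is the canonical record for the external-table fixture, but the same tree can also be inspected interactively; a sketch using the core Linter, with the statement trimmed down from the fixture:

    from sqlfluff.core import Linter

    sql = (
        "CREATE EXTERNAL TABLE customers (o_orderkey DECIMAL(38) NOT NULL)\n"
        "WITH (LOCATION = 'DB1.mySchema.customer', DATA_SOURCE = external_data_source_name);"
    )
    parsed = Linter(dialect="tsql").parse_string(sql)
    # stringify() renders the same nesting that the YAML fixtures serialise.
    print(parsed.tree.stringify())
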
diff --git a/test/fixtures/dialects/tsql/create_function.sql b/test/fixtures/dialects/tsql/create_function.sql
index 93cdbea..8a9c1fa 100644
--- a/test/fixtures/dialects/tsql/create_function.sql
+++ b/test/fixtures/dialects/tsql/create_function.sql
@@ -32,3 +32,22 @@ GO
 
 ALTER FUNCTION F (@DATE as datetime) RETURNS INT AS BEGIN RETURN 0 END;
 GO
+
+CREATE   FUNCTION [UTIL].[getItemList] (
+     @list ItemList READONLY
+)
+RETURNS nvarchar(max)
+AS
+
+BEGIN
+      DECLARE @str nvarchar(max) = ''
+
+      SELECT @str = @str + [item] FROM (
+        SELECT TOP (9999) [item]
+        FROM @list
+        ORDER BY [order]
+      ) i
+
+      RETURN @str
+END;
+GO
diff --git a/test/fixtures/dialects/tsql/create_function.yml b/test/fixtures/dialects/tsql/create_function.yml
index 42c72c0..2712aed 100644
--- a/test/fixtures/dialects/tsql/create_function.yml
+++ b/test/fixtures/dialects/tsql/create_function.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 4c002f668682103d0f48fae1d2ccc011e9b2866999d027077a39e237a07896e3
+_hash: 9ded27ea23a5a527e9a235ea24249e64b61d4711444cf4b648973734d3618169
 file:
 - batch:
     statement:
@@ -89,11 +89,12 @@ file:
                               keyword: as
                               data_type:
                                 data_type_identifier: CHAR
-                                bracketed:
-                                  start_bracket: (
-                                  expression:
-                                    numeric_literal: '4'
-                                  end_bracket: )
+                                bracketed_arguments:
+                                  bracketed:
+                                    start_bracket: (
+                                    expression:
+                                      numeric_literal: '4'
+                                    end_bracket: )
                               end_bracket: )
                           binary_operator: +
                           quoted_literal: "'0104'"
@@ -148,11 +149,12 @@ file:
                                   keyword: AS
                                   data_type:
                                     data_type_identifier: CHAR
-                                    bracketed:
-                                      start_bracket: (
-                                      expression:
-                                        numeric_literal: '4'
-                                      end_bracket: )
+                                    bracketed_arguments:
+                                      bracketed:
+                                        start_bracket: (
+                                        expression:
+                                          numeric_literal: '4'
+                                        end_bracket: )
                                   end_bracket: )
                             - binary_operator: +
                             - quoted_literal: "'12'"
@@ -178,11 +180,12 @@ file:
                                   keyword: AS
                                   data_type:
                                     data_type_identifier: CHAR
-                                    bracketed:
-                                      start_bracket: (
-                                      expression:
-                                        numeric_literal: '2'
-                                      end_bracket: )
+                                    bracketed_arguments:
+                                      bracketed:
+                                        start_bracket: (
+                                        expression:
+                                          numeric_literal: '2'
+                                        end_bracket: )
                                   end_bracket: )
                             end_bracket: )
                         binary_operator: +
@@ -383,3 +386,105 @@ file:
           statement_terminator: ;
 - go_statement:
     keyword: GO
+- batch:
+    statement:
+      create_function_statement:
+      - keyword: CREATE
+      - keyword: FUNCTION
+      - object_reference:
+        - quoted_identifier: '[UTIL]'
+        - dot: .
+        - quoted_identifier: '[getItemList]'
+      - function_parameter_list:
+          bracketed:
+            start_bracket: (
+            parameter: '@list'
+            data_type:
+              data_type_identifier: ItemList
+            keyword: READONLY
+            end_bracket: )
+      - keyword: RETURNS
+      - data_type:
+          data_type_identifier: nvarchar
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              keyword: max
+              end_bracket: )
+      - keyword: AS
+      - procedure_statement:
+          statement:
+            begin_end_block:
+            - keyword: BEGIN
+            - statement:
+                declare_segment:
+                  keyword: DECLARE
+                  parameter: '@str'
+                  data_type:
+                    data_type_identifier: nvarchar
+                    bracketed_arguments:
+                      bracketed:
+                        start_bracket: (
+                        keyword: max
+                        end_bracket: )
+                  comparison_operator:
+                    raw_comparison_operator: '='
+                  expression:
+                    quoted_literal: "''"
+            - statement:
+                select_statement:
+                  select_clause:
+                    keyword: SELECT
+                    select_clause_element:
+                      alias_expression:
+                        parameter: '@str'
+                        raw_comparison_operator: '='
+                      expression:
+                        parameter: '@str'
+                        binary_operator: +
+                        column_reference:
+                          quoted_identifier: '[item]'
+                  from_clause:
+                    keyword: FROM
+                    from_expression:
+                      from_expression_element:
+                        table_expression:
+                          bracketed:
+                            start_bracket: (
+                            select_statement:
+                              select_clause:
+                                keyword: SELECT
+                                select_clause_modifier:
+                                  keyword: TOP
+                                  bracketed:
+                                    start_bracket: (
+                                    expression:
+                                      numeric_literal: '9999'
+                                    end_bracket: )
+                                select_clause_element:
+                                  column_reference:
+                                    quoted_identifier: '[item]'
+                              from_clause:
+                                keyword: FROM
+                                from_expression:
+                                  from_expression_element:
+                                    table_expression:
+                                      table_reference:
+                                        parameter: '@list'
+                              orderby_clause:
+                              - keyword: ORDER
+                              - keyword: BY
+                              - column_reference:
+                                  quoted_identifier: '[order]'
+                            end_bracket: )
+                        alias_expression:
+                          naked_identifier: i
+            - statement:
+                return_segment:
+                  keyword: RETURN
+                  expression:
+                    parameter: '@str'
+            - keyword: END
+          statement_terminator: ;
+- go_statement:
+    keyword: GO
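
Besides the new scalar-function body, this fixture shows how the tsql dialect models batch separators: each GO becomes a top-level go_statement, and the statements between separators are grouped under batch nodes. A sketch counting both, assuming sqlfluff 2.x:

    from sqlfluff.core import Linter

    script = "SELECT 1;\nGO\nSELECT 2;\nGO\n"
    tree = Linter(dialect="tsql").parse_string(script).tree
    print(sum(1 for _ in tree.recursive_crawl("batch")))         # expected: 2
    print(sum(1 for _ in tree.recursive_crawl("go_statement")))  # expected: 2
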
diff --git a/test/fixtures/dialects/tsql/create_schema.yml b/test/fixtures/dialects/tsql/create_schema.yml
index 06725a4..b9cbb6d 100644
--- a/test/fixtures/dialects/tsql/create_schema.yml
+++ b/test/fixtures/dialects/tsql/create_schema.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 64039a9ce14c71cb7752bc27e8af5ad54e226f7f0e7a4c2ce5f9ac81643e82d6
+_hash: c308748e09e74b7c6c4e3a3cdbf45e63a4a756b19637c7fc16cf053dabb11c88
 file:
 - batch:
     statement:
@@ -22,7 +22,8 @@ file:
       - schema_reference:
           quoted_identifier: '[Extracts]'
       - keyword: AUTHORIZATION
-      - quoted_identifier: '[dbo]'
+      - role_reference:
+          quoted_identifier: '[dbo]'
       - statement_terminator: ;
 - go_statement:
     keyword: GO
diff --git a/test/fixtures/dialects/tsql/create_table.sql b/test/fixtures/dialects/tsql/create_table.sql
index 17abace..198849a 100644
--- a/test/fixtures/dialects/tsql/create_table.sql
+++ b/test/fixtures/dialects/tsql/create_table.sql
@@ -3,3 +3,13 @@ CREATE TABLE [dbo].[EC DC] (
     [ColumnC] varchar(100),
     [ColumnDecimal] decimal(10,3)
 )
+
+-- Test various forms of quoted data types
+CREATE TABLE foo (
+    pk int PRIMARY KEY,
+    quoted_name [custom udt],
+    qualified_name sch.qualified,
+    quoted_qualified "my schema".qualified,
+    more_quoted "my schema"."custom udt",
+    quoted_udt sch.[custom udt]
+);
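
The added CREATE TABLE exercises quoted and schema-qualified user-defined type names ([custom udt], "my schema".qualified, sch.[custom udt] and friends). As the YAML below shows, each one parses as a data_type whose parts are quoted or naked identifiers; a sketch listing them, assuming sqlfluff 2.x:

    from sqlfluff.core import Linter

    sql = 'CREATE TABLE foo (quoted_udt sch.[custom udt], more_quoted "my schema"."custom udt");'
    tree = Linter(dialect="tsql").parse_string(sql).tree
    for seg in tree.recursive_crawl("data_type"):
        print(seg.raw)  # expected: sch.[custom udt] and "my schema"."custom udt"
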
diff --git a/test/fixtures/dialects/tsql/create_table.yml b/test/fixtures/dialects/tsql/create_table.yml
index 4a3c9ba..703e280 100644
--- a/test/fixtures/dialects/tsql/create_table.yml
+++ b/test/fixtures/dialects/tsql/create_table.yml
@@ -3,10 +3,10 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 9c4df4bc451697763a63f72353e16d68127913db625f1b309fd65bccac00232f
+_hash: cfcae343a1e7dffe16887fb99acfec87e64823d1dbbe32c94bd4a971757073cb
 file:
   batch:
-    statement:
+  - statement:
       create_table_statement:
       - keyword: CREATE
       - keyword: TABLE
@@ -20,32 +20,85 @@ file:
             quoted_identifier: '[Column B]'
             data_type:
               data_type_identifier: '[varchar]'
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
         - comma: ','
         - column_definition:
             quoted_identifier: '[ColumnC]'
             data_type:
               data_type_identifier: varchar
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
         - comma: ','
         - column_definition:
             quoted_identifier: '[ColumnDecimal]'
             data_type:
               data_type_identifier: decimal
-              bracketed:
-              - start_bracket: (
-              - expression:
-                  numeric_literal: '10'
-              - comma: ','
-              - expression:
-                  numeric_literal: '3'
-              - end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '10'
+                - comma: ','
+                - expression:
+                    numeric_literal: '3'
+                - end_bracket: )
         - end_bracket: )
+  - statement:
+      create_table_statement:
+      - keyword: CREATE
+      - keyword: TABLE
+      - table_reference:
+          naked_identifier: foo
+      - bracketed:
+        - start_bracket: (
+        - column_definition:
+            naked_identifier: pk
+            data_type:
+              data_type_identifier: int
+            column_constraint_segment:
+            - keyword: PRIMARY
+            - keyword: KEY
+        - comma: ','
+        - column_definition:
+            naked_identifier: quoted_name
+            data_type:
+              quoted_identifier: '[custom udt]'
+        - comma: ','
+        - column_definition:
+            naked_identifier: qualified_name
+            data_type:
+              naked_identifier: sch
+              dot: .
+              data_type_identifier: qualified
+        - comma: ','
+        - column_definition:
+            naked_identifier: quoted_qualified
+            data_type:
+              quoted_identifier: '"my schema"'
+              dot: .
+              data_type_identifier: qualified
+        - comma: ','
+        - column_definition:
+            naked_identifier: more_quoted
+            data_type:
+            - quoted_identifier: '"my schema"'
+            - dot: .
+            - quoted_identifier: '"custom udt"'
+        - comma: ','
+        - column_definition:
+            naked_identifier: quoted_udt
+            data_type:
+              naked_identifier: sch
+              dot: .
+              quoted_identifier: '[custom udt]'
+        - end_bracket: )
+      - statement_terminator: ;
diff --git a/test/fixtures/dialects/tsql/create_table_constraints.yml b/test/fixtures/dialects/tsql/create_table_constraints.yml
index 3703c01..78bf35a 100644
--- a/test/fixtures/dialects/tsql/create_table_constraints.yml
+++ b/test/fixtures/dialects/tsql/create_table_constraints.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: ae78f80d2edb04232dca076a9b908ecec860d7000e48422858ae0811b3ace7c1
+_hash: c209d12094922d427621030eedd44fc62ddf5d14b674f093b04220a29b5d32e6
 file:
 - batch:
     statement:
@@ -45,11 +45,12 @@ file:
             quoted_identifier: '[ColumnC]'
             data_type:
               data_type_identifier: varchar
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
             column_constraint_segment:
               keyword: DEFAULT
               quoted_literal: "'mydefault'"
@@ -208,11 +209,12 @@ file:
           - quoted_identifier: '[ColumnB]'
           - data_type:
               data_type_identifier: '[varchar]'
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
           - column_constraint_segment:
               keyword: FILESTREAM
           - column_constraint_segment:
@@ -230,11 +232,12 @@ file:
           - quoted_identifier: '[ColumnC]'
           - data_type:
               data_type_identifier: varchar
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
           - column_constraint_segment:
               keyword: 'NULL'
           - column_constraint_segment:
@@ -246,14 +249,15 @@ file:
             quoted_identifier: '[ColumnDecimal]'
             data_type:
               data_type_identifier: decimal
-              bracketed:
-              - start_bracket: (
-              - expression:
-                  numeric_literal: '10'
-              - comma: ','
-              - expression:
-                  numeric_literal: '3'
-              - end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '10'
+                - comma: ','
+                - expression:
+                    numeric_literal: '3'
+                - end_bracket: )
             column_constraint_segment:
             - keyword: GENERATED
             - keyword: ALWAYS
@@ -266,11 +270,12 @@ file:
             quoted_identifier: '[columnE]'
             data_type:
               data_type_identifier: varchar
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
             column_constraint_segment:
               encrypted_with_grammar:
               - keyword: ENCRYPTED
@@ -297,11 +302,12 @@ file:
             quoted_identifier: '[column1]'
             data_type:
               data_type_identifier: varchar
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
             column_constraint_segment:
               keyword: collate
               object_reference:
diff --git a/test/fixtures/dialects/tsql/create_table_on_filegroup.yml b/test/fixtures/dialects/tsql/create_table_on_filegroup.yml
index 5e19c38..67593a8 100644
--- a/test/fixtures/dialects/tsql/create_table_on_filegroup.yml
+++ b/test/fixtures/dialects/tsql/create_table_on_filegroup.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 85548d5c81e3b0d0f3b7b710c41ed57f079648a0884c028004ff292adf184f5d
+_hash: e98064d324c4b081392469410dbfab5dddbc12b258a107db6cd00d212abb8f2e
 file:
   batch:
     statement:
@@ -20,34 +20,37 @@ file:
             quoted_identifier: '[Column B]'
             data_type:
               data_type_identifier: '[varchar]'
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
         - comma: ','
         - column_definition:
             quoted_identifier: '[ColumnC]'
             data_type:
               data_type_identifier: varchar
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
         - comma: ','
         - column_definition:
             quoted_identifier: '[ColumnDecimal]'
             data_type:
               data_type_identifier: decimal
-              bracketed:
-              - start_bracket: (
-              - expression:
-                  numeric_literal: '10'
-              - comma: ','
-              - expression:
-                  numeric_literal: '3'
-              - end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '10'
+                - comma: ','
+                - expression:
+                    numeric_literal: '3'
+                - end_bracket: )
         - end_bracket: )
       - on_partition_or_filegroup_statement:
           filegroup_clause:
diff --git a/test/fixtures/dialects/tsql/create_table_with_distribution.yml b/test/fixtures/dialects/tsql/create_table_with_distribution.yml
index 39f4f93..43e8e84 100644
--- a/test/fixtures/dialects/tsql/create_table_with_distribution.yml
+++ b/test/fixtures/dialects/tsql/create_table_with_distribution.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 949a316997d06d8bd81c0b45452d66cb06690d73bbbad7dfdfcc4c30a70fcd3c
+_hash: aeb7acea73d669a0a20a1a1a8280c6f23521a663b9ab9fd3c40d4c110312b983
 file:
 - batch:
     statement:
@@ -20,34 +20,37 @@ file:
             quoted_identifier: '[Column B]'
             data_type:
               data_type_identifier: '[varchar]'
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
         - comma: ','
         - column_definition:
             quoted_identifier: '[ColumnC]'
             data_type:
               data_type_identifier: varchar
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
         - comma: ','
         - column_definition:
             quoted_identifier: '[ColumnDecimal]'
             data_type:
               data_type_identifier: decimal
-              bracketed:
-              - start_bracket: (
-              - expression:
-                  numeric_literal: '10'
-              - comma: ','
-              - expression:
-                  numeric_literal: '3'
-              - end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '10'
+                - comma: ','
+                - expression:
+                    numeric_literal: '3'
+                - end_bracket: )
         - end_bracket: )
       - table_distribution_index_clause:
           keyword: WITH
@@ -93,34 +96,37 @@ file:
             quoted_identifier: '[Column B]'
             data_type:
               data_type_identifier: '[varchar]'
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
         - comma: ','
         - column_definition:
             quoted_identifier: '[ColumnC]'
             data_type:
               data_type_identifier: varchar
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
         - comma: ','
         - column_definition:
             quoted_identifier: '[ColumnDecimal]'
             data_type:
               data_type_identifier: decimal
-              bracketed:
-              - start_bracket: (
-              - expression:
-                  numeric_literal: '10'
-              - comma: ','
-              - expression:
-                  numeric_literal: '3'
-              - end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '10'
+                - comma: ','
+                - expression:
+                    numeric_literal: '3'
+                - end_bracket: )
         - end_bracket: )
       - table_distribution_index_clause:
           keyword: WITH
@@ -164,34 +170,37 @@ file:
             quoted_identifier: '[Column B]'
             data_type:
               data_type_identifier: '[varchar]'
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
         - comma: ','
         - column_definition:
             quoted_identifier: '[ColumnC]'
             data_type:
               data_type_identifier: varchar
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
         - comma: ','
         - column_definition:
             quoted_identifier: '[ColumnDecimal]'
             data_type:
               data_type_identifier: decimal
-              bracketed:
-              - start_bracket: (
-              - expression:
-                  numeric_literal: '10'
-              - comma: ','
-              - expression:
-                  numeric_literal: '3'
-              - end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '10'
+                - comma: ','
+                - expression:
+                    numeric_literal: '3'
+                - end_bracket: )
         - end_bracket: )
       - table_distribution_index_clause:
           keyword: WITH
@@ -243,34 +252,37 @@ file:
             quoted_identifier: '[Column B]'
             data_type:
               data_type_identifier: '[varchar]'
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
         - comma: ','
         - column_definition:
             quoted_identifier: '[ColumnC]'
             data_type:
               data_type_identifier: varchar
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
         - comma: ','
         - column_definition:
             quoted_identifier: '[ColumnDecimal]'
             data_type:
               data_type_identifier: decimal
-              bracketed:
-              - start_bracket: (
-              - expression:
-                  numeric_literal: '10'
-              - comma: ','
-              - expression:
-                  numeric_literal: '3'
-              - end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '10'
+                - comma: ','
+                - expression:
+                    numeric_literal: '3'
+                - end_bracket: )
         - end_bracket: )
       - table_distribution_index_clause:
           keyword: WITH
@@ -327,34 +339,37 @@ file:
             quoted_identifier: '[Column B]'
             data_type:
               data_type_identifier: '[varchar]'
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
         - comma: ','
         - column_definition:
             quoted_identifier: '[ColumnC]'
             data_type:
               data_type_identifier: varchar
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
         - comma: ','
         - column_definition:
             quoted_identifier: '[ColumnDecimal]'
             data_type:
               data_type_identifier: decimal
-              bracketed:
-              - start_bracket: (
-              - expression:
-                  numeric_literal: '10'
-              - comma: ','
-              - expression:
-                  numeric_literal: '3'
-              - end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '10'
+                - comma: ','
+                - expression:
+                    numeric_literal: '3'
+                - end_bracket: )
         - end_bracket: )
       - table_distribution_index_clause:
           keyword: WITH
@@ -410,34 +425,37 @@ file:
             quoted_identifier: '[Column B]'
             data_type:
               data_type_identifier: '[varchar]'
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
         - comma: ','
         - column_definition:
             quoted_identifier: '[ColumnC]'
             data_type:
               data_type_identifier: varchar
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
         - comma: ','
         - column_definition:
             quoted_identifier: '[ColumnDecimal]'
             data_type:
               data_type_identifier: decimal
-              bracketed:
-              - start_bracket: (
-              - expression:
-                  numeric_literal: '10'
-              - comma: ','
-              - expression:
-                  numeric_literal: '3'
-              - end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '10'
+                - comma: ','
+                - expression:
+                    numeric_literal: '3'
+                - end_bracket: )
         - end_bracket: )
       - table_distribution_index_clause:
           keyword: WITH
diff --git a/test/fixtures/dialects/tsql/create_table_with_sequence_bracketed.yml b/test/fixtures/dialects/tsql/create_table_with_sequence_bracketed.yml
index 6cbcf7b..eebe3d9 100644
--- a/test/fixtures/dialects/tsql/create_table_with_sequence_bracketed.yml
+++ b/test/fixtures/dialects/tsql/create_table_with_sequence_bracketed.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 1bd12039e6f42fc17a4d325703a2c1ec44307e77fc9a4ea4f90e0bce8f7aae2e
+_hash: bd8e5b608fd5cd901c0f50ac677f1ae452f8d065fe2abcb5f38423944df59dae
 file:
 - batch:
     statement:
@@ -114,24 +114,26 @@ file:
             naked_identifier: GMCODE
             data_type:
               data_type_identifier: VARCHAR
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
         - comma: ','
         - column_definition:
             naked_identifier: AVERAGE_RNA_FLOW_PER_100000
             data_type:
               data_type_identifier: DECIMAL
-              bracketed:
-              - start_bracket: (
-              - expression:
-                  numeric_literal: '16'
-              - comma: ','
-              - expression:
-                  numeric_literal: '2'
-              - end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '16'
+                - comma: ','
+                - expression:
+                    numeric_literal: '2'
+                - end_bracket: )
             column_constraint_segment:
               keyword: 'NULL'
         - comma: ','
@@ -274,11 +276,12 @@ file:
             naked_identifier: GEMEENTE_CODE
             data_type:
               data_type_identifier: VARCHAR
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
             column_constraint_segment:
               keyword: 'NULL'
         - comma: ','
@@ -286,11 +289,12 @@ file:
             naked_identifier: GEMEENTE
             data_type:
               data_type_identifier: VARCHAR
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
             column_constraint_segment:
               keyword: 'NULL'
         - comma: ','
@@ -298,11 +302,12 @@ file:
             naked_identifier: LEEFTIJD
             data_type:
               data_type_identifier: VARCHAR
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
             column_constraint_segment:
               keyword: 'NULL'
         - comma: ','
@@ -310,11 +315,12 @@ file:
             naked_identifier: GESLACHT
             data_type:
               data_type_identifier: VARCHAR
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
             column_constraint_segment:
               keyword: 'NULL'
         - comma: ','
@@ -322,11 +328,12 @@ file:
             naked_identifier: DATUM_PEILING
             data_type:
               data_type_identifier: VARCHAR
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
             column_constraint_segment:
               keyword: 'NULL'
         - comma: ','
@@ -334,11 +341,12 @@ file:
             naked_identifier: POPULATIE
             data_type:
               data_type_identifier: VARCHAR
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
             column_constraint_segment:
               keyword: 'NULL'
         - comma: ','
@@ -346,11 +354,12 @@ file:
             naked_identifier: VEILIGHEIDSREGIO_CODE
             data_type:
               data_type_identifier: VARCHAR
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
             column_constraint_segment:
               keyword: 'NULL'
         - comma: ','
@@ -358,11 +367,12 @@ file:
             naked_identifier: VEILIGHEIDSREGIO_NAAM
             data_type:
               data_type_identifier: VARCHAR
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
             column_constraint_segment:
               keyword: 'NULL'
         - comma: ','
@@ -370,11 +380,12 @@ file:
             naked_identifier: PROVINCIE_CODE
             data_type:
               data_type_identifier: VARCHAR
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
             column_constraint_segment:
               keyword: 'NULL'
         - comma: ','
@@ -382,11 +393,12 @@ file:
             naked_identifier: PROVINCIE_NAAM
             data_type:
               data_type_identifier: VARCHAR
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
             column_constraint_segment:
               keyword: 'NULL'
         - comma: ','
@@ -394,11 +406,12 @@ file:
             naked_identifier: GGD_CODE
             data_type:
               data_type_identifier: VARCHAR
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
             column_constraint_segment:
               keyword: 'NULL'
         - comma: ','
@@ -406,11 +419,12 @@ file:
             naked_identifier: GGD_NAAM
             data_type:
               data_type_identifier: VARCHAR
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
             column_constraint_segment:
               keyword: 'NULL'
         - comma: ','
@@ -559,14 +573,15 @@ file:
             naked_identifier: INFECTED_DAILY_INCREASE
             data_type:
               data_type_identifier: DECIMAL
-              bracketed:
-              - start_bracket: (
-              - expression:
-                  numeric_literal: '16'
-              - comma: ','
-              - expression:
-                  numeric_literal: '1'
-              - end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '16'
+                - comma: ','
+                - expression:
+                    numeric_literal: '1'
+                - end_bracket: )
             column_constraint_segment:
               keyword: 'NULL'
         - comma: ','
@@ -609,39 +624,42 @@ file:
             quoted_identifier: '[7D_AVERAGE_INFECTED_DAILY_INCREASE_TOTAL]'
             data_type:
               data_type_identifier: decimal
-              bracketed:
-              - start_bracket: (
-              - expression:
-                  numeric_literal: '16'
-              - comma: ','
-              - expression:
-                  numeric_literal: '2'
-              - end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '16'
+                - comma: ','
+                - expression:
+                    numeric_literal: '2'
+                - end_bracket: )
         - comma: ','
         - column_definition:
             quoted_identifier: '[7D_AVERAGE_INFECTED_DAILY_INCREASE_LAG]'
             data_type:
               data_type_identifier: decimal
-              bracketed:
-              - start_bracket: (
-              - expression:
-                  numeric_literal: '16'
-              - comma: ','
-              - expression:
-                  numeric_literal: '2'
-              - end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '16'
+                - comma: ','
+                - expression:
+                    numeric_literal: '2'
+                - end_bracket: )
         - comma: ','
         - column_definition:
             quoted_identifier: '[7D_AVERAGE_INFECTED_DAILY_INCREASE_ABSOLUTE]'
             data_type:
               data_type_identifier: decimal
-              bracketed:
-              - start_bracket: (
-              - expression:
-                  numeric_literal: '16'
-              - comma: ','
-              - expression:
-                  numeric_literal: '2'
-              - end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '16'
+                - comma: ','
+                - expression:
+                    numeric_literal: '2'
+                - end_bracket: )
         - end_bracket: )
       - statement_terminator: ;
diff --git a/test/fixtures/dialects/tsql/create_table_with_trailing_comma.yml b/test/fixtures/dialects/tsql/create_table_with_trailing_comma.yml
index f76c8b2..79f0f8d 100644
--- a/test/fixtures/dialects/tsql/create_table_with_trailing_comma.yml
+++ b/test/fixtures/dialects/tsql/create_table_with_trailing_comma.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 2b88578420020536b87708edc25b6e03e3d68f285ad98811fb1feeeb6855847c
+_hash: 1741f41c3ad5d4387b08448fc575cf531f48835b024826f368d22069ecf693d9
 file:
   batch:
     statement:
@@ -20,10 +20,11 @@ file:
             quoted_identifier: '[Column B]'
             data_type:
               data_type_identifier: '[varchar]'
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
           comma: ','
           end_bracket: )
diff --git a/test/fixtures/dialects/tsql/create_type.yml b/test/fixtures/dialects/tsql/create_type.yml
index b141fb3..8fd384a 100644
--- a/test/fixtures/dialects/tsql/create_type.yml
+++ b/test/fixtures/dialects/tsql/create_type.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: a2c1c31fefcb68ff28a78b97c46dd954b01b2f37fd35e738d24f403801789768
+_hash: 1873d9ddf12a690dee729b24d8f6678ce9686f646e8321db57a10d32d349aadb
 file:
   batch:
   - statement:
@@ -20,11 +20,12 @@ file:
             naked_identifier: name
             data_type:
               data_type_identifier: nvarchar
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '10'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '10'
+                  end_bracket: )
         - comma: ','
         - column_definition:
             naked_identifier: height
diff --git a/test/fixtures/dialects/tsql/declare_with_following_statements.sql b/test/fixtures/dialects/tsql/declare_with_following_statements.sql
index 653e0e6..4905fcd 100644
--- a/test/fixtures/dialects/tsql/declare_with_following_statements.sql
+++ b/test/fixtures/dialects/tsql/declare_with_following_statements.sql
@@ -10,6 +10,10 @@ BEGIN
 
 	DECLARE @EOMONTH DATE = ('1900-01-01')
 
+	DECLARE @USER DATE = SYSTEM_USER;
+
+	DECLARE @CURRENTTIME DATE = CURRENT_TIMESTAMP;
+
 	SET @EOMONTH = ('2000-01-01')
 
 	SET @EOMONTH = ('2001-01-01');
diff --git a/test/fixtures/dialects/tsql/declare_with_following_statements.yml b/test/fixtures/dialects/tsql/declare_with_following_statements.yml
index 99ba300..3cbf0b1 100644
--- a/test/fixtures/dialects/tsql/declare_with_following_statements.yml
+++ b/test/fixtures/dialects/tsql/declare_with_following_statements.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: efd2ca6bc014ba59a3b45dcdfcd08c6fff7bfbcbf298e92432055703cf140734
+_hash: 965bf0479aba3f070f232c24f5cd5972ded3a634a9f5752eead4702eb1548291
 file:
   batch:
     create_procedure_statement:
@@ -84,6 +84,28 @@ file:
                     expression:
                       quoted_literal: "'1900-01-01'"
                     end_bracket: )
+          - statement:
+              declare_segment:
+                keyword: DECLARE
+                parameter: '@USER'
+                data_type:
+                  data_type_identifier: DATE
+                comparison_operator:
+                  raw_comparison_operator: '='
+                expression:
+                  bare_function: SYSTEM_USER
+                statement_terminator: ;
+          - statement:
+              declare_segment:
+                keyword: DECLARE
+                parameter: '@CURRENTTIME'
+                data_type:
+                  data_type_identifier: DATE
+                comparison_operator:
+                  raw_comparison_operator: '='
+                expression:
+                  bare_function: CURRENT_TIMESTAMP
+                statement_terminator: ;
           - statement:
               set_segment:
                 keyword: SET
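
The two DECLARE statements added in this fixture cover niladic functions: SYSTEM_USER and CURRENT_TIMESTAMP take no argument list, and the YAML shows each initializer parsing as a bare_function rather than as a function with an empty bracketed node. A sketch of the pattern with more plausible types (the variable names and types here are hypothetical; the fixture assigns both to DATE):

    DECLARE @WhoAmI sysname = SYSTEM_USER;        -- niladic: no parentheses
    DECLARE @Now datetime2 = CURRENT_TIMESTAMP;   -- parses as bare_function
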
diff --git a/test/fixtures/dialects/tsql/delete.yml b/test/fixtures/dialects/tsql/delete.yml
index d4e5a18..c9f3839 100644
--- a/test/fixtures/dialects/tsql/delete.yml
+++ b/test/fixtures/dialects/tsql/delete.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: f61ec5dd4a6ba9978e233cba06f0b7d6a9d3483827a18fe8f7b594c545c6224d
+_hash: 499e9012f5cb95c4d85b318e244cf97db07a0782422e2f77aa327eb635e1e8a9
 file:
 - batch:
     statement:
@@ -76,11 +76,12 @@ file:
               keyword: as
               data_type:
                 data_type_identifier: char
-                bracketed:
-                  start_bracket: (
-                  expression:
-                    numeric_literal: '3'
-                  end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    expression:
+                      numeric_literal: '3'
+                    end_bracket: )
               end_bracket: )
         statement_terminator: ;
 - go_statement:
@@ -552,11 +553,12 @@ file:
             naked_identifier: ProductName
             data_type:
               data_type_identifier: nvarchar
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '50'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '50'
+                  end_bracket: )
             column_constraint_segment:
             - keyword: NOT
             - keyword: 'NULL'
diff --git a/test/fixtures/dialects/tsql/execute.sql b/test/fixtures/dialects/tsql/execute.sql
index a449adc..4e8fc84 100644
--- a/test/fixtures/dialects/tsql/execute.sql
+++ b/test/fixtures/dialects/tsql/execute.sql
@@ -32,4 +32,6 @@ EXECUTE @pRes = dbo.ProcTestDefaults @p1 = DEFAULT;
 
 -- Executing statement from a variable
 DECLARE @statement nvarchar(max) = 'SELECT 1'
-EXEC (@statement)
+EXEC (@statement);
+
+EXEC ('DROP TABLE BoardInventory.BoardInventoryFact_Stage;');
diff --git a/test/fixtures/dialects/tsql/execute.yml b/test/fixtures/dialects/tsql/execute.yml
index 99349d4..3775086 100644
--- a/test/fixtures/dialects/tsql/execute.yml
+++ b/test/fixtures/dialects/tsql/execute.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 553a622220bf6d28a299ea98be359e711753afc5a911a494ec15d8b49db728d4
+_hash: 84fadab934cf174f454d0fa9b518ddb7164fa80fe55111cd14f33d8c97b93fb7
 file:
   batch:
   - statement:
@@ -206,10 +206,11 @@ file:
         parameter: '@statement'
         data_type:
           data_type_identifier: nvarchar
-          bracketed:
-            start_bracket: (
-            keyword: max
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              keyword: max
+              end_bracket: )
         comparison_operator:
           raw_comparison_operator: '='
         expression:
@@ -222,3 +223,12 @@ file:
           object_reference:
             parameter: '@statement'
           end_bracket: )
+        statement_terminator: ;
+  - statement:
+      execute_script_statement:
+        keyword: EXEC
+        bracketed:
+          start_bracket: (
+          quoted_literal: "'DROP TABLE BoardInventory.BoardInventoryFact_Stage;'"
+          end_bracket: )
+        statement_terminator: ;
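
Two details in this execute fixture: the nvarchar(max) declaration shows that a bracketed argument can be the MAX keyword rather than a numeric literal (it lands as keyword: max inside bracketed_arguments), and the new statement shows that EXEC over a string literal, not just over a variable, parses as an execute_script_statement. Both forms side by side (object name hypothetical):

    DECLARE @statement nvarchar(max) = N'SELECT 1';
    EXEC (@statement);                       -- dynamic SQL held in a variable
    EXEC (N'DROP TABLE dbo.StagingTable;');  -- dynamic SQL as an inline literal
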
diff --git a/test/fixtures/dialects/tsql/function_default_params.yml b/test/fixtures/dialects/tsql/function_default_params.yml
index fa5e387..b79aaac 100644
--- a/test/fixtures/dialects/tsql/function_default_params.yml
+++ b/test/fixtures/dialects/tsql/function_default_params.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 30609c9432def3d60a46ab44bffa6749659192dbab365b5180a19826c8fbf5e5
+_hash: d7b3e7a4024bf5794510cdb9fe85e46231c6e7f4a658d817d6396cd865f1b5b3
 file:
   batch:
     create_procedure_statement:
@@ -17,11 +17,12 @@ file:
       - parameter: '@param1'
       - data_type:
           data_type_identifier: nvarchar
-          bracketed:
-            start_bracket: (
-            expression:
-              numeric_literal: '10'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              expression:
+                numeric_literal: '10'
+              end_bracket: )
       - comparison_operator:
           raw_comparison_operator: '='
       - expression:
diff --git a/test/fixtures/dialects/tsql/hints.yml b/test/fixtures/dialects/tsql/hints.yml
index c00b9a1..c5daade 100644
--- a/test/fixtures/dialects/tsql/hints.yml
+++ b/test/fixtures/dialects/tsql/hints.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: e43dd4231a12a1515781ca109632f9fdbb660030b474b3b475f0f99735425dd0
+_hash: 6fe6077b68d1043a06174c757f1566f29133888aab96b254faa753d3b667b133
 file:
 - batch:
     statement:
@@ -82,20 +82,22 @@ file:
       - parameter: '@city_name'
       - data_type:
           data_type_identifier: NVARCHAR
-          bracketed:
-            start_bracket: (
-            expression:
-              numeric_literal: '30'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              expression:
+                numeric_literal: '30'
+              end_bracket: )
       - comma: ','
       - parameter: '@postal_code'
       - data_type:
           data_type_identifier: NVARCHAR
-          bracketed:
-            start_bracket: (
-            expression:
-              numeric_literal: '15'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              expression:
+                numeric_literal: '15'
+              end_bracket: )
     - keyword: AS
     - procedure_statement:
         statement:
diff --git a/test/fixtures/dialects/tsql/merge.sql b/test/fixtures/dialects/tsql/merge.sql
index 4c61cbf..2c4f9fb 100644
--- a/test/fixtures/dialects/tsql/merge.sql
+++ b/test/fixtures/dialects/tsql/merge.sql
@@ -167,3 +167,30 @@ from
 ) as upd
 ;
 GO
+
+MERGE Production.UnitMeasure WITH (PAGLOCK) AS tgt
+    USING (SELECT @UnitMeasureCode, @Name) as src (UnitMeasureCode, Name)
+    ON (tgt.UnitMeasureCode = src.UnitMeasureCode)
+    WHEN MATCHED THEN
+        UPDATE SET Name = src.Name
+    WHEN NOT MATCHED THEN
+        INSERT (UnitMeasureCode, Name)
+        VALUES (src.UnitMeasureCode, src.Name)
+    OUTPUT deleted.*, $action, inserted.* INTO #MyTempTable;
+GO
+
+MERGE INTO Production.ProductInventory WITH (ROWLOCK, INDEX(myindex, myindex2)) AS pi
+     USING (SELECT ProductID, SUM(OrderQty)
+            FROM Sales.SalesOrderDetail AS sod
+            JOIN Sales.SalesOrderHeader AS soh
+            ON sod.SalesOrderID = soh.SalesOrderID
+            AND soh.OrderDate BETWEEN '20030701' AND '20030731'
+            GROUP BY ProductID) AS src (ProductID, OrderQty)
+     ON pi.ProductID = src.ProductID
+    WHEN MATCHED AND pi.Quantity - src.OrderQty >= 0
+        THEN UPDATE SET pi.Quantity = pi.Quantity - src.OrderQty
+    WHEN MATCHED AND pi.Quantity - src.OrderQty <= 0
+        THEN DELETE
+    OUTPUT $action, Inserted.ProductID, Inserted.LocationID,
+        Inserted.Quantity AS NewQty, Deleted.Quantity AS PreviousQty;
+GO
diff --git a/test/fixtures/dialects/tsql/merge.yml b/test/fixtures/dialects/tsql/merge.yml
index 81a8aa3..c437c67 100644
--- a/test/fixtures/dialects/tsql/merge.yml
+++ b/test/fixtures/dialects/tsql/merge.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: d45a12ff4b2f569952339683d90192ef155a18140378f82e54bad347f3a3e74e
+_hash: eeed743b0f3ee6e127b892a23031b17bc824eeb3d889397a2755b167a44cea34
 file:
 - batch:
     statement:
@@ -1098,3 +1098,347 @@ file:
             statement_terminator: ;
 - go_statement:
     keyword: GO
+- batch:
+    statement:
+      merge_statement:
+      - keyword: MERGE
+      - table_reference:
+        - naked_identifier: Production
+        - dot: .
+        - naked_identifier: UnitMeasure
+      - keyword: WITH
+      - bracketed:
+          start_bracket: (
+          query_hint_segment:
+            keyword: PAGLOCK
+          end_bracket: )
+      - alias_expression:
+          keyword: AS
+          naked_identifier: tgt
+      - keyword: USING
+      - bracketed:
+          start_bracket: (
+          select_statement:
+            select_clause:
+            - keyword: SELECT
+            - select_clause_element:
+                parameter: '@UnitMeasureCode'
+            - comma: ','
+            - select_clause_element:
+                parameter: '@Name'
+          end_bracket: )
+      - alias_expression:
+          keyword: as
+          naked_identifier: src
+          bracketed:
+            start_bracket: (
+            identifier_list:
+            - naked_identifier: UnitMeasureCode
+            - comma: ','
+            - naked_identifier: Name
+            end_bracket: )
+      - join_on_condition:
+          keyword: 'ON'
+          bracketed:
+            start_bracket: (
+            expression:
+            - column_reference:
+              - naked_identifier: tgt
+              - dot: .
+              - naked_identifier: UnitMeasureCode
+            - comparison_operator:
+                raw_comparison_operator: '='
+            - column_reference:
+              - naked_identifier: src
+              - dot: .
+              - naked_identifier: UnitMeasureCode
+            end_bracket: )
+      - merge_match:
+          merge_when_matched_clause:
+          - keyword: WHEN
+          - keyword: MATCHED
+          - keyword: THEN
+          - merge_update_clause:
+              keyword: UPDATE
+              set_clause_list:
+                keyword: SET
+                set_clause:
+                  column_reference:
+                    naked_identifier: Name
+                  assignment_operator:
+                    raw_comparison_operator: '='
+                  expression:
+                    column_reference:
+                    - naked_identifier: src
+                    - dot: .
+                    - naked_identifier: Name
+          merge_when_not_matched_clause:
+          - keyword: WHEN
+          - keyword: NOT
+          - keyword: MATCHED
+          - keyword: THEN
+          - merge_insert_clause:
+            - keyword: INSERT
+            - bracketed:
+              - start_bracket: (
+              - column_reference:
+                  naked_identifier: UnitMeasureCode
+              - comma: ','
+              - column_reference:
+                  naked_identifier: Name
+              - end_bracket: )
+            - keyword: VALUES
+            - bracketed:
+              - start_bracket: (
+              - expression:
+                  column_reference:
+                  - naked_identifier: src
+                  - dot: .
+                  - naked_identifier: UnitMeasureCode
+              - comma: ','
+              - expression:
+                  column_reference:
+                  - naked_identifier: src
+                  - dot: .
+                  - naked_identifier: Name
+              - end_bracket: )
+          output_clause:
+          - keyword: OUTPUT
+          - wildcard_expression:
+              wildcard_identifier:
+                naked_identifier: deleted
+                dot: .
+                star: '*'
+          - comma: ','
+          - column_reference:
+              variable_identifier: $action
+          - comma: ','
+          - wildcard_expression:
+              wildcard_identifier:
+                naked_identifier: inserted
+                dot: .
+                star: '*'
+          - keyword: INTO
+          - table_reference:
+              hash_identifier: '#MyTempTable'
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      merge_statement:
+      - keyword: MERGE
+      - keyword: INTO
+      - table_reference:
+        - naked_identifier: Production
+        - dot: .
+        - naked_identifier: ProductInventory
+      - keyword: WITH
+      - bracketed:
+        - start_bracket: (
+        - query_hint_segment:
+            keyword: ROWLOCK
+        - comma: ','
+        - query_hint_segment:
+            keyword: INDEX
+            bracketed:
+            - start_bracket: (
+            - index_reference:
+                naked_identifier: myindex
+            - comma: ','
+            - index_reference:
+                naked_identifier: myindex2
+            - end_bracket: )
+        - end_bracket: )
+      - alias_expression:
+          keyword: AS
+          naked_identifier: pi
+      - keyword: USING
+      - bracketed:
+          start_bracket: (
+          select_statement:
+            select_clause:
+            - keyword: SELECT
+            - select_clause_element:
+                column_reference:
+                  naked_identifier: ProductID
+            - comma: ','
+            - select_clause_element:
+                function:
+                  function_name:
+                    function_name_identifier: SUM
+                  bracketed:
+                    start_bracket: (
+                    expression:
+                      column_reference:
+                        naked_identifier: OrderQty
+                    end_bracket: )
+            from_clause:
+              keyword: FROM
+              from_expression:
+                from_expression_element:
+                  table_expression:
+                    table_reference:
+                    - naked_identifier: Sales
+                    - dot: .
+                    - naked_identifier: SalesOrderDetail
+                  alias_expression:
+                    keyword: AS
+                    naked_identifier: sod
+                join_clause:
+                  keyword: JOIN
+                  from_expression_element:
+                    table_expression:
+                      table_reference:
+                      - naked_identifier: Sales
+                      - dot: .
+                      - naked_identifier: SalesOrderHeader
+                    alias_expression:
+                      keyword: AS
+                      naked_identifier: soh
+                  join_on_condition:
+                    keyword: 'ON'
+                    expression:
+                    - column_reference:
+                      - naked_identifier: sod
+                      - dot: .
+                      - naked_identifier: SalesOrderID
+                    - comparison_operator:
+                        raw_comparison_operator: '='
+                    - column_reference:
+                      - naked_identifier: soh
+                      - dot: .
+                      - naked_identifier: SalesOrderID
+                    - binary_operator: AND
+                    - column_reference:
+                      - naked_identifier: soh
+                      - dot: .
+                      - naked_identifier: OrderDate
+                    - keyword: BETWEEN
+                    - quoted_literal: "'20030701'"
+                    - keyword: AND
+                    - quoted_literal: "'20030731'"
+            groupby_clause:
+            - keyword: GROUP
+            - keyword: BY
+            - column_reference:
+                naked_identifier: ProductID
+          end_bracket: )
+      - alias_expression:
+          keyword: AS
+          naked_identifier: src
+          bracketed:
+            start_bracket: (
+            identifier_list:
+            - naked_identifier: ProductID
+            - comma: ','
+            - naked_identifier: OrderQty
+            end_bracket: )
+      - join_on_condition:
+          keyword: 'ON'
+          expression:
+          - column_reference:
+            - naked_identifier: pi
+            - dot: .
+            - naked_identifier: ProductID
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - column_reference:
+            - naked_identifier: src
+            - dot: .
+            - naked_identifier: ProductID
+      - merge_match:
+        - merge_when_matched_clause:
+          - keyword: WHEN
+          - keyword: MATCHED
+          - keyword: AND
+          - expression:
+            - column_reference:
+              - naked_identifier: pi
+              - dot: .
+              - naked_identifier: Quantity
+            - binary_operator: '-'
+            - column_reference:
+              - naked_identifier: src
+              - dot: .
+              - naked_identifier: OrderQty
+            - comparison_operator:
+              - raw_comparison_operator: '>'
+              - raw_comparison_operator: '='
+            - numeric_literal: '0'
+          - keyword: THEN
+          - merge_update_clause:
+              keyword: UPDATE
+              set_clause_list:
+                keyword: SET
+                set_clause:
+                  column_reference:
+                  - naked_identifier: pi
+                  - dot: .
+                  - naked_identifier: Quantity
+                  assignment_operator:
+                    raw_comparison_operator: '='
+                  expression:
+                  - column_reference:
+                    - naked_identifier: pi
+                    - dot: .
+                    - naked_identifier: Quantity
+                  - binary_operator: '-'
+                  - column_reference:
+                    - naked_identifier: src
+                    - dot: .
+                    - naked_identifier: OrderQty
+        - merge_when_matched_clause:
+          - keyword: WHEN
+          - keyword: MATCHED
+          - keyword: AND
+          - expression:
+            - column_reference:
+              - naked_identifier: pi
+              - dot: .
+              - naked_identifier: Quantity
+            - binary_operator: '-'
+            - column_reference:
+              - naked_identifier: src
+              - dot: .
+              - naked_identifier: OrderQty
+            - comparison_operator:
+              - raw_comparison_operator: <
+              - raw_comparison_operator: '='
+            - numeric_literal: '0'
+          - keyword: THEN
+          - merge_delete_clause:
+              keyword: DELETE
+        - output_clause:
+          - keyword: OUTPUT
+          - column_reference:
+              variable_identifier: $action
+          - comma: ','
+          - column_reference:
+            - naked_identifier: Inserted
+            - dot: .
+            - naked_identifier: ProductID
+          - comma: ','
+          - column_reference:
+            - naked_identifier: Inserted
+            - dot: .
+            - naked_identifier: LocationID
+          - comma: ','
+          - column_reference:
+            - naked_identifier: Inserted
+            - dot: .
+            - naked_identifier: Quantity
+          - alias_expression:
+              keyword: AS
+              naked_identifier: NewQty
+          - comma: ','
+          - column_reference:
+            - naked_identifier: Deleted
+            - dot: .
+            - naked_identifier: Quantity
+          - alias_expression:
+              keyword: AS
+              naked_identifier: PreviousQty
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
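
The two MERGE statements added above exercise surface the dialect previously lacked coverage for: table hints on the merge target (WITH (PAGLOCK), and WITH (ROWLOCK, INDEX(...)) carrying an index list), column aliases on the USING source, conditional WHEN MATCHED AND ... branches, and OUTPUT clauses using the $action pseudo-column, optionally redirected INTO a temp table. In the parse tree, $action maps to a variable_identifier and #MyTempTable to a hash_identifier. A minimal form of the newly covered shape (all object names hypothetical; #MergeLog is assumed to already exist with matching columns):

    MERGE dbo.Target WITH (PAGLOCK) AS tgt       -- query_hint_segment
    USING dbo.Source AS src
        ON tgt.id = src.id
    WHEN MATCHED THEN
        UPDATE SET tgt.val = src.val
    WHEN NOT MATCHED THEN
        INSERT (id, val) VALUES (src.id, src.val)
    OUTPUT $action, inserted.id INTO #MergeLog;  -- variable_identifier, hash_identifier
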
diff --git a/test/fixtures/dialects/tsql/openjson.sql b/test/fixtures/dialects/tsql/openjson.sql
new file mode 100644
index 0000000..62af4d7
--- /dev/null
+++ b/test/fixtures/dialects/tsql/openjson.sql
@@ -0,0 +1,52 @@
+/*
+https://learn.microsoft.com/en-us/sql/t-sql/functions/openjson-transact-sql?view=sql-server-ver16#examples
+*/
+
+SELECT *
+FROM products
+INNER JOIN OPENJSON(N'[1,2,3,4]') AS productTypes
+  ON product.productTypeID = productTypes.value
+; 
+
+SELECT * FROM OPENJSON(@json)
+        WITH (  month VARCHAR(3),
+                temp int,
+                month_id tinyint '$.sql:identity()') as months
+;
+
+SELECT *
+FROM OPENJSON ( @json )  
+WITH (   
+              Number   VARCHAR(200)   '$.Order.Number',  
+              Date     DATETIME       '$.Order.Date',  
+              Customer VARCHAR(200)   '$.AccountNumber',  
+              Quantity INT            '$.Item.Quantity',  
+              [Order]  NVARCHAR(MAX)  AS JSON  
+);
+
+SELECT SalesOrderID, OrderDate, value AS Reason  
+FROM Sales.SalesOrderHeader  
+     CROSS APPLY OPENJSON (SalesReasons) WITH (value NVARCHAR(100) '$')
+;
+
+SELECT store.title, location.street, location.lat, location.long  
+FROM store  
+CROSS APPLY OPENJSON(store.jsonCol, 'lax $.location')   
+     WITH (street VARCHAR(500) ,  postcode VARCHAR(500) '$.postcode' ,  
+     lon int '$.geo.longitude', lat int '$.geo.latitude')  
+     AS location
+;
+
+INSERT INTO Person  
+SELECT *   
+FROM OPENJSON(@json)  
+WITH (id INT,  
+      firstName NVARCHAR(50), lastName NVARCHAR(50),   
+      isAlive BIT, age INT,  
+      dateOfBirth DATETIME, spouse NVARCHAR(50))
+;
+
+SELECT root.[key] AS [Order],TheValues.[key], TheValues.[value]
+FROM OPENJSON ( @JSON ) AS root
+CROSS APPLY OPENJSON ( root.value) AS TheValues
+;
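
The new openjson fixture (its header comment links to Microsoft's OPENJSON examples) covers the function in three positions: as a plain table function joined like any other table expression, with a WITH schema clause, and under CROSS APPLY. In the WITH clause each column takes a data type and optionally a quoted JSON path, and AS JSON marks a column returned as a raw JSON fragment, which is why the YAML below interleaves column_reference, data_type and quoted_literal entries. The core shape, assuming @json holds a suitable JSON document (column names hypothetical):

    SELECT j.month_name, j.temp
    FROM OPENJSON(@json)
    WITH (
        month_name varchar(3) '$.month',   -- typed column with an explicit JSON path
        temp int,                          -- path defaults to the column name
        doc nvarchar(max) AS JSON          -- returned as a raw JSON fragment
    ) AS j;
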
diff --git a/test/fixtures/dialects/tsql/openjson.yml b/test/fixtures/dialects/tsql/openjson.yml
new file mode 100644
index 0000000..000b4f3
--- /dev/null
+++ b/test/fixtures/dialects/tsql/openjson.yml
@@ -0,0 +1,468 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: b0aed6157fb7463eb1ea734057995c2536cb98aabdd752bf4b40ec583132683d
+file:
+  batch:
+  - statement:
+      select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: products
+            join_clause:
+            - keyword: INNER
+            - keyword: JOIN
+            - from_expression_element:
+                table_expression:
+                  function:
+                    function_name:
+                      function_name_identifier: OPENJSON
+                    bracketed:
+                      start_bracket: (
+                      expression:
+                        quoted_literal: "N'[1,2,3,4]'"
+                      end_bracket: )
+                alias_expression:
+                  keyword: AS
+                  naked_identifier: productTypes
+            - join_on_condition:
+                keyword: 'ON'
+                expression:
+                - column_reference:
+                  - naked_identifier: product
+                  - dot: .
+                  - naked_identifier: productTypeID
+                - comparison_operator:
+                    raw_comparison_operator: '='
+                - column_reference:
+                  - naked_identifier: productTypes
+                  - dot: .
+                  - naked_identifier: value
+          statement_terminator: ;
+  - statement:
+      select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                openjson_segment:
+                  keyword: OPENJSON
+                  bracketed:
+                    start_bracket: (
+                    column_reference:
+                      parameter: '@json'
+                    end_bracket: )
+                  openjson_with_clause:
+                    keyword: WITH
+                    bracketed:
+                    - start_bracket: (
+                    - column_reference:
+                        naked_identifier: month
+                    - data_type:
+                        data_type_identifier: VARCHAR
+                        bracketed_arguments:
+                          bracketed:
+                            start_bracket: (
+                            expression:
+                              numeric_literal: '3'
+                            end_bracket: )
+                    - comma: ','
+                    - column_reference:
+                        naked_identifier: temp
+                    - data_type:
+                        data_type_identifier: int
+                    - comma: ','
+                    - column_reference:
+                        naked_identifier: month_id
+                    - data_type:
+                        data_type_identifier: tinyint
+                    - quoted_literal: "'$.sql:identity()'"
+                    - end_bracket: )
+              alias_expression:
+                keyword: as
+                naked_identifier: months
+          statement_terminator: ;
+  - statement:
+      select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                openjson_segment:
+                  keyword: OPENJSON
+                  bracketed:
+                    start_bracket: (
+                    column_reference:
+                      parameter: '@json'
+                    end_bracket: )
+                  openjson_with_clause:
+                    keyword: WITH
+                    bracketed:
+                    - start_bracket: (
+                    - column_reference:
+                        naked_identifier: Number
+                    - data_type:
+                        data_type_identifier: VARCHAR
+                        bracketed_arguments:
+                          bracketed:
+                            start_bracket: (
+                            expression:
+                              numeric_literal: '200'
+                            end_bracket: )
+                    - quoted_literal: "'$.Order.Number'"
+                    - comma: ','
+                    - column_reference:
+                        naked_identifier: Date
+                    - data_type:
+                        data_type_identifier: DATETIME
+                    - quoted_literal: "'$.Order.Date'"
+                    - comma: ','
+                    - column_reference:
+                        naked_identifier: Customer
+                    - data_type:
+                        data_type_identifier: VARCHAR
+                        bracketed_arguments:
+                          bracketed:
+                            start_bracket: (
+                            expression:
+                              numeric_literal: '200'
+                            end_bracket: )
+                    - quoted_literal: "'$.AccountNumber'"
+                    - comma: ','
+                    - column_reference:
+                        naked_identifier: Quantity
+                    - data_type:
+                        data_type_identifier: INT
+                    - quoted_literal: "'$.Item.Quantity'"
+                    - comma: ','
+                    - column_reference:
+                        quoted_identifier: '[Order]'
+                    - data_type:
+                        data_type_identifier: NVARCHAR
+                        bracketed_arguments:
+                          bracketed:
+                            start_bracket: (
+                            keyword: MAX
+                            end_bracket: )
+                    - keyword: AS
+                    - keyword: JSON
+                    - end_bracket: )
+          statement_terminator: ;
+  - statement:
+      select_statement:
+        select_clause:
+        - keyword: SELECT
+        - select_clause_element:
+            column_reference:
+              naked_identifier: SalesOrderID
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: OrderDate
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: value
+            alias_expression:
+              keyword: AS
+              naked_identifier: Reason
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                - naked_identifier: Sales
+                - dot: .
+                - naked_identifier: SalesOrderHeader
+            join_clause:
+            - keyword: CROSS
+            - keyword: APPLY
+            - from_expression_element:
+                table_expression:
+                  openjson_segment:
+                    keyword: OPENJSON
+                    bracketed:
+                      start_bracket: (
+                      column_reference:
+                        naked_identifier: SalesReasons
+                      end_bracket: )
+                    openjson_with_clause:
+                      keyword: WITH
+                      bracketed:
+                        start_bracket: (
+                        column_reference:
+                          naked_identifier: value
+                        data_type:
+                          data_type_identifier: NVARCHAR
+                          bracketed_arguments:
+                            bracketed:
+                              start_bracket: (
+                              expression:
+                                numeric_literal: '100'
+                              end_bracket: )
+                        quoted_literal: "'$'"
+                        end_bracket: )
+          statement_terminator: ;
+  - statement:
+      select_statement:
+        select_clause:
+        - keyword: SELECT
+        - select_clause_element:
+            column_reference:
+            - naked_identifier: store
+            - dot: .
+            - naked_identifier: title
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+            - naked_identifier: location
+            - dot: .
+            - naked_identifier: street
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+            - naked_identifier: location
+            - dot: .
+            - naked_identifier: lat
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+            - naked_identifier: location
+            - dot: .
+            - naked_identifier: long
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: store
+            join_clause:
+            - keyword: CROSS
+            - keyword: APPLY
+            - from_expression_element:
+                table_expression:
+                  openjson_segment:
+                    keyword: OPENJSON
+                    bracketed:
+                      start_bracket: (
+                      column_reference:
+                      - naked_identifier: store
+                      - dot: .
+                      - naked_identifier: jsonCol
+                      comma: ','
+                      quoted_literal: "'lax $.location'"
+                      end_bracket: )
+                    openjson_with_clause:
+                      keyword: WITH
+                      bracketed:
+                      - start_bracket: (
+                      - column_reference:
+                          naked_identifier: street
+                      - data_type:
+                          data_type_identifier: VARCHAR
+                          bracketed_arguments:
+                            bracketed:
+                              start_bracket: (
+                              expression:
+                                numeric_literal: '500'
+                              end_bracket: )
+                      - comma: ','
+                      - column_reference:
+                          naked_identifier: postcode
+                      - data_type:
+                          data_type_identifier: VARCHAR
+                          bracketed_arguments:
+                            bracketed:
+                              start_bracket: (
+                              expression:
+                                numeric_literal: '500'
+                              end_bracket: )
+                      - quoted_literal: "'$.postcode'"
+                      - comma: ','
+                      - column_reference:
+                          naked_identifier: lon
+                      - data_type:
+                          data_type_identifier: int
+                      - quoted_literal: "'$.geo.longitude'"
+                      - comma: ','
+                      - column_reference:
+                          naked_identifier: lat
+                      - data_type:
+                          data_type_identifier: int
+                      - quoted_literal: "'$.geo.latitude'"
+                      - end_bracket: )
+                alias_expression:
+                  keyword: AS
+                  naked_identifier: location
+          statement_terminator: ;
+  - statement:
+      insert_statement:
+      - keyword: INSERT
+      - keyword: INTO
+      - table_reference:
+          naked_identifier: Person
+      - select_statement:
+          select_clause:
+            keyword: SELECT
+            select_clause_element:
+              wildcard_expression:
+                wildcard_identifier:
+                  star: '*'
+          from_clause:
+            keyword: FROM
+            from_expression:
+              from_expression_element:
+                table_expression:
+                  openjson_segment:
+                    keyword: OPENJSON
+                    bracketed:
+                      start_bracket: (
+                      column_reference:
+                        parameter: '@json'
+                      end_bracket: )
+                    openjson_with_clause:
+                      keyword: WITH
+                      bracketed:
+                      - start_bracket: (
+                      - column_reference:
+                          naked_identifier: id
+                      - data_type:
+                          data_type_identifier: INT
+                      - comma: ','
+                      - column_reference:
+                          naked_identifier: firstName
+                      - data_type:
+                          data_type_identifier: NVARCHAR
+                          bracketed_arguments:
+                            bracketed:
+                              start_bracket: (
+                              expression:
+                                numeric_literal: '50'
+                              end_bracket: )
+                      - comma: ','
+                      - column_reference:
+                          naked_identifier: lastName
+                      - data_type:
+                          data_type_identifier: NVARCHAR
+                          bracketed_arguments:
+                            bracketed:
+                              start_bracket: (
+                              expression:
+                                numeric_literal: '50'
+                              end_bracket: )
+                      - comma: ','
+                      - column_reference:
+                          naked_identifier: isAlive
+                      - data_type:
+                          data_type_identifier: BIT
+                      - comma: ','
+                      - column_reference:
+                          naked_identifier: age
+                      - data_type:
+                          data_type_identifier: INT
+                      - comma: ','
+                      - column_reference:
+                          naked_identifier: dateOfBirth
+                      - data_type:
+                          data_type_identifier: DATETIME
+                      - comma: ','
+                      - column_reference:
+                          naked_identifier: spouse
+                      - data_type:
+                          data_type_identifier: NVARCHAR
+                          bracketed_arguments:
+                            bracketed:
+                              start_bracket: (
+                              expression:
+                                numeric_literal: '50'
+                              end_bracket: )
+                      - end_bracket: )
+            statement_terminator: ;
+  - statement:
+      select_statement:
+        select_clause:
+        - keyword: SELECT
+        - select_clause_element:
+            column_reference:
+              naked_identifier: root
+              dot: .
+              quoted_identifier: '[key]'
+            alias_expression:
+              keyword: AS
+              quoted_identifier: '[Order]'
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: TheValues
+              dot: .
+              quoted_identifier: '[key]'
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: TheValues
+              dot: .
+              quoted_identifier: '[value]'
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                function:
+                  function_name:
+                    function_name_identifier: OPENJSON
+                  bracketed:
+                    start_bracket: (
+                    expression:
+                      parameter: '@JSON'
+                    end_bracket: )
+              alias_expression:
+                keyword: AS
+                naked_identifier: root
+            join_clause:
+            - keyword: CROSS
+            - keyword: APPLY
+            - from_expression_element:
+                table_expression:
+                  function:
+                    function_name:
+                      function_name_identifier: OPENJSON
+                    bracketed:
+                      start_bracket: (
+                      expression:
+                        column_reference:
+                        - naked_identifier: root
+                        - dot: .
+                        - naked_identifier: value
+                      end_bracket: )
+                alias_expression:
+                  keyword: AS
+                  naked_identifier: TheValues
+          statement_terminator: ;
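
For context, the parse trees above come from the new OPENJSON fixtures. Reconstructed from the segments shown (the fixture's SQL file itself is not reproduced in this diff, so this is a sketch rather than a verbatim copy), the WITH-clause form being exercised looks like:

    -- OPENJSON with a WITH clause mapping JSON properties to typed columns
    SELECT *
    FROM OPENJSON(@json)
    WITH (
        month VARCHAR(3),
        temp int,
        month_id tinyint '$.sql:identity()'
    ) as months;
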
diff --git a/test/fixtures/dialects/tsql/print.yml b/test/fixtures/dialects/tsql/print.yml
index 241f27a..d2d32ab 100644
--- a/test/fixtures/dialects/tsql/print.yml
+++ b/test/fixtures/dialects/tsql/print.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 5da646ea7ac1fc4b9d5602093d75a4521539f0acbe14d93e17ac7b7cccd339ad
+_hash: 683357bdaa8ac0f3bc686790545ee9e82d84517b8cb80ce8b441081659f15244
 file:
   batch:
   - statement:
@@ -12,11 +12,12 @@ file:
         parameter: '@TestVal'
         data_type:
           data_type_identifier: VARCHAR
-          bracketed:
-            start_bracket: (
-            expression:
-              numeric_literal: '20'
-            end_bracket: )
+          bracketed_arguments:
+            bracketed:
+              start_bracket: (
+              expression:
+                numeric_literal: '20'
+              end_bracket: )
         comparison_operator:
           raw_comparison_operator: '='
         expression:
@@ -45,11 +46,12 @@ file:
               keyword: AS
               data_type:
                 data_type_identifier: VARCHAR
-                bracketed:
-                  start_bracket: (
-                  expression:
-                    numeric_literal: '50'
-                  end_bracket: )
+                bracketed_arguments:
+                  bracketed:
+                    start_bracket: (
+                    expression:
+                      numeric_literal: '50'
+                    end_bracket: )
               end_bracket: )
         statement_terminator: ;
   - statement:
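
The substantive change in this hunk (and in the sequence.yml, stored_procedure_single_statement.yml and table_variables.yml hunks below) is structural: size and precision arguments on a data type, such as VARCHAR(20) or decimal(3,0), now parse under a dedicated bracketed_arguments node instead of a bare bracketed node. A minimal sketch of the kind of statement the first fragment corresponds to (the DECLARE form is an assumption; only @TestVal, VARCHAR(20) and the '=' are visible in the tree):

    -- the (20) now sits under bracketed_arguments in the parse tree
    DECLARE @TestVal VARCHAR(20) = 'example';
    PRINT @TestVal;
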
diff --git a/test/fixtures/dialects/tsql/select.sql b/test/fixtures/dialects/tsql/select.sql
index fda356c..360f031 100644
--- a/test/fixtures/dialects/tsql/select.sql
+++ b/test/fixtures/dialects/tsql/select.sql
@@ -98,8 +98,15 @@ SELECT
 	[following]	= count(*) over(order by object_id ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING),
 
     EqualsAlias = ColumnName,
-    OtherColumnName AS AsAlias
+    OtherColumnName AS AsAlias,
+	cast(1 as character varying(1)),
+	cast([central] as int),
 
+    --unbracketed functions
+    CURRENT_TIMESTAMP,
+    CURRENT_USER,
+    SESSION_USER,
+    SYSTEM_USER
 
-FROM dbo . all_pop
 
+FROM dbo . all_pop
diff --git a/test/fixtures/dialects/tsql/select.yml b/test/fixtures/dialects/tsql/select.yml
index 7a50a3a..2ae9121 100644
--- a/test/fixtures/dialects/tsql/select.yml
+++ b/test/fixtures/dialects/tsql/select.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 7630efe8a7ecc90a37d262198a0df9e9db21e846a122560e8cd1f28f945a070f
+_hash: 5ce6e3c7c4dc3263e4f470a51c0b7c52a2173bba1d16f2852067b0d232b40bc6
 file:
   batch:
     statement:
@@ -822,6 +822,52 @@ file:
             alias_expression:
               keyword: AS
               naked_identifier: AsAlias
+        - comma: ','
+        - select_clause_element:
+            function:
+              function_name:
+                keyword: cast
+              bracketed:
+                start_bracket: (
+                expression:
+                  numeric_literal: '1'
+                keyword: as
+                data_type:
+                  data_type_identifier: character
+                  keyword: varying
+                  bracketed_arguments:
+                    bracketed:
+                      start_bracket: (
+                      expression:
+                        numeric_literal: '1'
+                      end_bracket: )
+                end_bracket: )
+        - comma: ','
+        - select_clause_element:
+            function:
+              function_name:
+                keyword: cast
+              bracketed:
+                start_bracket: (
+                expression:
+                  column_reference:
+                    quoted_identifier: '[central]'
+                keyword: as
+                data_type:
+                  data_type_identifier: int
+                end_bracket: )
+        - comma: ','
+        - select_clause_element:
+            bare_function: CURRENT_TIMESTAMP
+        - comma: ','
+        - select_clause_element:
+            bare_function: CURRENT_USER
+        - comma: ','
+        - select_clause_element:
+            bare_function: SESSION_USER
+        - comma: ','
+        - select_clause_element:
+            bare_function: SYSTEM_USER
         from_clause:
           keyword: FROM
           from_expression:
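
The last four additions above exercise functions that T-SQL allows to be called without parentheses; these now parse as bare_function segments. In use:

    -- niladic functions: no parentheses, parsed as bare_function
    SELECT CURRENT_TIMESTAMP, CURRENT_USER, SESSION_USER, SYSTEM_USER;
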
diff --git a/test/fixtures/dialects/tsql/select_for.sql b/test/fixtures/dialects/tsql/select_for.sql
new file mode 100644
index 0000000..955f2d4
--- /dev/null
+++ b/test/fixtures/dialects/tsql/select_for.sql
@@ -0,0 +1,55 @@
+-- FOR JSON
+
+SELECT name, surname
+FROM emp
+FOR JSON AUTO;
+GO
+
+SELECT 1 AS a
+FOR JSON PATH;
+GO
+
+SELECT 1 AS a
+FOR JSON PATH, WITHOUT_ARRAY_WRAPPER
+GO
+
+SELECT c.ClassName,
+    s.StudentName
+FROM #tabClass AS c
+RIGHT JOIN #tabStudent AS s ON s.ClassGuid = c.ClassGuid
+ORDER BY c.ClassName,
+    s.StudentName
+FOR JSON AUTO;
+GO
+
+SELECT 1 AS a
+FOR JSON PATH, ROOT ('RootName'), WITHOUT_ARRAY_WRAPPER, INCLUDE_NULL_VALUES;
+GO
+
+-- FOR XML
+
+SELECT ProductModelID, Name
+FROM Production.ProductModel
+WHERE ProductModelID=122 or ProductModelID=119
+FOR XML RAW;
+
+SELECT ProductPhotoID, ThumbNailPhoto
+FROM   Production.ProductPhoto
+WHERE ProductPhotoID=70
+FOR XML AUTO;
+
+SELECT 1    as Tag
+FROM   HumanResources.Employee AS E
+FOR XML EXPLICIT;
+
+SELECT
+    ProductModelID,
+    Name
+FROM Production.ProductModel
+WHERE ProductModelID=122 OR ProductModelID=119
+FOR XML PATH ('root');
+
+-- FOR BROWSE
+SELECT 1 AS a
+FOR BROWSE
+GO
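
As a quick reminder of what the FOR JSON options above control (the outputs below are standard SQL Server behaviour, not part of the fixture):

    SELECT 1 AS a FOR JSON PATH;                         -- [{"a":1}]
    SELECT 1 AS a FOR JSON PATH, WITHOUT_ARRAY_WRAPPER;  -- {"a":1}
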
diff --git a/test/fixtures/dialects/tsql/select_for.yml b/test/fixtures/dialects/tsql/select_for.yml
new file mode 100644
index 0000000..8d7af28
--- /dev/null
+++ b/test/fixtures/dialects/tsql/select_for.yml
@@ -0,0 +1,319 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: 9475bf138df2b698b5fcdea7d4d615c8ea76ce29b7e3f08dc3b1b10335f5b6a8
+file:
+- batch:
+    statement:
+      select_statement:
+        select_clause:
+        - keyword: SELECT
+        - select_clause_element:
+            column_reference:
+              naked_identifier: name
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: surname
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: emp
+        for_clause:
+        - keyword: FOR
+        - keyword: JSON
+        - keyword: AUTO
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            numeric_literal: '1'
+            alias_expression:
+              keyword: AS
+              naked_identifier: a
+        for_clause:
+        - keyword: FOR
+        - keyword: JSON
+        - keyword: PATH
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            numeric_literal: '1'
+            alias_expression:
+              keyword: AS
+              naked_identifier: a
+        for_clause:
+        - keyword: FOR
+        - keyword: JSON
+        - keyword: PATH
+        - comma: ','
+        - keyword: WITHOUT_ARRAY_WRAPPER
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      select_statement:
+        select_clause:
+        - keyword: SELECT
+        - select_clause_element:
+            column_reference:
+            - naked_identifier: c
+            - dot: .
+            - naked_identifier: ClassName
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+            - naked_identifier: s
+            - dot: .
+            - naked_identifier: StudentName
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  hash_identifier: '#tabClass'
+              alias_expression:
+                keyword: AS
+                naked_identifier: c
+            join_clause:
+            - keyword: RIGHT
+            - keyword: JOIN
+            - from_expression_element:
+                table_expression:
+                  table_reference:
+                    hash_identifier: '#tabStudent'
+                alias_expression:
+                  keyword: AS
+                  naked_identifier: s
+            - join_on_condition:
+                keyword: 'ON'
+                expression:
+                - column_reference:
+                  - naked_identifier: s
+                  - dot: .
+                  - naked_identifier: ClassGuid
+                - comparison_operator:
+                    raw_comparison_operator: '='
+                - column_reference:
+                  - naked_identifier: c
+                  - dot: .
+                  - naked_identifier: ClassGuid
+        orderby_clause:
+        - keyword: ORDER
+        - keyword: BY
+        - column_reference:
+          - naked_identifier: c
+          - dot: .
+          - naked_identifier: ClassName
+        - comma: ','
+        - column_reference:
+          - naked_identifier: s
+          - dot: .
+          - naked_identifier: StudentName
+        for_clause:
+        - keyword: FOR
+        - keyword: JSON
+        - keyword: AUTO
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            numeric_literal: '1'
+            alias_expression:
+              keyword: AS
+              naked_identifier: a
+        for_clause:
+        - keyword: FOR
+        - keyword: JSON
+        - keyword: PATH
+        - comma: ','
+        - keyword: ROOT
+        - bracketed:
+            start_bracket: (
+            quoted_literal: "'RootName'"
+            end_bracket: )
+        - comma: ','
+        - keyword: WITHOUT_ARRAY_WRAPPER
+        - comma: ','
+        - keyword: INCLUDE_NULL_VALUES
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+  - statement:
+      select_statement:
+        select_clause:
+        - keyword: SELECT
+        - select_clause_element:
+            column_reference:
+              naked_identifier: ProductModelID
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: Name
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                - naked_identifier: Production
+                - dot: .
+                - naked_identifier: ProductModel
+        where_clause:
+          keyword: WHERE
+          expression:
+          - column_reference:
+              naked_identifier: ProductModelID
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - numeric_literal: '122'
+          - binary_operator: or
+          - column_reference:
+              naked_identifier: ProductModelID
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - numeric_literal: '119'
+        for_clause:
+        - keyword: FOR
+        - keyword: XML
+        - keyword: RAW
+  - statement_terminator: ;
+  - statement:
+      select_statement:
+        select_clause:
+        - keyword: SELECT
+        - select_clause_element:
+            column_reference:
+              naked_identifier: ProductPhotoID
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: ThumbNailPhoto
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                - naked_identifier: Production
+                - dot: .
+                - naked_identifier: ProductPhoto
+        where_clause:
+          keyword: WHERE
+          expression:
+            column_reference:
+              naked_identifier: ProductPhotoID
+            comparison_operator:
+              raw_comparison_operator: '='
+            numeric_literal: '70'
+        for_clause:
+        - keyword: FOR
+        - keyword: XML
+        - keyword: AUTO
+  - statement_terminator: ;
+  - statement:
+      select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            numeric_literal: '1'
+            alias_expression:
+              keyword: as
+              naked_identifier: Tag
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                - naked_identifier: HumanResources
+                - dot: .
+                - naked_identifier: Employee
+              alias_expression:
+                keyword: AS
+                naked_identifier: E
+        for_clause:
+        - keyword: FOR
+        - keyword: XML
+        - keyword: EXPLICIT
+  - statement_terminator: ;
+  - statement:
+      select_statement:
+        select_clause:
+        - keyword: SELECT
+        - select_clause_element:
+            column_reference:
+              naked_identifier: ProductModelID
+        - comma: ','
+        - select_clause_element:
+            column_reference:
+              naked_identifier: Name
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                - naked_identifier: Production
+                - dot: .
+                - naked_identifier: ProductModel
+        where_clause:
+          keyword: WHERE
+          expression:
+          - column_reference:
+              naked_identifier: ProductModelID
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - numeric_literal: '122'
+          - binary_operator: OR
+          - column_reference:
+              naked_identifier: ProductModelID
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - numeric_literal: '119'
+        for_clause:
+        - keyword: FOR
+        - keyword: XML
+        - keyword: PATH
+        - bracketed:
+            start_bracket: (
+            quoted_literal: "'root'"
+            end_bracket: )
+  - statement_terminator: ;
+  - statement:
+      select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            numeric_literal: '1'
+            alias_expression:
+              keyword: AS
+              naked_identifier: a
+        for_clause:
+        - keyword: FOR
+        - keyword: BROWSE
+- go_statement:
+    keyword: GO
diff --git a/test/fixtures/dialects/tsql/sequence.yml b/test/fixtures/dialects/tsql/sequence.yml
index 4c98038..d7a502d 100644
--- a/test/fixtures/dialects/tsql/sequence.yml
+++ b/test/fixtures/dialects/tsql/sequence.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 55438b24f9c3193ab3eae96d8dc40ef8d77e87dfa87b431d31c7fce238cd8387
+_hash: e0e8fd69019db0ed39c2baf5e6e04d8015b32483129f0e256d19de7838d1a0f7
 file:
 - batch:
     statement:
@@ -35,14 +35,15 @@ file:
           keyword: AS
           data_type:
             data_type_identifier: decimal
-            bracketed:
-            - start_bracket: (
-            - expression:
-                numeric_literal: '3'
-            - comma: ','
-            - expression:
-                numeric_literal: '0'
-            - end_bracket: )
+            bracketed_arguments:
+              bracketed:
+              - start_bracket: (
+              - expression:
+                  numeric_literal: '3'
+              - comma: ','
+              - expression:
+                  numeric_literal: '0'
+              - end_bracket: )
       - create_sequence_options_segment:
         - keyword: START
         - keyword: WITH
diff --git a/test/fixtures/dialects/tsql/sqlcmd_command.sql b/test/fixtures/dialects/tsql/sqlcmd_command.sql
new file mode 100644
index 0000000..5326896
--- /dev/null
+++ b/test/fixtures/dialects/tsql/sqlcmd_command.sql
@@ -0,0 +1,13 @@
+/*
+https://learn.microsoft.com/en-us/sql/tools/sqlcmd/sqlcmd-utility?view=sql-server-ver16#sqlcmd-commands
+*/
+
+-- reference / execute other SQL files
+:r script.sql
+:r script#01_a-b.sql
+:r ...\folder\script.SQL
+:r .\folder_1\folder_2\folder_3\folder_4\script.sql
+
+-- define *sqlcmd* scripting variable
+:setvar variable_name variable_value
+:setvar variable_name "variable_value"
diff --git a/test/fixtures/dialects/tsql/sqlcmd_command.yml b/test/fixtures/dialects/tsql/sqlcmd_command.yml
new file mode 100644
index 0000000..f826cb9
--- /dev/null
+++ b/test/fixtures/dialects/tsql/sqlcmd_command.yml
@@ -0,0 +1,42 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: dc299a21031d83e755691f36814251ee24c947e2ed3c2f6b5372a2ca95ebbce3
+file:
+  batch:
+  - statement:
+      sqlcmd_command_segment:
+        colon: ':'
+        sqlcmd_operator: r
+        unquoted_relative_sql_file_path: script.sql
+  - statement:
+      sqlcmd_command_segment:
+        colon: ':'
+        sqlcmd_operator: r
+        unquoted_relative_sql_file_path: script#01_a-b.sql
+  - statement:
+      sqlcmd_command_segment:
+        colon: ':'
+        sqlcmd_operator: r
+        unquoted_relative_sql_file_path: '...\folder\script.SQL'
+  - statement:
+      sqlcmd_command_segment:
+        colon: ':'
+        sqlcmd_operator: r
+        unquoted_relative_sql_file_path: .\folder_1\folder_2\folder_3\folder_4\script.sql
+  - statement:
+      sqlcmd_command_segment:
+        colon: ':'
+        sqlcmd_operator: setvar
+        object_reference:
+          naked_identifier: variable_name
+        raw: variable_value
+  - statement:
+      sqlcmd_command_segment:
+        colon: ':'
+        sqlcmd_operator: setvar
+        object_reference:
+          naked_identifier: variable_name
+        double_quote: '"variable_value"'
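
A usage note on the :setvar fixtures: in the sqlcmd utility, a variable defined with :setvar is referenced later as $(variable_name). A minimal sketch (the $(...) reference itself does not appear in the fixture above):

    :setvar TableName Person
    SELECT * FROM $(TableName);
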
diff --git a/test/fixtures/dialects/tsql/stored_procedure_single_statement.yml b/test/fixtures/dialects/tsql/stored_procedure_single_statement.yml
index 1f91432..fa36796 100644
--- a/test/fixtures/dialects/tsql/stored_procedure_single_statement.yml
+++ b/test/fixtures/dialects/tsql/stored_procedure_single_statement.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 9c8937fc2416f96d82bcc938dd539ee97a0f9325979556e8bf1d29956bfad457
+_hash: 3e7b1bb873be22b78273c31cc7f23750f41e2ccc8bcac842a2ffcccdb1027d2b
 file:
   batch:
     create_procedure_statement:
@@ -25,11 +25,12 @@ file:
         - parameter: '@Orange'
         - data_type:
             data_type_identifier: varchar
-            bracketed:
-              start_bracket: (
-              expression:
-                numeric_literal: '100'
-              end_bracket: )
+            bracketed_arguments:
+              bracketed:
+                start_bracket: (
+                expression:
+                  numeric_literal: '100'
+                end_bracket: )
         - end_bracket: )
     - keyword: AS
     - procedure_statement:
diff --git a/test/fixtures/dialects/tsql/table_variables.yml b/test/fixtures/dialects/tsql/table_variables.yml
index 6cce4d6..86bee43 100644
--- a/test/fixtures/dialects/tsql/table_variables.yml
+++ b/test/fixtures/dialects/tsql/table_variables.yml
@@ -3,7 +3,7 @@
 # computed by SQLFluff when running the tests. Please run
 # `python test/generate_parse_fixture_yml.py`  to generate them after adding or
 # altering SQL files.
-_hash: 512c12d4847dc7dbbf698c6196f18b67857899b2bfc9f15232b5fceaa093e2b1
+_hash: 862b2714c5dba3bc89677f4191a6c8ab1c6135281db59a5ba2e5b9183b80c83c
 file:
   batch:
     statement:
@@ -22,9 +22,10 @@ file:
             naked_identifier: url
             data_type:
               data_type_identifier: nvarchar
-              bracketed:
-                start_bracket: (
-                expression:
-                  numeric_literal: '100'
-                end_bracket: )
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
         - end_bracket: )
diff --git a/test/fixtures/dialects/tsql/tablesample.sql b/test/fixtures/dialects/tsql/tablesample.sql
new file mode 100644
index 0000000..8856de4
--- /dev/null
+++ b/test/fixtures/dialects/tsql/tablesample.sql
@@ -0,0 +1,11 @@
+SELECT *
+FROM Sales.Customer TABLESAMPLE SYSTEM (10 PERCENT);
+
+SELECT *
+FROM Sales.Customer TABLESAMPLE (10 ROWS);
+
+SELECT *
+FROM Sales.Customer TABLESAMPLE (10);
+
+SELECT *
+FROM Sales.Customer TABLESAMPLE SYSTEM (10 ROWS) REPEATABLE (100);
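
One behavioural note on the third statement above: when TABLESAMPLE is given a bare number, SQL Server documents PERCENT as the default unit, so the two forms below should be equivalent:

    SELECT * FROM Sales.Customer TABLESAMPLE (10);          -- implicitly 10 PERCENT
    SELECT * FROM Sales.Customer TABLESAMPLE (10 PERCENT);
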
diff --git a/test/fixtures/dialects/tsql/tablesample.yml b/test/fixtures/dialects/tsql/tablesample.yml
new file mode 100644
index 0000000..0b413a8
--- /dev/null
+++ b/test/fixtures/dialects/tsql/tablesample.yml
@@ -0,0 +1,114 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: d871c98295ef570f2ce25f7f7b737c474e7cee35204d5e3e0143b5cbfb93f541
+file:
+  batch:
+  - statement:
+      select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                - naked_identifier: Sales
+                - dot: .
+                - naked_identifier: Customer
+              sample_expression:
+              - keyword: TABLESAMPLE
+              - keyword: SYSTEM
+              - bracketed:
+                  start_bracket: (
+                  numeric_literal: '10'
+                  keyword: PERCENT
+                  end_bracket: )
+          statement_terminator: ;
+  - statement:
+      select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                - naked_identifier: Sales
+                - dot: .
+                - naked_identifier: Customer
+              sample_expression:
+                keyword: TABLESAMPLE
+                bracketed:
+                  start_bracket: (
+                  numeric_literal: '10'
+                  keyword: ROWS
+                  end_bracket: )
+          statement_terminator: ;
+  - statement:
+      select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                - naked_identifier: Sales
+                - dot: .
+                - naked_identifier: Customer
+              sample_expression:
+                keyword: TABLESAMPLE
+                bracketed:
+                  start_bracket: (
+                  numeric_literal: '10'
+                  end_bracket: )
+          statement_terminator: ;
+  - statement:
+      select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                - naked_identifier: Sales
+                - dot: .
+                - naked_identifier: Customer
+              sample_expression:
+              - keyword: TABLESAMPLE
+              - keyword: SYSTEM
+              - bracketed:
+                  start_bracket: (
+                  numeric_literal: '10'
+                  keyword: ROWS
+                  end_bracket: )
+              - keyword: REPEATABLE
+              - bracketed:
+                  start_bracket: (
+                  numeric_literal: '100'
+                  end_bracket: )
+          statement_terminator: ;
diff --git a/test/fixtures/dialects/tsql/temporal_tables.sql b/test/fixtures/dialects/tsql/temporal_tables.sql
new file mode 100644
index 0000000..81ca603
--- /dev/null
+++ b/test/fixtures/dialects/tsql/temporal_tables.sql
@@ -0,0 +1,143 @@
+-- Select Query Temporal Tables
+
+SELECT * FROM Employee
+  FOR SYSTEM_TIME
+    BETWEEN '2021-01-01 00:00:00.0000000' AND '2022-01-01 00:00:00.0000000';
+
+SELECT * FROM Employee
+  FOR SYSTEM_TIME ALL;
+
+
+ SELECT * FROM Employee
+  FOR SYSTEM_TIME
+    FROM '2021-01-01 00:00:00.0000000' TO '2022-01-01 00:00:00.0000000';
+
+SELECT * FROM Employee
+  FOR SYSTEM_TIME
+    AS OF '2021-01-01 00:00:00.0000000';
+
+SELECT * FROM Employee
+  FOR SYSTEM_TIME
+    CONTAINED IN ('2021-01-01 00:00:00.0000000', '2022-01-01 00:00:00.0000000');
+
+-- Create Temporal Tables
+
+CREATE TABLE [dbo].[EC DC] (
+    [Column B] [varchar](100),
+    [ColumnC] varchar(100),
+    [ColumnDecimal] decimal(10,3)
+)
+WITH (SYSTEM_VERSIONING = ON (HISTORY_TABLE = dbo.EmployeeHistory), DURABILITY = SCHEMA_ONLY );
+;
+GO
+
+
+-- https://learn.microsoft.com/en-us/sql/relational-databases/tables/creating-a-system-versioned-temporal-table?view=sql-server-ver16#creating-a-temporal-table-with-a-default-history-table
+CREATE TABLE Department
+(
+    DeptID INT NOT NULL PRIMARY KEY CLUSTERED
+  , DeptName VARCHAR(50) NOT NULL
+  , ManagerID INT NULL
+  , ParentDeptID INT NULL
+  , ValidFrom DATETIME2 GENERATED ALWAYS AS ROW START NOT NULL
+  , ValidTo DATETIME2 GENERATED ALWAYS AS ROW END NOT NULL
+  , PERIOD FOR SYSTEM_TIME (ValidFrom, ValidTo)
+)
+WITH (SYSTEM_VERSIONING = ON (HISTORY_TABLE = dbo.DepartmentHistory))
+;
+GO
+
+CREATE TABLE [dbo].[EC DC] (
+    [Column B] [varchar](100),
+    [ColumnC] varchar(100),
+    [ColumnDecimal] decimal(10,3)
+)
+WITH (FILETABLE_PRIMARY_KEY_CONSTRAINT_NAME = COLUMNC );
+;
+GO
+
+CREATE TABLE [dbo].[EC DC] (
+    [Column B] [varchar](100),
+    [ColumnC] varchar(100),
+    [ColumnDecimal] decimal(10,3)
+)
+WITH (DATA_DELETION = ON (FILTER_COLUMN = ColumnC, RETENTION_PERIOD = INFINITE));
+;
+GO
+
+CREATE TABLE [dbo].[EC DC] (
+    [Column B] [varchar](100),
+    [ColumnC] varchar(100),
+    [ColumnDecimal] decimal(10,3)
+)
+WITH
+(
+    MEMORY_OPTIMIZED = ON,
+    DURABILITY = SCHEMA_AND_DATA,
+    SYSTEM_VERSIONING = ON (HISTORY_TABLE = History.DepartmentHistory)
+);
+GO
+
+CREATE TABLE [dbo].[EC DC] (
+    [Column B] [varchar](100),
+    [ColumnC] varchar(100),
+    [ColumnDecimal] decimal(10,3)
+)
+WITH
+(
+    REMOTE_DATA_ARCHIVE = OFF ( MIGRATION_STATE = PAUSED ),
+    LEDGER = ON (LEDGER_VIEW = dbo.ABC (TRANSACTION_ID_COLUMN_NAME = [ColumnC], SEQUENCE_NUMBER_COLUMN_NAME = [ColumnDecimal]))
+);
+GO
+
+CREATE TABLE [dbo].[EC DC] (
+    [Column B] [varchar](100),
+    [ColumnC] varchar(100),
+    [ColumnDecimal] decimal(10,3)
+)
+WITH
+(
+    DATA_COMPRESSION = ROW
+    XML_COMPRESSION = ON ON PARTITIONS (2)
+);
+GO
+
+CREATE TABLE [dbo].[EC DC] (
+    [Column B] [varchar](100),
+    [ColumnC] varchar(100),
+    [ColumnDecimal] decimal(10,3)
+)
+WITH
+(
+    DATA_COMPRESSION = PAGE ON PARTITIONS (3, 5)
+    XML_COMPRESSION = OFF
+);
+GO
+
+CREATE TABLE [dbo].[EC DC] (
+    [Column B] [varchar](100),
+    [ColumnC] varchar(100),
+    [ColumnDecimal] decimal(10,3)
+)
+WITH
+(
+    XML_COMPRESSION = ON ON PARTITIONS (3 TO 5),
+    FILETABLE_DIRECTORY = '/path1/path2',
+    FILETABLE_COLLATE_FILENAME = constraint1,
+    FILETABLE_PRIMARY_KEY_CONSTRAINT_NAME = constraint2,
+    FILETABLE_STREAMID_UNIQUE_CONSTRAINT_NAME = constraint3,
+    FILETABLE_FULLPATH_UNIQUE_CONSTRAINT_NAME = constraint4
+);
+GO
+
+CREATE TABLE [dbo].[EC DC] (
+    [Column B] [varchar](100),
+    [ColumnC] varchar(100),
+    [ColumnDecimal] decimal(10,3)
+)
+WITH
+(
+    REMOTE_DATA_ARCHIVE = ON ( FILTER_PREDICATE = NULL, MIGRATION_STATE = OUTBOUND),
+    LEDGER = ON (LEDGER_VIEW = dbo.ABC, APPEND_ONLY = ON)
+);
+GO
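
On the temporal queries at the top of this fixture: BETWEEN and FROM ... TO differ only at the upper boundary; BETWEEN includes rows whose period starts exactly at the upper bound, while FROM ... TO excludes them (standard SQL Server semantics, stated here for orientation rather than taken from the fixture):

    SELECT * FROM Employee FOR SYSTEM_TIME
        BETWEEN '2021-01-01 00:00:00.0000000' AND '2022-01-01 00:00:00.0000000';
    SELECT * FROM Employee FOR SYSTEM_TIME
        FROM '2021-01-01 00:00:00.0000000' TO '2022-01-01 00:00:00.0000000';
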
diff --git a/test/fixtures/dialects/tsql/temporal_tables.yml b/test/fixtures/dialects/tsql/temporal_tables.yml
new file mode 100644
index 0000000..d18db95
--- /dev/null
+++ b/test/fixtures/dialects/tsql/temporal_tables.yml
@@ -0,0 +1,944 @@
+# YML test files are auto-generated from SQL files and should not be edited by
+# hand. To help enforce this, the "hash" field in the file must match a hash
+# computed by SQLFluff when running the tests. Please run
+# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
+# altering SQL files.
+_hash: bbd05ea5802b6edbcb5baf6d32bf80afd3ce4fa374f6cf87ff658b3bf31190a8
+file:
+- batch:
+  - statement:
+      select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: Employee
+              temporal_query:
+              - keyword: FOR
+              - keyword: SYSTEM_TIME
+              - keyword: BETWEEN
+              - quoted_literal: "'2021-01-01 00:00:00.0000000'"
+              - keyword: AND
+              - quoted_literal: "'2022-01-01 00:00:00.0000000'"
+          statement_terminator: ;
+  - statement:
+      select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: Employee
+              temporal_query:
+              - keyword: FOR
+              - keyword: SYSTEM_TIME
+              - keyword: ALL
+          statement_terminator: ;
+  - statement:
+      select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: Employee
+              temporal_query:
+              - keyword: FOR
+              - keyword: SYSTEM_TIME
+              - keyword: FROM
+              - quoted_literal: "'2021-01-01 00:00:00.0000000'"
+              - keyword: TO
+              - quoted_literal: "'2022-01-01 00:00:00.0000000'"
+          statement_terminator: ;
+  - statement:
+      select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: Employee
+              temporal_query:
+              - keyword: FOR
+              - keyword: SYSTEM_TIME
+              - keyword: AS
+              - keyword: OF
+              - quoted_literal: "'2021-01-01 00:00:00.0000000'"
+          statement_terminator: ;
+  - statement:
+      select_statement:
+        select_clause:
+          keyword: SELECT
+          select_clause_element:
+            wildcard_expression:
+              wildcard_identifier:
+                star: '*'
+        from_clause:
+          keyword: FROM
+          from_expression:
+            from_expression_element:
+              table_expression:
+                table_reference:
+                  naked_identifier: Employee
+              temporal_query:
+              - keyword: FOR
+              - keyword: SYSTEM_TIME
+              - keyword: CONTAINED
+              - keyword: IN
+              - bracketed:
+                - start_bracket: (
+                - quoted_literal: "'2021-01-01 00:00:00.0000000'"
+                - comma: ','
+                - quoted_literal: "'2022-01-01 00:00:00.0000000'"
+                - end_bracket: )
+          statement_terminator: ;
+  - statement:
+      create_table_statement:
+      - keyword: CREATE
+      - keyword: TABLE
+      - table_reference:
+        - quoted_identifier: '[dbo]'
+        - dot: .
+        - quoted_identifier: '[EC DC]'
+      - bracketed:
+        - start_bracket: (
+        - column_definition:
+            quoted_identifier: '[Column B]'
+            data_type:
+              data_type_identifier: '[varchar]'
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
+        - comma: ','
+        - column_definition:
+            quoted_identifier: '[ColumnC]'
+            data_type:
+              data_type_identifier: varchar
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
+        - comma: ','
+        - column_definition:
+            quoted_identifier: '[ColumnDecimal]'
+            data_type:
+              data_type_identifier: decimal
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '10'
+                - comma: ','
+                - expression:
+                    numeric_literal: '3'
+                - end_bracket: )
+        - end_bracket: )
+      - table_option_statement:
+          keyword: WITH
+          bracketed:
+          - start_bracket: (
+          - keyword: SYSTEM_VERSIONING
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: 'ON'
+          - bracketed:
+              start_bracket: (
+              keyword: HISTORY_TABLE
+              comparison_operator:
+                raw_comparison_operator: '='
+              table_reference:
+              - naked_identifier: dbo
+              - dot: .
+              - naked_identifier: EmployeeHistory
+              end_bracket: )
+          - comma: ','
+          - keyword: DURABILITY
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: SCHEMA_ONLY
+          - end_bracket: )
+      - statement_terminator: ;
+  - statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      create_table_statement:
+      - keyword: CREATE
+      - keyword: TABLE
+      - table_reference:
+          naked_identifier: Department
+      - bracketed:
+        - start_bracket: (
+        - column_definition:
+          - naked_identifier: DeptID
+          - data_type:
+              data_type_identifier: INT
+          - column_constraint_segment:
+            - keyword: NOT
+            - keyword: 'NULL'
+          - column_constraint_segment:
+            - keyword: PRIMARY
+            - keyword: KEY
+            - keyword: CLUSTERED
+        - comma: ','
+        - column_definition:
+            naked_identifier: DeptName
+            data_type:
+              data_type_identifier: VARCHAR
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '50'
+                  end_bracket: )
+            column_constraint_segment:
+            - keyword: NOT
+            - keyword: 'NULL'
+        - comma: ','
+        - column_definition:
+            naked_identifier: ManagerID
+            data_type:
+              data_type_identifier: INT
+            column_constraint_segment:
+              keyword: 'NULL'
+        - comma: ','
+        - column_definition:
+            naked_identifier: ParentDeptID
+            data_type:
+              data_type_identifier: INT
+            column_constraint_segment:
+              keyword: 'NULL'
+        - comma: ','
+        - column_definition:
+          - naked_identifier: ValidFrom
+          - data_type:
+              data_type_identifier: DATETIME2
+          - column_constraint_segment:
+            - keyword: GENERATED
+            - keyword: ALWAYS
+            - keyword: AS
+            - keyword: ROW
+            - keyword: START
+          - column_constraint_segment:
+            - keyword: NOT
+            - keyword: 'NULL'
+        - comma: ','
+        - column_definition:
+          - naked_identifier: ValidTo
+          - data_type:
+              data_type_identifier: DATETIME2
+          - column_constraint_segment:
+            - keyword: GENERATED
+            - keyword: ALWAYS
+            - keyword: AS
+            - keyword: ROW
+            - keyword: END
+          - column_constraint_segment:
+            - keyword: NOT
+            - keyword: 'NULL'
+        - comma: ','
+        - period_segment:
+          - keyword: PERIOD
+          - keyword: FOR
+          - keyword: SYSTEM_TIME
+          - bracketed:
+            - start_bracket: (
+            - column_reference:
+                naked_identifier: ValidFrom
+            - comma: ','
+            - column_reference:
+                naked_identifier: ValidTo
+            - end_bracket: )
+        - end_bracket: )
+      - table_option_statement:
+          keyword: WITH
+          bracketed:
+          - start_bracket: (
+          - keyword: SYSTEM_VERSIONING
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: 'ON'
+          - bracketed:
+              start_bracket: (
+              keyword: HISTORY_TABLE
+              comparison_operator:
+                raw_comparison_operator: '='
+              table_reference:
+              - naked_identifier: dbo
+              - dot: .
+              - naked_identifier: DepartmentHistory
+              end_bracket: )
+          - end_bracket: )
+      - statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      create_table_statement:
+      - keyword: CREATE
+      - keyword: TABLE
+      - table_reference:
+        - quoted_identifier: '[dbo]'
+        - dot: .
+        - quoted_identifier: '[EC DC]'
+      - bracketed:
+        - start_bracket: (
+        - column_definition:
+            quoted_identifier: '[Column B]'
+            data_type:
+              data_type_identifier: '[varchar]'
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
+        - comma: ','
+        - column_definition:
+            quoted_identifier: '[ColumnC]'
+            data_type:
+              data_type_identifier: varchar
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
+        - comma: ','
+        - column_definition:
+            quoted_identifier: '[ColumnDecimal]'
+            data_type:
+              data_type_identifier: decimal
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '10'
+                - comma: ','
+                - expression:
+                    numeric_literal: '3'
+                - end_bracket: )
+        - end_bracket: )
+      - table_option_statement:
+          keyword: WITH
+          bracketed:
+            start_bracket: (
+            keyword: FILETABLE_PRIMARY_KEY_CONSTRAINT_NAME
+            comparison_operator:
+              raw_comparison_operator: '='
+            object_reference:
+              naked_identifier: COLUMNC
+            end_bracket: )
+      - statement_terminator: ;
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      create_table_statement:
+      - keyword: CREATE
+      - keyword: TABLE
+      - table_reference:
+        - quoted_identifier: '[dbo]'
+        - dot: .
+        - quoted_identifier: '[EC DC]'
+      - bracketed:
+        - start_bracket: (
+        - column_definition:
+            quoted_identifier: '[Column B]'
+            data_type:
+              data_type_identifier: '[varchar]'
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
+        - comma: ','
+        - column_definition:
+            quoted_identifier: '[ColumnC]'
+            data_type:
+              data_type_identifier: varchar
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
+        - comma: ','
+        - column_definition:
+            quoted_identifier: '[ColumnDecimal]'
+            data_type:
+              data_type_identifier: decimal
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '10'
+                - comma: ','
+                - expression:
+                    numeric_literal: '3'
+                - end_bracket: )
+        - end_bracket: )
+      - table_option_statement:
+          keyword: WITH
+          bracketed:
+          - start_bracket: (
+          - keyword: DATA_DELETION
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: 'ON'
+          - bracketed:
+            - start_bracket: (
+            - keyword: FILTER_COLUMN
+            - comparison_operator:
+                raw_comparison_operator: '='
+            - column_reference:
+                naked_identifier: ColumnC
+            - comma: ','
+            - keyword: RETENTION_PERIOD
+            - comparison_operator:
+                raw_comparison_operator: '='
+            - date_part: INFINITE
+            - end_bracket: )
+          - end_bracket: )
+      - statement_terminator: ;
+    statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      create_table_statement:
+      - keyword: CREATE
+      - keyword: TABLE
+      - table_reference:
+        - quoted_identifier: '[dbo]'
+        - dot: .
+        - quoted_identifier: '[EC DC]'
+      - bracketed:
+        - start_bracket: (
+        - column_definition:
+            quoted_identifier: '[Column B]'
+            data_type:
+              data_type_identifier: '[varchar]'
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
+        - comma: ','
+        - column_definition:
+            quoted_identifier: '[ColumnC]'
+            data_type:
+              data_type_identifier: varchar
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
+        - comma: ','
+        - column_definition:
+            quoted_identifier: '[ColumnDecimal]'
+            data_type:
+              data_type_identifier: decimal
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '10'
+                - comma: ','
+                - expression:
+                    numeric_literal: '3'
+                - end_bracket: )
+        - end_bracket: )
+      - table_option_statement:
+          keyword: WITH
+          bracketed:
+          - start_bracket: (
+          - keyword: MEMORY_OPTIMIZED
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: 'ON'
+          - comma: ','
+          - keyword: DURABILITY
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: SCHEMA_AND_DATA
+          - comma: ','
+          - keyword: SYSTEM_VERSIONING
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: 'ON'
+          - bracketed:
+              start_bracket: (
+              keyword: HISTORY_TABLE
+              comparison_operator:
+                raw_comparison_operator: '='
+              table_reference:
+              - naked_identifier: History
+              - dot: .
+              - naked_identifier: DepartmentHistory
+              end_bracket: )
+          - end_bracket: )
+      - statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      create_table_statement:
+      - keyword: CREATE
+      - keyword: TABLE
+      - table_reference:
+        - quoted_identifier: '[dbo]'
+        - dot: .
+        - quoted_identifier: '[EC DC]'
+      - bracketed:
+        - start_bracket: (
+        - column_definition:
+            quoted_identifier: '[Column B]'
+            data_type:
+              data_type_identifier: '[varchar]'
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
+        - comma: ','
+        - column_definition:
+            quoted_identifier: '[ColumnC]'
+            data_type:
+              data_type_identifier: varchar
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
+        - comma: ','
+        - column_definition:
+            quoted_identifier: '[ColumnDecimal]'
+            data_type:
+              data_type_identifier: decimal
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '10'
+                - comma: ','
+                - expression:
+                    numeric_literal: '3'
+                - end_bracket: )
+        - end_bracket: )
+      - table_option_statement:
+          keyword: WITH
+          bracketed:
+          - start_bracket: (
+          - keyword: REMOTE_DATA_ARCHIVE
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: 'OFF'
+          - bracketed:
+            - start_bracket: (
+            - keyword: MIGRATION_STATE
+            - comparison_operator:
+                raw_comparison_operator: '='
+            - keyword: PAUSED
+            - end_bracket: )
+          - comma: ','
+          - keyword: LEDGER
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: 'ON'
+          - bracketed:
+              start_bracket: (
+              keyword: LEDGER_VIEW
+              comparison_operator:
+                raw_comparison_operator: '='
+              table_reference:
+              - naked_identifier: dbo
+              - dot: .
+              - naked_identifier: ABC
+              bracketed:
+              - start_bracket: (
+              - keyword: TRANSACTION_ID_COLUMN_NAME
+              - comparison_operator:
+                  raw_comparison_operator: '='
+              - column_reference:
+                  quoted_identifier: '[ColumnC]'
+              - comma: ','
+              - keyword: SEQUENCE_NUMBER_COLUMN_NAME
+              - comparison_operator:
+                  raw_comparison_operator: '='
+              - column_reference:
+                  quoted_identifier: '[ColumnDecimal]'
+              - end_bracket: )
+              end_bracket: )
+          - end_bracket: )
+      - statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      create_table_statement:
+      - keyword: CREATE
+      - keyword: TABLE
+      - table_reference:
+        - quoted_identifier: '[dbo]'
+        - dot: .
+        - quoted_identifier: '[EC DC]'
+      - bracketed:
+        - start_bracket: (
+        - column_definition:
+            quoted_identifier: '[Column B]'
+            data_type:
+              data_type_identifier: '[varchar]'
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
+        - comma: ','
+        - column_definition:
+            quoted_identifier: '[ColumnC]'
+            data_type:
+              data_type_identifier: varchar
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
+        - comma: ','
+        - column_definition:
+            quoted_identifier: '[ColumnDecimal]'
+            data_type:
+              data_type_identifier: decimal
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '10'
+                - comma: ','
+                - expression:
+                    numeric_literal: '3'
+                - end_bracket: )
+        - end_bracket: )
+      - table_option_statement:
+          keyword: WITH
+          bracketed:
+          - start_bracket: (
+          - keyword: DATA_COMPRESSION
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: ROW
+          - keyword: XML_COMPRESSION
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: 'ON'
+          - keyword: 'ON'
+          - keyword: PARTITIONS
+          - bracketed:
+              start_bracket: (
+              numeric_literal: '2'
+              end_bracket: )
+          - end_bracket: )
+      - statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      create_table_statement:
+      - keyword: CREATE
+      - keyword: TABLE
+      - table_reference:
+        - quoted_identifier: '[dbo]'
+        - dot: .
+        - quoted_identifier: '[EC DC]'
+      - bracketed:
+        - start_bracket: (
+        - column_definition:
+            quoted_identifier: '[Column B]'
+            data_type:
+              data_type_identifier: '[varchar]'
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
+        - comma: ','
+        - column_definition:
+            quoted_identifier: '[ColumnC]'
+            data_type:
+              data_type_identifier: varchar
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
+        - comma: ','
+        - column_definition:
+            quoted_identifier: '[ColumnDecimal]'
+            data_type:
+              data_type_identifier: decimal
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '10'
+                - comma: ','
+                - expression:
+                    numeric_literal: '3'
+                - end_bracket: )
+        - end_bracket: )
+      - table_option_statement:
+          keyword: WITH
+          bracketed:
+          - start_bracket: (
+          - keyword: DATA_COMPRESSION
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: PAGE
+          - keyword: 'ON'
+          - keyword: PARTITIONS
+          - bracketed:
+            - start_bracket: (
+            - numeric_literal: '3'
+            - comma: ','
+            - numeric_literal: '5'
+            - end_bracket: )
+          - keyword: XML_COMPRESSION
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: 'OFF'
+          - end_bracket: )
+      - statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      create_table_statement:
+      - keyword: CREATE
+      - keyword: TABLE
+      - table_reference:
+        - quoted_identifier: '[dbo]'
+        - dot: .
+        - quoted_identifier: '[EC DC]'
+      - bracketed:
+        - start_bracket: (
+        - column_definition:
+            quoted_identifier: '[Column B]'
+            data_type:
+              data_type_identifier: '[varchar]'
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
+        - comma: ','
+        - column_definition:
+            quoted_identifier: '[ColumnC]'
+            data_type:
+              data_type_identifier: varchar
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
+        - comma: ','
+        - column_definition:
+            quoted_identifier: '[ColumnDecimal]'
+            data_type:
+              data_type_identifier: decimal
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '10'
+                - comma: ','
+                - expression:
+                    numeric_literal: '3'
+                - end_bracket: )
+        - end_bracket: )
+      - table_option_statement:
+          keyword: WITH
+          bracketed:
+          - start_bracket: (
+          - keyword: XML_COMPRESSION
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: 'ON'
+          - keyword: 'ON'
+          - keyword: PARTITIONS
+          - bracketed:
+            - start_bracket: (
+            - numeric_literal: '3'
+            - keyword: TO
+            - numeric_literal: '5'
+            - end_bracket: )
+          - comma: ','
+          - keyword: FILETABLE_DIRECTORY
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - quoted_literal: "'/path1/path2'"
+          - comma: ','
+          - keyword: FILETABLE_COLLATE_FILENAME
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - object_reference:
+              naked_identifier: constraint1
+          - comma: ','
+          - keyword: FILETABLE_PRIMARY_KEY_CONSTRAINT_NAME
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - object_reference:
+              naked_identifier: constraint2
+          - comma: ','
+          - keyword: FILETABLE_STREAMID_UNIQUE_CONSTRAINT_NAME
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - object_reference:
+              naked_identifier: constraint3
+          - comma: ','
+          - keyword: FILETABLE_FULLPATH_UNIQUE_CONSTRAINT_NAME
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - object_reference:
+              naked_identifier: constraint4
+          - end_bracket: )
+      - statement_terminator: ;
+- go_statement:
+    keyword: GO
+- batch:
+    statement:
+      create_table_statement:
+      - keyword: CREATE
+      - keyword: TABLE
+      - table_reference:
+        - quoted_identifier: '[dbo]'
+        - dot: .
+        - quoted_identifier: '[EC DC]'
+      - bracketed:
+        - start_bracket: (
+        - column_definition:
+            quoted_identifier: '[Column B]'
+            data_type:
+              data_type_identifier: '[varchar]'
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
+        - comma: ','
+        - column_definition:
+            quoted_identifier: '[ColumnC]'
+            data_type:
+              data_type_identifier: varchar
+              bracketed_arguments:
+                bracketed:
+                  start_bracket: (
+                  expression:
+                    numeric_literal: '100'
+                  end_bracket: )
+        - comma: ','
+        - column_definition:
+            quoted_identifier: '[ColumnDecimal]'
+            data_type:
+              data_type_identifier: decimal
+              bracketed_arguments:
+                bracketed:
+                - start_bracket: (
+                - expression:
+                    numeric_literal: '10'
+                - comma: ','
+                - expression:
+                    numeric_literal: '3'
+                - end_bracket: )
+        - end_bracket: )
+      - table_option_statement:
+          keyword: WITH
+          bracketed:
+          - start_bracket: (
+          - keyword: REMOTE_DATA_ARCHIVE
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: 'ON'
+          - bracketed:
+            - start_bracket: (
+            - keyword: FILTER_PREDICATE
+            - comparison_operator:
+                raw_comparison_operator: '='
+            - keyword: 'NULL'
+            - comma: ','
+            - keyword: MIGRATION_STATE
+            - comparison_operator:
+                raw_comparison_operator: '='
+            - keyword: OUTBOUND
+            - end_bracket: )
+          - comma: ','
+          - keyword: LEDGER
+          - comparison_operator:
+              raw_comparison_operator: '='
+          - keyword: 'ON'
+          - bracketed:
+            - start_bracket: (
+            - keyword: LEDGER_VIEW
+            - comparison_operator:
+                raw_comparison_operator: '='
+            - table_reference:
+              - naked_identifier: dbo
+              - dot: .
+              - naked_identifier: ABC
+            - comma: ','
+            - keyword: APPEND_ONLY
+            - comparison_operator:
+                raw_comparison_operator: '='
+            - keyword: 'ON'
+            - end_bracket: )
+          - end_bracket: )
+      - statement_terminator: ;
+- go_statement:
+    keyword: GO
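
The new fixture above encodes a series of T-SQL CREATE TABLE ... WITH (<table option>) batches. As a rough illustration (not part of the upstream change), here is the SQL reconstructed from the final parse tree, checked against the 2.x simple API; the identifiers and options come straight from the YAML, but the whitespace is illustrative only:

    import sqlfluff

    # T-SQL reconstructed from the last parse tree above (REMOTE_DATA_ARCHIVE
    # plus LEDGER options); layout is illustrative, tokens are from the YAML.
    sql = """
    CREATE TABLE [dbo].[EC DC] (
        [Column B] [varchar](100),
        [ColumnC] varchar(100),
        [ColumnDecimal] decimal(10, 3)
    )
    WITH (
        REMOTE_DATA_ARCHIVE = ON (FILTER_PREDICATE = NULL, MIGRATION_STATE = OUTBOUND),
        LEDGER = ON (LEDGER_VIEW = dbo.ABC, APPEND_ONLY = ON)
    );
    GO
    """

    # parse() raises an APIParsingError if the tsql dialect rejects the
    # statement; reaching the print means it parsed cleanly.
    sqlfluff.parse(sql, dialect="tsql")
    print("parsed OK")
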
diff --git a/test/fixtures/dialects/tsql/xml.sql b/test/fixtures/dialects/tsql/xml.sql
deleted file mode 100644
index ce412a2..0000000
--- a/test/fixtures/dialects/tsql/xml.sql
+++ /dev/null
@@ -1,20 +0,0 @@
-SELECT ProductModelID, Name
-FROM Production.ProductModel
-WHERE ProductModelID=122 or ProductModelID=119
-FOR XML RAW;
-
-SELECT ProductPhotoID, ThumbNailPhoto
-FROM   Production.ProductPhoto
-WHERE ProductPhotoID=70
-FOR XML AUTO;
-
-SELECT 1    as Tag
-FROM   HumanResources.Employee AS E
-FOR XML EXPLICIT;
-
-SELECT
-    ProductModelID,
-    Name
-FROM Production.ProductModel
-WHERE ProductModelID=122 OR ProductModelID=119
-FOR XML PATH ('root');
diff --git a/test/fixtures/dialects/tsql/xml.yml b/test/fixtures/dialects/tsql/xml.yml
deleted file mode 100644
index 889121e..0000000
--- a/test/fixtures/dialects/tsql/xml.yml
+++ /dev/null
@@ -1,149 +0,0 @@
-# YML test files are auto-generated from SQL files and should not be edited by
-# hand. To help enforce this, the "hash" field in the file must match a hash
-# computed by SQLFluff when running the tests. Please run
-# `python test/generate_parse_fixture_yml.py`  to generate them after adding or
-# altering SQL files.
-_hash: a422ecc512afbe91b18ea0821af5756317bb749bc5abe4ec0cc4ed63e6bcde5d
-file:
-  batch:
-  - statement:
-      select_statement:
-        select_clause:
-        - keyword: SELECT
-        - select_clause_element:
-            column_reference:
-              naked_identifier: ProductModelID
-        - comma: ','
-        - select_clause_element:
-            column_reference:
-              naked_identifier: Name
-        from_clause:
-          keyword: FROM
-          from_expression:
-            from_expression_element:
-              table_expression:
-                table_reference:
-                - naked_identifier: Production
-                - dot: .
-                - naked_identifier: ProductModel
-        where_clause:
-          keyword: WHERE
-          expression:
-          - column_reference:
-              naked_identifier: ProductModelID
-          - comparison_operator:
-              raw_comparison_operator: '='
-          - numeric_literal: '122'
-          - binary_operator: or
-          - column_reference:
-              naked_identifier: ProductModelID
-          - comparison_operator:
-              raw_comparison_operator: '='
-          - numeric_literal: '119'
-        for_xml_segment:
-        - keyword: FOR
-        - keyword: XML
-        - keyword: RAW
-  - statement_terminator: ;
-  - statement:
-      select_statement:
-        select_clause:
-        - keyword: SELECT
-        - select_clause_element:
-            column_reference:
-              naked_identifier: ProductPhotoID
-        - comma: ','
-        - select_clause_element:
-            column_reference:
-              naked_identifier: ThumbNailPhoto
-        from_clause:
-          keyword: FROM
-          from_expression:
-            from_expression_element:
-              table_expression:
-                table_reference:
-                - naked_identifier: Production
-                - dot: .
-                - naked_identifier: ProductPhoto
-        where_clause:
-          keyword: WHERE
-          expression:
-            column_reference:
-              naked_identifier: ProductPhotoID
-            comparison_operator:
-              raw_comparison_operator: '='
-            numeric_literal: '70'
-        for_xml_segment:
-        - keyword: FOR
-        - keyword: XML
-        - keyword: AUTO
-  - statement_terminator: ;
-  - statement:
-      select_statement:
-        select_clause:
-          keyword: SELECT
-          select_clause_element:
-            numeric_literal: '1'
-            alias_expression:
-              keyword: as
-              naked_identifier: Tag
-        from_clause:
-          keyword: FROM
-          from_expression:
-            from_expression_element:
-              table_expression:
-                table_reference:
-                - naked_identifier: HumanResources
-                - dot: .
-                - naked_identifier: Employee
-              alias_expression:
-                keyword: AS
-                naked_identifier: E
-        for_xml_segment:
-        - keyword: FOR
-        - keyword: XML
-        - keyword: EXPLICIT
-  - statement_terminator: ;
-  - statement:
-      select_statement:
-        select_clause:
-        - keyword: SELECT
-        - select_clause_element:
-            column_reference:
-              naked_identifier: ProductModelID
-        - comma: ','
-        - select_clause_element:
-            column_reference:
-              naked_identifier: Name
-        from_clause:
-          keyword: FROM
-          from_expression:
-            from_expression_element:
-              table_expression:
-                table_reference:
-                - naked_identifier: Production
-                - dot: .
-                - naked_identifier: ProductModel
-        where_clause:
-          keyword: WHERE
-          expression:
-          - column_reference:
-              naked_identifier: ProductModelID
-          - comparison_operator:
-              raw_comparison_operator: '='
-          - numeric_literal: '122'
-          - binary_operator: OR
-          - column_reference:
-              naked_identifier: ProductModelID
-          - comparison_operator:
-              raw_comparison_operator: '='
-          - numeric_literal: '119'
-        for_xml_segment:
-        - keyword: FOR
-        - keyword: XML
-        - keyword: PATH
-        - bracketed:
-            start_bracket: (
-            quoted_literal: "'root'"
-            end_bracket: )
-  - statement_terminator: ;
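
The header of the deleted fixture above documents the regeneration workflow: the YAML files are generated from the SQL files and guarded by the _hash field. A minimal sketch of invoking that script (the command itself comes from the fixture header; running it from the repository root is an assumption):

    import subprocess

    # Regenerates test/fixtures/dialects/**/*.yml from the .sql files, as the
    # fixture header instructs; assumes the working directory is the repo root.
    subprocess.run(
        ["python", "test/generate_parse_fixture_yml.py"],
        check=True,
    )
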
diff --git a/test/fixtures/linter/.gitignore b/test/fixtures/linter/.gitignore
new file mode 100644
index 0000000..19aec36
--- /dev/null
+++ b/test/fixtures/linter/.gitignore
@@ -0,0 +1,2 @@
+# Results of fixed tests
+*_fix.sql
diff --git a/test/fixtures/linter/aliases_in_join_error.sql b/test/fixtures/linter/aliases_in_join_error.sql
deleted file mode 100644
index df6285f..0000000
--- a/test/fixtures/linter/aliases_in_join_error.sql
+++ /dev/null
@@ -1,8 +0,0 @@
-SELECT
-    u.id,
-    c.first_name,
-    c.last_name,
-    COUNT(o.user_id)
-FROM users as u
-JOIN customers as c on u.id = c.user_id
-JOIN orders as o on u.id = o.user_id;
diff --git a/test/fixtures/linter/autofix/ansi/001_long_line/after.sql b/test/fixtures/linter/autofix/ansi/001_long_line/after.sql
index 53dff44..c5e3b96 100644
--- a/test/fixtures/linter/autofix/ansi/001_long_line/after.sql
+++ b/test/fixtures/linter/autofix/ansi/001_long_line/after.sql
@@ -1,17 +1,19 @@
 WITH all_upstream_matches AS (
     SELECT
-        ROW_NUMBER() OVER (
-            PARTITION BY
-                low_business_type,
-                low_size_label,
-                low_gender_label,
-                low_age_label
-            ORDER BY
-                business_type DESC,
-                size_label DESC,
-                gender_label DESC,
-                age_label DESC
-        ) AS rownum,
+        ROW_NUMBER()
+            OVER (
+                PARTITION BY
+                    low_business_type,
+                    low_size_label,
+                    low_gender_label,
+                    low_age_label
+                ORDER BY
+                    business_type DESC,
+                    size_label DESC,
+                    gender_label DESC,
+                    age_label DESC
+            )
+            AS rownum,
         business_type
     FROM
         acceptable_buckets
diff --git a/test/fixtures/linter/autofix/ansi/001_long_line/test-config.yml b/test/fixtures/linter/autofix/ansi/001_long_line/test-config.yml
index dcccaed..98f4f48 100644
--- a/test/fixtures/linter/autofix/ansi/001_long_line/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/001_long_line/test-config.yml
@@ -1,3 +1,3 @@
 test-config:
   rules:
-    - L016
+    - LT05
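
This test-config update reflects the sqlfluff 2.0 rule renaming (L016 becomes LT05, L003 becomes LT02, and so on, as the remaining hunks below show). A minimal sketch of selecting a rule by its new code through the simple API; the default 80-character line length is an assumption here, not something this fixture sets:

    import sqlfluff

    # A deliberately long line to trip LT05 (line length), formerly L016.
    long_sql = "SELECT " + ", ".join(f"col_{i}" for i in range(30)) + " FROM t\n"

    violations = sqlfluff.lint(long_sql, dialect="ansi", rules=["LT05"])
    print([v["code"] for v in violations])  # expect LT05 here
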
diff --git a/test/fixtures/linter/autofix/ansi/001_long_line/violations.json b/test/fixtures/linter/autofix/ansi/001_long_line/violations.json
index 4d422a1..10932bb 100644
--- a/test/fixtures/linter/autofix/ansi/001_long_line/violations.json
+++ b/test/fixtures/linter/autofix/ansi/001_long_line/violations.json
@@ -1,8 +1,8 @@
 {
     "violations":{
         "linting":{
-            "L016": [
-                [3, 199]
+            "LT05": [
+                [3, 9]
             ]
         }
     }
diff --git a/test/fixtures/linter/autofix/ansi/002_indentation/after.sql b/test/fixtures/linter/autofix/ansi/002_indentation/after.sql
index e833fdf..bb50394 100644
--- a/test/fixtures/linter/autofix/ansi/002_indentation/after.sql
+++ b/test/fixtures/linter/autofix/ansi/002_indentation/after.sql
@@ -8,14 +8,18 @@ SELECT
     (
         a.over_indented_line
     ) as bar,
-    a.line + (a.with
-              + a.hanging_indent) as actually_ok,
-    a.line + (a.with
-              + a.bad_hanging_indent) as problem,
+    a.line + (
+        a.with
+        + a.hanging_indent
+    ) as actually_ok,
+    a.line + (
+        a.with
+        + a.bad_hanging_indent
+    ) as problem,
     a.line + (
         a.something_indented_well
         + least(
-    	    a.good_example,
+            a.good_example,
             a.bad_example,
             a.really_bad_example,
             a.nother_good_example
diff --git a/test/fixtures/linter/autofix/ansi/002_indentation/test-config.yml b/test/fixtures/linter/autofix/ansi/002_indentation/test-config.yml
index 587d14d..1f7873f 100644
--- a/test/fixtures/linter/autofix/ansi/002_indentation/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/002_indentation/test-config.yml
@@ -1,3 +1,3 @@
 test-config:
   rules:
-    - L003
+    - LT02
diff --git a/test/fixtures/linter/autofix/ansi/003_long_line/after.sql b/test/fixtures/linter/autofix/ansi/003_long_line/after.sql
index 15ca81f..1954d15 100644
--- a/test/fixtures/linter/autofix/ansi/003_long_line/after.sql
+++ b/test/fixtures/linter/autofix/ansi/003_long_line/after.sql
@@ -5,4 +5,4 @@ SELECT
         5 + 6,
         SQRT(a.nother_long_variable_name_of_some_kind)
     ) AS second_one
-FROM this_other_table
+    FROM this_other_table
diff --git a/test/fixtures/linter/autofix/ansi/003_long_line/test-config.yml b/test/fixtures/linter/autofix/ansi/003_long_line/test-config.yml
index dcccaed..98f4f48 100644
--- a/test/fixtures/linter/autofix/ansi/003_long_line/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/003_long_line/test-config.yml
@@ -1,3 +1,3 @@
 test-config:
   rules:
-    - L016
+    - LT05
diff --git a/test/fixtures/linter/autofix/ansi/004_indentation/after.sql b/test/fixtures/linter/autofix/ansi/004_indentation/after.sql
index 0c49bed..df59fea 100644
--- a/test/fixtures/linter/autofix/ansi/004_indentation/after.sql
+++ b/test/fixtures/linter/autofix/ansi/004_indentation/after.sql
@@ -9,7 +9,8 @@ WITH audience_counts AS (
         list_emails.active != 'D'
     GROUP BY
         user_id,
-        list_id)
+        list_id
+)
 
 SELECT
     user_id,
@@ -17,12 +18,14 @@ SELECT
     audience,
     CASE
         WHEN audience > 0 AND audience <= 200 THEN '< 200'
-        WHEN audience > 200
-             AND audience <= 3000
-             -- NB: This one is a hanging indent, which should pass.
-             AND audience <= 2000 THEN '200 - 2,000'
+        WHEN
+            audience > 200
+            AND audience <= 3000
+            -- NB: This one is a hanging indent, which should be modified.
+            AND audience <= 2000 THEN '200 - 2,000'
         WHEN audience > 2000 AND audience <= 10000 THEN '2,000 - 10,000'
-        WHEN audience > 10000
+        WHEN
+            audience > 10000
             AND audience <= 50000 THEN '10,000 - 50,000'
         WHEN audience > 50000 AND audience <= 500000 THEN '50,000 - 500,000'
         WHEN audience > 500000 THEN '> 500,000'
diff --git a/test/fixtures/linter/autofix/ansi/004_indentation/before.sql b/test/fixtures/linter/autofix/ansi/004_indentation/before.sql
index d6044f4..37c636d 100644
--- a/test/fixtures/linter/autofix/ansi/004_indentation/before.sql
+++ b/test/fixtures/linter/autofix/ansi/004_indentation/before.sql
@@ -19,7 +19,7 @@ SELECT
         WHEN audience > 0 AND audience <= 200 THEN '< 200'
         WHEN audience > 200
              AND audience <= 3000
-             -- NB: This one is a hanging indent, which should pass.
+             -- NB: This one is a hanging indent, which should be modified.
              AND audience <= 2000 THEN '200 - 2,000'
         WHEN audience > 2000 AND audience <= 10000 THEN '2,000 - 10,000'
         WHEN audience > 10000
diff --git a/test/fixtures/linter/autofix/ansi/004_indentation/test-config.yml b/test/fixtures/linter/autofix/ansi/004_indentation/test-config.yml
index 587d14d..1f7873f 100644
--- a/test/fixtures/linter/autofix/ansi/004_indentation/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/004_indentation/test-config.yml
@@ -1,3 +1,3 @@
 test-config:
   rules:
-    - L003
+    - LT02
diff --git a/test/fixtures/linter/autofix/ansi/005_function_spacing/test-config.yml b/test/fixtures/linter/autofix/ansi/005_function_spacing/test-config.yml
index 44d69c7..ffa53bf 100644
--- a/test/fixtures/linter/autofix/ansi/005_function_spacing/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/005_function_spacing/test-config.yml
@@ -1,3 +1,3 @@
 test-config:
   rules:
-    - L017
+    - LT06
diff --git a/test/fixtures/linter/autofix/ansi/006_indentation/test-config.yml b/test/fixtures/linter/autofix/ansi/006_indentation/test-config.yml
index 587d14d..1f7873f 100644
--- a/test/fixtures/linter/autofix/ansi/006_indentation/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/006_indentation/test-config.yml
@@ -1,3 +1,3 @@
 test-config:
   rules:
-    - L003
+    - LT02
diff --git a/test/fixtures/linter/autofix/ansi/007_with_clause/after.sql b/test/fixtures/linter/autofix/ansi/007_with_clause/after.sql
index 1734088..7af5816 100644
--- a/test/fixtures/linter/autofix/ansi/007_with_clause/after.sql
+++ b/test/fixtures/linter/autofix/ansi/007_with_clause/after.sql
@@ -1,5 +1,5 @@
 -- Dealing with complicated indents before with clauses.
-    	WITH cte as (
-            select a from tbla
-        )
-        select a from cte
+WITH cte as (
+    select a from tbla
+)
+select a from cte
diff --git a/test/fixtures/linter/autofix/ansi/007_with_clause/test-config.yml b/test/fixtures/linter/autofix/ansi/007_with_clause/test-config.yml
index e6e5d08..eebc94c 100644
--- a/test/fixtures/linter/autofix/ansi/007_with_clause/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/007_with_clause/test-config.yml
@@ -1,4 +1,4 @@
 test-config:
   rules:
-    - L018
-    - L003
+    - LT07
+    - LT02
diff --git a/test/fixtures/linter/autofix/ansi/008_looping_rules_l003_l016_l019/after.sql b/test/fixtures/linter/autofix/ansi/008_looping_rules_LT02_LT04_LT05/after.sql
similarity index 100%
rename from test/fixtures/linter/autofix/ansi/008_looping_rules_l003_l016_l019/after.sql
rename to test/fixtures/linter/autofix/ansi/008_looping_rules_LT02_LT04_LT05/after.sql
diff --git a/test/fixtures/linter/autofix/ansi/008_looping_rules_l003_l016_l019/before.sql b/test/fixtures/linter/autofix/ansi/008_looping_rules_LT02_LT04_LT05/before.sql
similarity index 100%
rename from test/fixtures/linter/autofix/ansi/008_looping_rules_l003_l016_l019/before.sql
rename to test/fixtures/linter/autofix/ansi/008_looping_rules_LT02_LT04_LT05/before.sql
diff --git a/test/fixtures/linter/autofix/ansi/008_looping_rules_LT02_LT04_LT05/test-config.yml b/test/fixtures/linter/autofix/ansi/008_looping_rules_LT02_LT04_LT05/test-config.yml
new file mode 100644
index 0000000..d220855
--- /dev/null
+++ b/test/fixtures/linter/autofix/ansi/008_looping_rules_LT02_LT04_LT05/test-config.yml
@@ -0,0 +1,5 @@
+test-config:
+  rules:
+    - LT02
+    - LT05
+    - LT04
diff --git a/test/fixtures/linter/autofix/ansi/008_looping_rules_l003_l016_l019/test-config.yml b/test/fixtures/linter/autofix/ansi/008_looping_rules_l003_l016_l019/test-config.yml
deleted file mode 100644
index f23c273..0000000
--- a/test/fixtures/linter/autofix/ansi/008_looping_rules_l003_l016_l019/test-config.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-test-config:
-  rules:
-    - L003
-    - L016
-    - L019
diff --git a/test/fixtures/linter/autofix/ansi/008_with_clause/test-config.yml b/test/fixtures/linter/autofix/ansi/008_with_clause/test-config.yml
index b087064..74fe431 100644
--- a/test/fixtures/linter/autofix/ansi/008_with_clause/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/008_with_clause/test-config.yml
@@ -1,5 +1,4 @@
 test-config:
   rules:
-    - L022
-    - L023
-    - L024
+    - LT01
+    - LT08
diff --git a/test/fixtures/linter/autofix/ansi/009_keyword_capitalisation/.sqlfluff b/test/fixtures/linter/autofix/ansi/009_keyword_capitalisation/.sqlfluff
index 1fdf790..b36d094 100644
--- a/test/fixtures/linter/autofix/ansi/009_keyword_capitalisation/.sqlfluff
+++ b/test/fixtures/linter/autofix/ansi/009_keyword_capitalisation/.sqlfluff
@@ -1,2 +1,2 @@
-[sqlfluff:rules:L010]
+[sqlfluff:rules:capitalisation.keywords]
 capitalisation_policy = lower
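
Rule configuration sections are renamed the same way: the fixture now configures capitalisation.keywords rather than L010. A small sketch of the corresponding rule via the simple API; it relies on CP01's default "consistent" policy rather than the "lower" policy this fixture sets, since the call below doesn't load the fixture's .sqlfluff:

    import sqlfluff

    # CP01 (capitalisation.keywords) with its default "consistent" policy:
    # the lower-case "from" is rewritten to match the upper-case "SELECT".
    print(sqlfluff.fix("SELECT 1 from tbl\n", dialect="ansi", rules=["CP01"]))
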
diff --git a/test/fixtures/linter/autofix/ansi/009_keyword_capitalisation/test-config.yml b/test/fixtures/linter/autofix/ansi/009_keyword_capitalisation/test-config.yml
index 7b022e0..cb862d9 100644
--- a/test/fixtures/linter/autofix/ansi/009_keyword_capitalisation/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/009_keyword_capitalisation/test-config.yml
@@ -1,4 +1,4 @@
 test-config:
   rules:
-    - L010
-    - L040
+    - CP01
+    - CP04
diff --git a/test/fixtures/linter/autofix/ansi/010_CTEs_and_newlines/test-config.yml b/test/fixtures/linter/autofix/ansi/010_CTEs_and_newlines/test-config.yml
index f67e677..13e4925 100644
--- a/test/fixtures/linter/autofix/ansi/010_CTEs_and_newlines/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/010_CTEs_and_newlines/test-config.yml
@@ -1,3 +1,3 @@
 test-config:
   rules:
-    - L022
+    - LT08
diff --git a/test/fixtures/linter/autofix/ansi/011_indentation/test-config.yml b/test/fixtures/linter/autofix/ansi/011_indentation/test-config.yml
index b65bbdd..f285379 100644
--- a/test/fixtures/linter/autofix/ansi/011_indentation/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/011_indentation/test-config.yml
@@ -1,4 +1,4 @@
 test-config:
   rules:
-    - L003
-    - L006
+    - LT01
+    - LT02
diff --git a/test/fixtures/linter/autofix/ansi/012_templating/test-config.yml b/test/fixtures/linter/autofix/ansi/012_templating/test-config.yml
index 9c7b5c6..e3c328d 100644
--- a/test/fixtures/linter/autofix/ansi/012_templating/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/012_templating/test-config.yml
@@ -1,3 +1,3 @@
 test-config:
   rules:
-    - L006
+    - LT01
diff --git a/test/fixtures/linter/autofix/ansi/013_order_by_explicit/test-config.yml b/test/fixtures/linter/autofix/ansi/013_order_by_explicit/test-config.yml
index 3206c09..f6185d6 100644
--- a/test/fixtures/linter/autofix/ansi/013_order_by_explicit/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/013_order_by_explicit/test-config.yml
@@ -1,3 +1,3 @@
 test-config:
   rules:
-    - L037
+    - AM03
diff --git a/test/fixtures/linter/autofix/ansi/014_looping_interaction_between_l008_and_l030/after.sql b/test/fixtures/linter/autofix/ansi/014_looping_interaction_between_l008_and_l030/after.sql
index 6163f5d..95377f0 100644
--- a/test/fixtures/linter/autofix/ansi/014_looping_interaction_between_l008_and_l030/after.sql
+++ b/test/fixtures/linter/autofix/ansi/014_looping_interaction_between_l008_and_l030/after.sql
@@ -1 +1 @@
-SELECT FLOOR(dt) , COUNT(*) FROM test
\ No newline at end of file
+SELECT FLOOR(dt), COUNT(*) FROM test
\ No newline at end of file
diff --git a/test/fixtures/linter/autofix/ansi/014_looping_interaction_between_l008_and_l030/test-config.yml b/test/fixtures/linter/autofix/ansi/014_looping_interaction_between_l008_and_l030/test-config.yml
index 9cbe8fa..944c23f 100644
--- a/test/fixtures/linter/autofix/ansi/014_looping_interaction_between_l008_and_l030/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/014_looping_interaction_between_l008_and_l030/test-config.yml
@@ -1,4 +1,4 @@
 test-config:
   rules:
-    - L008
-    - L030
+    - LT01
+    - CP03
diff --git a/test/fixtures/linter/autofix/ansi/015_jinja_leading_whitespace/test-config.yml b/test/fixtures/linter/autofix/ansi/015_jinja_leading_whitespace/test-config.yml
index b51361c..d06aa11 100644
--- a/test/fixtures/linter/autofix/ansi/015_jinja_leading_whitespace/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/015_jinja_leading_whitespace/test-config.yml
@@ -1,3 +1,3 @@
 test-config:
   rules:
-    - L036
+    - LT09
diff --git a/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if/test-config.yml b/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if/test-config.yml
index 4805146..74d23fd 100644
--- a/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if/test-config.yml
@@ -1,4 +1,4 @@
 test-config:
   rules:
-    - L003
-    - L016
+    - LT02
+    - LT05
diff --git a/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if2/test-config.yml b/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if2/test-config.yml
index 4805146..74d23fd 100644
--- a/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if2/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if2/test-config.yml
@@ -1,4 +1,4 @@
 test-config:
   rules:
-    - L003
-    - L016
+    - LT02
+    - LT05
diff --git a/test/fixtures/linter/autofix/ansi/018_l003_indent_templated_code/.sqlfluff b/test/fixtures/linter/autofix/ansi/018_LT02_indent_templated_code/.sqlfluff
similarity index 84%
rename from test/fixtures/linter/autofix/ansi/018_l003_indent_templated_code/.sqlfluff
rename to test/fixtures/linter/autofix/ansi/018_LT02_indent_templated_code/.sqlfluff
index e12226d..a426d9c 100644
--- a/test/fixtures/linter/autofix/ansi/018_l003_indent_templated_code/.sqlfluff
+++ b/test/fixtures/linter/autofix/ansi/018_LT02_indent_templated_code/.sqlfluff
@@ -1,7 +1,7 @@
 [sqlfluff]
 
 # TODO: This setting defaults to true. I had to set it false in order to allow a
-# rule (L003) to indent a templated table name. Technically, indenting templated
+# rule (LT02) to indent a templated table name. Technically, indenting templated
 # code is not "touching" templated code, but in order for SQLFluff to detect
 # this and allow the fixes to be applied using default settings,  we'd need to
 # tweak some of the anchor and create logic for LintResult.
diff --git a/test/fixtures/linter/autofix/ansi/018_l003_indent_templated_code/after.sql b/test/fixtures/linter/autofix/ansi/018_LT02_indent_templated_code/after.sql
similarity index 100%
rename from test/fixtures/linter/autofix/ansi/018_l003_indent_templated_code/after.sql
rename to test/fixtures/linter/autofix/ansi/018_LT02_indent_templated_code/after.sql
diff --git a/test/fixtures/linter/autofix/ansi/018_l003_indent_templated_code/before.sql b/test/fixtures/linter/autofix/ansi/018_LT02_indent_templated_code/before.sql
similarity index 100%
rename from test/fixtures/linter/autofix/ansi/018_l003_indent_templated_code/before.sql
rename to test/fixtures/linter/autofix/ansi/018_LT02_indent_templated_code/before.sql
diff --git a/test/fixtures/linter/autofix/ansi/018_l003_indent_templated_code/test-config.yml b/test/fixtures/linter/autofix/ansi/018_LT02_indent_templated_code/test-config.yml
similarity index 66%
rename from test/fixtures/linter/autofix/ansi/018_l003_indent_templated_code/test-config.yml
rename to test/fixtures/linter/autofix/ansi/018_LT02_indent_templated_code/test-config.yml
index 587d14d..1f7873f 100644
--- a/test/fixtures/linter/autofix/ansi/018_l003_indent_templated_code/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/018_LT02_indent_templated_code/test-config.yml
@@ -1,3 +1,3 @@
 test-config:
   rules:
-    - L003
+    - LT02
diff --git a/test/fixtures/linter/autofix/ansi/019_trailing_comma_to_leading/test-config.yml b/test/fixtures/linter/autofix/ansi/019_trailing_comma_to_leading/test-config.yml
index 98ed860..9dd2a6b 100644
--- a/test/fixtures/linter/autofix/ansi/019_trailing_comma_to_leading/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/019_trailing_comma_to_leading/test-config.yml
@@ -1,3 +1,3 @@
 test-config:
   rules:
-    - L019
+    - LT04
diff --git a/test/fixtures/linter/autofix/ansi/020_L008_trailing_comma/test-config.yml b/test/fixtures/linter/autofix/ansi/020_L008_trailing_comma/test-config.yml
index 2a0744e..e3c328d 100644
--- a/test/fixtures/linter/autofix/ansi/020_L008_trailing_comma/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/020_L008_trailing_comma/test-config.yml
@@ -1,3 +1,3 @@
 test-config:
   rules:
-    - L008
+    - LT01
diff --git a/test/fixtures/linter/autofix/ansi/021_fix_respects_noqa/after.sql b/test/fixtures/linter/autofix/ansi/021_fix_respects_noqa/after.sql
index c6cf326..56c0dae 100644
--- a/test/fixtures/linter/autofix/ansi/021_fix_respects_noqa/after.sql
+++ b/test/fixtures/linter/autofix/ansi/021_fix_respects_noqa/after.sql
@@ -1,4 +1,4 @@
---noqa: disable=L034
+--noqa: disable=ST06
 SELECT DISTINCT
     TO_CHAR(a, 'YYYY-MM-dd HH:MM:ss') AS the_date,
     a AS b
@@ -7,16 +7,16 @@ FROM
 
 SELECT
     col_a AS a,
-    col_b b, --noqa: disable=L012
+    col_b b, --noqa: disable=AL02
     col_c c,
-    col_d AS d, --noqa: enable=L012
+    col_d AS d, --noqa: enable=AL02
     col_e AS e,
     col_f AS f,
     col_g g,  --noqa
     col_h AS h,
-    col_i i, --noqa:L012
+    col_i i, --noqa:AL02
     col_j AS j,
-    col_k AS k, --noqa:L013
+    col_k AS k, --noqa:AL03
     col_l AS l,
     col_m AS m,
     col_n n, --noqa: disable=all
diff --git a/test/fixtures/linter/autofix/ansi/021_fix_respects_noqa/before.sql b/test/fixtures/linter/autofix/ansi/021_fix_respects_noqa/before.sql
index 694ee35..0fb5b64 100644
--- a/test/fixtures/linter/autofix/ansi/021_fix_respects_noqa/before.sql
+++ b/test/fixtures/linter/autofix/ansi/021_fix_respects_noqa/before.sql
@@ -1,4 +1,4 @@
---noqa: disable=L034
+--noqa: disable=ST06
 SELECT DISTINCT
     TO_CHAR(a, 'YYYY-MM-dd HH:MM:ss') as the_date,
     a AS b
@@ -7,16 +7,16 @@ FROM
 
 SELECT
     col_a a,
-    col_b b, --noqa: disable=L012
+    col_b b, --noqa: disable=AL02
     col_c c,
-    col_d d, --noqa: enable=L012
+    col_d d, --noqa: enable=AL02
     col_e e,
     col_f f,
     col_g g,  --noqa
     col_h h,
-    col_i i, --noqa:L012
+    col_i i, --noqa:AL02
     col_j j,
-    col_k k, --noqa:L013
+    col_k k, --noqa:AL03
     col_l l,
     col_m m,
     col_n n, --noqa: disable=all
diff --git a/test/fixtures/linter/autofix/ansi/021_fix_respects_noqa/test-config.yml b/test/fixtures/linter/autofix/ansi/021_fix_respects_noqa/test-config.yml
index 5ded4e2..7175311 100644
--- a/test/fixtures/linter/autofix/ansi/021_fix_respects_noqa/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/021_fix_respects_noqa/test-config.yml
@@ -1,5 +1,5 @@
 test-config:
   rules:
-    - L010
-    - L012
-    - L034
+    - CP01
+    - AL02
+    - ST06
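
The noqa fixtures above show that inline ignore comments also move to the new codes (--noqa: disable=ST06, --noqa:AL02). A hedged sketch of the same mechanism through the simple API; that lint() applies noqa masks the same way the CLI does is an assumption here:

    import sqlfluff

    # Without the noqa comment this would report AL02 (implicit column alias);
    # with it, the violation list should come back empty.
    sql = "SELECT col_a a  --noqa: AL02\nFROM tbl\n"
    print(sqlfluff.lint(sql, dialect="ansi", rules=["AL02"]))
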
diff --git a/test/fixtures/linter/autofix/ansi/022_l019_corrupts_parse_tree_and_causes_l034_to_corrupt_sql/after.sql b/test/fixtures/linter/autofix/ansi/022_LT04_corrupts_parse_tree_and_causes_ST06_to_corrupt_sql/after.sql
similarity index 100%
rename from test/fixtures/linter/autofix/ansi/022_l019_corrupts_parse_tree_and_causes_l034_to_corrupt_sql/after.sql
rename to test/fixtures/linter/autofix/ansi/022_LT04_corrupts_parse_tree_and_causes_ST06_to_corrupt_sql/after.sql
diff --git a/test/fixtures/linter/autofix/ansi/022_l019_corrupts_parse_tree_and_causes_l034_to_corrupt_sql/before.sql b/test/fixtures/linter/autofix/ansi/022_LT04_corrupts_parse_tree_and_causes_ST06_to_corrupt_sql/before.sql
similarity index 100%
rename from test/fixtures/linter/autofix/ansi/022_l019_corrupts_parse_tree_and_causes_l034_to_corrupt_sql/before.sql
rename to test/fixtures/linter/autofix/ansi/022_LT04_corrupts_parse_tree_and_causes_ST06_to_corrupt_sql/before.sql
diff --git a/test/fixtures/linter/autofix/ansi/025_l043_and_l035_interaction/test-config.yml b/test/fixtures/linter/autofix/ansi/022_LT04_corrupts_parse_tree_and_causes_ST06_to_corrupt_sql/test-config.yml
similarity index 50%
rename from test/fixtures/linter/autofix/ansi/025_l043_and_l035_interaction/test-config.yml
rename to test/fixtures/linter/autofix/ansi/022_LT04_corrupts_parse_tree_and_causes_ST06_to_corrupt_sql/test-config.yml
index b68c3b6..dcd06c0 100644
--- a/test/fixtures/linter/autofix/ansi/025_l043_and_l035_interaction/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/022_LT04_corrupts_parse_tree_and_causes_ST06_to_corrupt_sql/test-config.yml
@@ -1,4 +1,4 @@
 test-config:
   rules:
-    - L035
-    - L043
+    - LT04
+    - ST06
diff --git a/test/fixtures/linter/autofix/ansi/023_LT05_confuses_LT10/.sqlfluff b/test/fixtures/linter/autofix/ansi/023_LT05_confuses_LT10/.sqlfluff
new file mode 100644
index 0000000..a79c445
--- /dev/null
+++ b/test/fixtures/linter/autofix/ansi/023_LT05_confuses_LT10/.sqlfluff
@@ -0,0 +1,2 @@
+[sqlfluff]
+max_line_length = 70
diff --git a/test/fixtures/linter/autofix/ansi/023_LT05_confuses_LT10/after.sql b/test/fixtures/linter/autofix/ansi/023_LT05_confuses_LT10/after.sql
new file mode 100644
index 0000000..e2eb78f
--- /dev/null
+++ b/test/fixtures/linter/autofix/ansi/023_LT05_confuses_LT10/after.sql
@@ -0,0 +1,10 @@
+SELECT
+    *
+FROM
+    superverylongtablenamereallyreally1
+WHERE
+    long_varname_to_trigger_Rule_LT05_id in (
+        SELECT distinct id
+        FROM superverylongtablenamereallyreally2
+        WHERE deletedat IS NULL
+    )
diff --git a/test/fixtures/linter/autofix/ansi/023_l016_confuses_l041/before.sql b/test/fixtures/linter/autofix/ansi/023_LT05_confuses_LT10/before.sql
similarity index 70%
rename from test/fixtures/linter/autofix/ansi/023_l016_confuses_l041/before.sql
rename to test/fixtures/linter/autofix/ansi/023_LT05_confuses_LT10/before.sql
index 9ba4536..9498977 100644
--- a/test/fixtures/linter/autofix/ansi/023_l016_confuses_l041/before.sql
+++ b/test/fixtures/linter/autofix/ansi/023_LT05_confuses_LT10/before.sql
@@ -3,4 +3,4 @@ SELECT
     FROM
         superverylongtablenamereallyreally1
     WHERE
-        long_varname_to_trigger_Rule_L016_id in (SELECT distinct id FROM superverylongtablenamereallyreally2 WHERE deletedat IS NULL)
+        long_varname_to_trigger_Rule_LT05_id in (SELECT distinct id FROM superverylongtablenamereallyreally2 WHERE deletedat IS NULL)
diff --git a/test/fixtures/linter/autofix/ansi/023_LT05_confuses_LT10/test-config.yml b/test/fixtures/linter/autofix/ansi/023_LT05_confuses_LT10/test-config.yml
new file mode 100644
index 0000000..570d832
--- /dev/null
+++ b/test/fixtures/linter/autofix/ansi/023_LT05_confuses_LT10/test-config.yml
@@ -0,0 +1,8 @@
+test-config:
+  # NOTE: LT02 is included in this test case because the fix for
+  # LT05 doesn't really make sense without it as the existing
+  # query is poorly indented.
+  rules:
+    - LT02
+    - LT05
+    - LT10
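
As the NOTE in this test-config explains, LT05's fix only makes sense alongside re-indentation, so the case runs LT02, LT05 and LT10 together. A rough sketch of the combined fix on a query shaped like the fixture's before.sql (the SQL below approximates the fixture, and the default 80-character limit stands in for the fixture's max_line_length = 70):

    import sqlfluff

    # Approximation of the fixture's before.sql: an over-indented query with
    # one very long line containing a subquery and a lower-cased DISTINCT.
    sql = (
        "SELECT\n"
        "        *\n"
        "    FROM\n"
        "        superverylongtablenamereallyreally1\n"
        "    WHERE\n"
        "        long_varname_to_trigger_Rule_LT05_id in (SELECT distinct id"
        " FROM superverylongtablenamereallyreally2 WHERE deletedat IS NULL)\n"
    )

    # LT10 moves DISTINCT up to the SELECT line, LT05 wraps the long line,
    # and LT02 re-indents the result.
    print(sqlfluff.fix(sql, dialect="ansi", rules=["LT02", "LT05", "LT10"]))
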
diff --git a/test/fixtures/linter/autofix/ansi/023_l016_confuses_l041/after.sql b/test/fixtures/linter/autofix/ansi/023_l016_confuses_l041/after.sql
deleted file mode 100644
index c4c968d..0000000
--- a/test/fixtures/linter/autofix/ansi/023_l016_confuses_l041/after.sql
+++ /dev/null
@@ -1,10 +0,0 @@
-SELECT
-        *
-    FROM
-        superverylongtablenamereallyreally1
-    WHERE
-        long_varname_to_trigger_Rule_L016_id in (
-            SELECT distinct
-                id
-            FROM superverylongtablenamereallyreally2 WHERE deletedat IS NULL
-        )
diff --git a/test/fixtures/linter/autofix/ansi/024_remove_templated_errors/after.sql b/test/fixtures/linter/autofix/ansi/024_remove_templated_errors/after.sql
index 928605d..c67c829 100644
--- a/test/fixtures/linter/autofix/ansi/024_remove_templated_errors/after.sql
+++ b/test/fixtures/linter/autofix/ansi/024_remove_templated_errors/after.sql
@@ -1,5 +1,5 @@
 -- Templated query aimed to test the BaseRule.remove_templated_errors()
 -- function's behavior of not modifying templated sections.
 SELECT
-   {{ par_wrap() }}
+    {{ par_wrap() }}
     , line_two AS line_two
diff --git a/test/fixtures/linter/autofix/ansi/024_remove_templated_errors/test-config.yml b/test/fixtures/linter/autofix/ansi/024_remove_templated_errors/test-config.yml
index e8ef0e8..07f7a13 100644
--- a/test/fixtures/linter/autofix/ansi/024_remove_templated_errors/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/024_remove_templated_errors/test-config.yml
@@ -1,6 +1,6 @@
 test-config:
   rules:
-    - L003
-    - L010
-    - L019
-    - L034
+    - LT02
+    - CP01
+    - LT04
+    - ST06
diff --git a/test/fixtures/linter/autofix/ansi/025_l043_and_l035_interaction/after.sql b/test/fixtures/linter/autofix/ansi/025_ST02_and_ST01_interaction/after.sql
similarity index 100%
rename from test/fixtures/linter/autofix/ansi/025_l043_and_l035_interaction/after.sql
rename to test/fixtures/linter/autofix/ansi/025_ST02_and_ST01_interaction/after.sql
diff --git a/test/fixtures/linter/autofix/ansi/025_l043_and_l035_interaction/before.sql b/test/fixtures/linter/autofix/ansi/025_ST02_and_ST01_interaction/before.sql
similarity index 100%
rename from test/fixtures/linter/autofix/ansi/025_l043_and_l035_interaction/before.sql
rename to test/fixtures/linter/autofix/ansi/025_ST02_and_ST01_interaction/before.sql
diff --git a/test/fixtures/linter/autofix/ansi/022_l019_corrupts_parse_tree_and_causes_l034_to_corrupt_sql/test-config.yml b/test/fixtures/linter/autofix/ansi/025_ST02_and_ST01_interaction/test-config.yml
similarity index 50%
rename from test/fixtures/linter/autofix/ansi/022_l019_corrupts_parse_tree_and_causes_l034_to_corrupt_sql/test-config.yml
rename to test/fixtures/linter/autofix/ansi/025_ST02_and_ST01_interaction/test-config.yml
index 01ed778..84e959b 100644
--- a/test/fixtures/linter/autofix/ansi/022_l019_corrupts_parse_tree_and_causes_l034_to_corrupt_sql/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/025_ST02_and_ST01_interaction/test-config.yml
@@ -1,4 +1,4 @@
 test-config:
   rules:
-    - L019
-    - L034
+    - ST01
+    - ST02
diff --git a/test/fixtures/linter/autofix/ansi/026_LT05_line_length_includes_earlier_fixes/.sqlfluff b/test/fixtures/linter/autofix/ansi/026_LT05_line_length_includes_earlier_fixes/.sqlfluff
new file mode 100644
index 0000000..cb2e2a0
--- /dev/null
+++ b/test/fixtures/linter/autofix/ansi/026_LT05_line_length_includes_earlier_fixes/.sqlfluff
@@ -0,0 +1,2 @@
+[sqlfluff:rules:aliasing.forbid]
+force_enable = true
diff --git a/test/fixtures/linter/autofix/ansi/026_l016_line_length_includes_earlier_fixes/after.sql b/test/fixtures/linter/autofix/ansi/026_LT05_line_length_includes_earlier_fixes/after.sql
similarity index 64%
rename from test/fixtures/linter/autofix/ansi/026_l016_line_length_includes_earlier_fixes/after.sql
rename to test/fixtures/linter/autofix/ansi/026_LT05_line_length_includes_earlier_fixes/after.sql
index ad8942d..4de2e26 100644
--- a/test/fixtures/linter/autofix/ansi/026_l016_line_length_includes_earlier_fixes/after.sql
+++ b/test/fixtures/linter/autofix/ansi/026_LT05_line_length_includes_earlier_fixes/after.sql
@@ -2,10 +2,10 @@ SELECT
     abs(round(foo_bar_report.metricx-xxx_yyy_report.metricx)) as col_c_rel_diff,
     abs(
         (
-            round(
-                foo_bar_report.metricx-xxx_yyy_report.metricx
-            )/foo_bar_report.metricx
-        )*100
+            round(foo_bar_report.metricx-xxx_yyy_report.metricx)
+            /foo_bar_report.metricx
+        )
+        *100
     ) as metric_x_rel_diff
 FROM foo_bar_report
 LEFT JOIN xxx_yyy_report
diff --git a/test/fixtures/linter/autofix/ansi/026_l016_line_length_includes_earlier_fixes/before.sql b/test/fixtures/linter/autofix/ansi/026_LT05_line_length_includes_earlier_fixes/before.sql
similarity index 100%
rename from test/fixtures/linter/autofix/ansi/026_l016_line_length_includes_earlier_fixes/before.sql
rename to test/fixtures/linter/autofix/ansi/026_LT05_line_length_includes_earlier_fixes/before.sql
diff --git a/test/fixtures/linter/autofix/ansi/026_LT05_line_length_includes_earlier_fixes/test-config.yml b/test/fixtures/linter/autofix/ansi/026_LT05_line_length_includes_earlier_fixes/test-config.yml
new file mode 100644
index 0000000..f952b22
--- /dev/null
+++ b/test/fixtures/linter/autofix/ansi/026_LT05_line_length_includes_earlier_fixes/test-config.yml
@@ -0,0 +1,5 @@
+test-config:
+  rules:
+    - LT02
+    - LT05
+    - AL07
diff --git a/test/fixtures/linter/autofix/ansi/026_l016_line_length_includes_earlier_fixes/test-config.yml b/test/fixtures/linter/autofix/ansi/026_l016_line_length_includes_earlier_fixes/test-config.yml
deleted file mode 100644
index 1492987..0000000
--- a/test/fixtures/linter/autofix/ansi/026_l016_line_length_includes_earlier_fixes/test-config.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-test-config:
-  rules:
-    - L003
-    - L016
-    - L031
diff --git a/test/fixtures/linter/autofix/ansi/027_l003_l018_l022_wrong_indent_cte/after.sql b/test/fixtures/linter/autofix/ansi/027_LT02_LT07_LT08_wrong_indent_cte/after.sql
similarity index 100%
rename from test/fixtures/linter/autofix/ansi/027_l003_l018_l022_wrong_indent_cte/after.sql
rename to test/fixtures/linter/autofix/ansi/027_LT02_LT07_LT08_wrong_indent_cte/after.sql
diff --git a/test/fixtures/linter/autofix/ansi/027_l003_l018_l022_wrong_indent_cte/before.sql b/test/fixtures/linter/autofix/ansi/027_LT02_LT07_LT08_wrong_indent_cte/before.sql
similarity index 100%
rename from test/fixtures/linter/autofix/ansi/027_l003_l018_l022_wrong_indent_cte/before.sql
rename to test/fixtures/linter/autofix/ansi/027_LT02_LT07_LT08_wrong_indent_cte/before.sql
diff --git a/test/fixtures/linter/autofix/ansi/027_LT02_LT07_LT08_wrong_indent_cte/test-config.yml b/test/fixtures/linter/autofix/ansi/027_LT02_LT07_LT08_wrong_indent_cte/test-config.yml
new file mode 100644
index 0000000..f655003
--- /dev/null
+++ b/test/fixtures/linter/autofix/ansi/027_LT02_LT07_LT08_wrong_indent_cte/test-config.yml
@@ -0,0 +1,5 @@
+test-config:
+  rules:
+    - LT02
+    - LT07
+    - LT08
diff --git a/test/fixtures/linter/autofix/ansi/027_l003_l018_templated_block_without_newline_cte/after.sql b/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_without_newline_cte/after.sql
similarity index 100%
rename from test/fixtures/linter/autofix/ansi/027_l003_l018_templated_block_without_newline_cte/after.sql
rename to test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_without_newline_cte/after.sql
diff --git a/test/fixtures/linter/autofix/ansi/027_l003_l018_templated_block_without_newline_cte/before.sql b/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_without_newline_cte/before.sql
similarity index 100%
rename from test/fixtures/linter/autofix/ansi/027_l003_l018_templated_block_without_newline_cte/before.sql
rename to test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_without_newline_cte/before.sql
diff --git a/test/fixtures/linter/autofix/ansi/023_l016_confuses_l041/test-config.yml b/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_without_newline_cte/test-config.yml
similarity index 50%
rename from test/fixtures/linter/autofix/ansi/023_l016_confuses_l041/test-config.yml
rename to test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_without_newline_cte/test-config.yml
index 2d6f9f8..aed7d16 100644
--- a/test/fixtures/linter/autofix/ansi/023_l016_confuses_l041/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_without_newline_cte/test-config.yml
@@ -1,4 +1,4 @@
 test-config:
   rules:
-    - L016
-    - L041
+    - LT02
+    - LT07
diff --git a/test/fixtures/linter/autofix/ansi/027_l003_l018_templated_block_wrong_indent_cte/after.sql b/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_wrong_indent_cte/after.sql
similarity index 100%
rename from test/fixtures/linter/autofix/ansi/027_l003_l018_templated_block_wrong_indent_cte/after.sql
rename to test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_wrong_indent_cte/after.sql
diff --git a/test/fixtures/linter/autofix/ansi/027_l003_l018_templated_block_wrong_indent_cte/before.sql b/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_wrong_indent_cte/before.sql
similarity index 100%
rename from test/fixtures/linter/autofix/ansi/027_l003_l018_templated_block_wrong_indent_cte/before.sql
rename to test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_wrong_indent_cte/before.sql
diff --git a/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_wrong_indent_cte/test-config.yml b/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_wrong_indent_cte/test-config.yml
new file mode 100644
index 0000000..aed7d16
--- /dev/null
+++ b/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_wrong_indent_cte/test-config.yml
@@ -0,0 +1,4 @@
+test-config:
+  rules:
+    - LT02
+    - LT07
diff --git a/test/fixtures/linter/autofix/ansi/027_l003_l018_wrong_indent_with/after.sql b/test/fixtures/linter/autofix/ansi/027_LT02_LT07_wrong_indent_with/after.sql
similarity index 100%
rename from test/fixtures/linter/autofix/ansi/027_l003_l018_wrong_indent_with/after.sql
rename to test/fixtures/linter/autofix/ansi/027_LT02_LT07_wrong_indent_with/after.sql
diff --git a/test/fixtures/linter/autofix/ansi/027_l003_l018_wrong_indent_with/before.sql b/test/fixtures/linter/autofix/ansi/027_LT02_LT07_wrong_indent_with/before.sql
similarity index 100%
rename from test/fixtures/linter/autofix/ansi/027_l003_l018_wrong_indent_with/before.sql
rename to test/fixtures/linter/autofix/ansi/027_LT02_LT07_wrong_indent_with/before.sql
diff --git a/test/fixtures/linter/autofix/ansi/027_LT02_LT07_wrong_indent_with/test-config.yml b/test/fixtures/linter/autofix/ansi/027_LT02_LT07_wrong_indent_with/test-config.yml
new file mode 100644
index 0000000..aed7d16
--- /dev/null
+++ b/test/fixtures/linter/autofix/ansi/027_LT02_LT07_wrong_indent_with/test-config.yml
@@ -0,0 +1,4 @@
+test-config:
+  rules:
+    - LT02
+    - LT07
diff --git a/test/fixtures/linter/autofix/ansi/027_l003_l018_l022_wrong_indent_cte/test-config.yml b/test/fixtures/linter/autofix/ansi/027_l003_l018_l022_wrong_indent_cte/test-config.yml
deleted file mode 100644
index 74cfeb0..0000000
--- a/test/fixtures/linter/autofix/ansi/027_l003_l018_l022_wrong_indent_cte/test-config.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-test-config:
-  rules:
-    - L003
-    - L018
-    - L022
diff --git a/test/fixtures/linter/autofix/ansi/027_l003_l018_templated_block_without_newline_cte/test-config.yml b/test/fixtures/linter/autofix/ansi/027_l003_l018_templated_block_without_newline_cte/test-config.yml
deleted file mode 100644
index 1bb7e9e..0000000
--- a/test/fixtures/linter/autofix/ansi/027_l003_l018_templated_block_without_newline_cte/test-config.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-test-config:
-  rules:
-    - L003
-    - L018
diff --git a/test/fixtures/linter/autofix/ansi/027_l003_l018_templated_block_wrong_indent_cte/test-config.yml b/test/fixtures/linter/autofix/ansi/027_l003_l018_templated_block_wrong_indent_cte/test-config.yml
deleted file mode 100644
index 1bb7e9e..0000000
--- a/test/fixtures/linter/autofix/ansi/027_l003_l018_templated_block_wrong_indent_cte/test-config.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-test-config:
-  rules:
-    - L003
-    - L018
diff --git a/test/fixtures/linter/autofix/ansi/027_l003_l018_wrong_indent_with/test-config.yml b/test/fixtures/linter/autofix/ansi/027_l003_l018_wrong_indent_with/test-config.yml
deleted file mode 100644
index 1bb7e9e..0000000
--- a/test/fixtures/linter/autofix/ansi/027_l003_l018_wrong_indent_with/test-config.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-test-config:
-  rules:
-    - L003
-    - L018
diff --git a/test/fixtures/linter/autofix/ansi/L042_subqueries_in_joins/test-config.yml b/test/fixtures/linter/autofix/ansi/L042_subqueries_in_joins/test-config.yml
deleted file mode 100644
index 4f8fa80..0000000
--- a/test/fixtures/linter/autofix/ansi/L042_subqueries_in_joins/test-config.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-test-config:
-  rules:
-    - L042
-    - L046
diff --git a/test/fixtures/linter/autofix/ansi/L042_subqueries_in_joins/after.sql b/test/fixtures/linter/autofix/ansi/ST05_subqueries_in_joins/after.sql
similarity index 100%
rename from test/fixtures/linter/autofix/ansi/L042_subqueries_in_joins/after.sql
rename to test/fixtures/linter/autofix/ansi/ST05_subqueries_in_joins/after.sql
diff --git a/test/fixtures/linter/autofix/ansi/L042_subqueries_in_joins/before.sql b/test/fixtures/linter/autofix/ansi/ST05_subqueries_in_joins/before.sql
similarity index 100%
rename from test/fixtures/linter/autofix/ansi/L042_subqueries_in_joins/before.sql
rename to test/fixtures/linter/autofix/ansi/ST05_subqueries_in_joins/before.sql
diff --git a/test/fixtures/linter/autofix/ansi/ST05_subqueries_in_joins/test-config.yml b/test/fixtures/linter/autofix/ansi/ST05_subqueries_in_joins/test-config.yml
new file mode 100644
index 0000000..2060819
--- /dev/null
+++ b/test/fixtures/linter/autofix/ansi/ST05_subqueries_in_joins/test-config.yml
@@ -0,0 +1,4 @@
+test-config:
+  rules:
+    - ST05
+    - JJ01
diff --git a/test/fixtures/linter/autofix/ansi/macro_file_jinja_include_undefined_variable/test-config.yml b/test/fixtures/linter/autofix/ansi/macro_file_jinja_include_undefined_variable/test-config.yml
index b51361c..d06aa11 100644
--- a/test/fixtures/linter/autofix/ansi/macro_file_jinja_include_undefined_variable/test-config.yml
+++ b/test/fixtures/linter/autofix/ansi/macro_file_jinja_include_undefined_variable/test-config.yml
@@ -1,3 +1,3 @@
 test-config:
   rules:
-    - L036
+    - LT09
diff --git a/test/fixtures/linter/autofix/bigquery/001_templating/.sqlfluff b/test/fixtures/linter/autofix/bigquery/001_templating/.sqlfluff
index 1aa32a6..3e8e6cf 100644
--- a/test/fixtures/linter/autofix/bigquery/001_templating/.sqlfluff
+++ b/test/fixtures/linter/autofix/bigquery/001_templating/.sqlfluff
@@ -2,3 +2,6 @@
 project=marketing_segmentation
 dataset=dataset
 label_prob_threshold=0.8
+
+[sqlfluff:indentation]
+allow_implicit_indents=True
diff --git a/test/fixtures/linter/autofix/bigquery/001_templating/after.sql b/test/fixtures/linter/autofix/bigquery/001_templating/after.sql
index cd5e59a..1e77266 100644
--- a/test/fixtures/linter/autofix/bigquery/001_templating/after.sql
+++ b/test/fixtures/linter/autofix/bigquery/001_templating/after.sql
@@ -1,5 +1,5 @@
 select *
-from  `{{project}}.{{dataset}}.user_labels_with_probs`
+from `{{project}}.{{dataset}}.user_labels_with_probs`
 where prob_max >= {{label_prob_threshold}}
     --- only focus on 3 segments
     and label_str not in ('marketing_maven', 'growth_services')
diff --git a/test/fixtures/linter/autofix/bigquery/001_templating/test-config.yml b/test/fixtures/linter/autofix/bigquery/001_templating/test-config.yml
index 91d6bd2..c527fde 100644
--- a/test/fixtures/linter/autofix/bigquery/001_templating/test-config.yml
+++ b/test/fixtures/linter/autofix/bigquery/001_templating/test-config.yml
@@ -1,5 +1,5 @@
 test-config:
   rules:
-    - L003
-    - L006
-    - L009
+    - LT02
+    - LT01
+    - LT12
diff --git a/test/fixtures/linter/autofix/bigquery/002_templating/.sqlfluff b/test/fixtures/linter/autofix/bigquery/002_templating/.sqlfluff
index 279e1c0..6118aba 100644
--- a/test/fixtures/linter/autofix/bigquery/002_templating/.sqlfluff
+++ b/test/fixtures/linter/autofix/bigquery/002_templating/.sqlfluff
@@ -1,4 +1,4 @@
-[sqlfluff:rules]
+[sqlfluff]
 max_line_length = 50
 
 [sqlfluff:templater:jinja:context]
diff --git a/test/fixtures/linter/autofix/bigquery/002_templating/test-config.yml b/test/fixtures/linter/autofix/bigquery/002_templating/test-config.yml
index 7486201..9476789 100644
--- a/test/fixtures/linter/autofix/bigquery/002_templating/test-config.yml
+++ b/test/fixtures/linter/autofix/bigquery/002_templating/test-config.yml
@@ -1,7 +1,6 @@
 test-config:
   rules:
-    - L003
-    - L006
-    - L008
-    - L009
-    - L016
+    - LT01
+    - LT02
+    - LT12
+    - LT05
diff --git a/test/fixtures/linter/autofix/bigquery/003_templating/test-config.yml b/test/fixtures/linter/autofix/bigquery/003_templating/test-config.yml
index 353a1c4..43077cd 100644
--- a/test/fixtures/linter/autofix/bigquery/003_templating/test-config.yml
+++ b/test/fixtures/linter/autofix/bigquery/003_templating/test-config.yml
@@ -1,7 +1,5 @@
 test-config:
   rules:
-    - L001
-    - L003
-    - L006
-    - L008
-    - L009
+    - LT01
+    - LT02
+    - LT12
diff --git a/test/fixtures/linter/autofix/bigquery/004_templating/after.sql b/test/fixtures/linter/autofix/bigquery/004_templating/after.sql
index adbb590..ea1c607 100644
--- a/test/fixtures/linter/autofix/bigquery/004_templating/after.sql
+++ b/test/fixtures/linter/autofix/bigquery/004_templating/after.sql
@@ -24,11 +24,13 @@ raw_effect_sizes AS (
         SELECT
             COUNT(1) AS campaign_count_{{action}},
             {{corr_states}}
-            -- NOTE: The L003 fix routine behaves a little strangely here around the templated
+            -- NOTE: The LT02 fix routine behaves a little strangely here around the templated
             -- code, specifically the indentation of STDDEV_POP and preceding comments. This
             -- is a bug currently with no obvious solution.
-            , SAFE_DIVIDE(SAFE_MULTIPLY(CORR({{metric}}_rate_su, {{action}}), STDDEV_POP({{metric}}_rate_su)),
-                STDDEV_POP({{action}})) AS {{metric}}_{{action}}
+            , SAFE_DIVIDE(
+                SAFE_MULTIPLY(CORR({{metric}}_rate_su, {{action}}), STDDEV_POP({{metric}}_rate_su)),
+                STDDEV_POP({{action}})
+            ) AS {{metric}}_{{action}}
         FROM
             `{{gcp_project}}.{{dataset}}.global_actions_states`
         WHERE
diff --git a/test/fixtures/linter/autofix/bigquery/004_templating/before.sql b/test/fixtures/linter/autofix/bigquery/004_templating/before.sql
index 4aa0dd6..9f46185 100644
--- a/test/fixtures/linter/autofix/bigquery/004_templating/before.sql
+++ b/test/fixtures/linter/autofix/bigquery/004_templating/before.sql
@@ -24,7 +24,7 @@ WITH
   SELECT
     COUNT(1) AS campaign_count_{{action}},
     {{corr_states}}
-    -- NOTE: The L003 fix routine behaves a little strangely here around the templated
+    -- NOTE: The LT02 fix routine behaves a little strangely here around the templated
     -- code, specifically the indentation of STDDEV_POP and preceding comments. This
     -- is a bug currently with no obvious solution.
     ,SAFE_DIVIDE(SAFE_MULTIPLY(CORR({{metric}}_rate_su, {{action}}), STDDEV_POP({{metric}}_rate_su)),
diff --git a/test/fixtures/linter/autofix/bigquery/004_templating/test-config.yml b/test/fixtures/linter/autofix/bigquery/004_templating/test-config.yml
index 353a1c4..43077cd 100644
--- a/test/fixtures/linter/autofix/bigquery/004_templating/test-config.yml
+++ b/test/fixtures/linter/autofix/bigquery/004_templating/test-config.yml
@@ -1,7 +1,5 @@
 test-config:
   rules:
-    - L001
-    - L003
-    - L006
-    - L008
-    - L009
+    - LT01
+    - LT02
+    - LT12
diff --git a/test/fixtures/linter/autofix/bigquery/005_unnest_spacing/test-config.yml b/test/fixtures/linter/autofix/bigquery/005_unnest_spacing/test-config.yml
index 70029a3..82463d8 100644
--- a/test/fixtures/linter/autofix/bigquery/005_unnest_spacing/test-config.yml
+++ b/test/fixtures/linter/autofix/bigquery/005_unnest_spacing/test-config.yml
@@ -1,4 +1,4 @@
 test-config:
   rules:
-    - L003
-    - L025
+    - LT02
+    - AL05
diff --git a/test/fixtures/linter/autofix/bigquery/006_fix_ignore_templating/.sqlfluff b/test/fixtures/linter/autofix/bigquery/006_fix_ignore_templating/.sqlfluff
index 1a490f4..a821a1b 100644
--- a/test/fixtures/linter/autofix/bigquery/006_fix_ignore_templating/.sqlfluff
+++ b/test/fixtures/linter/autofix/bigquery/006_fix_ignore_templating/.sqlfluff
@@ -3,5 +3,5 @@ dialect = bigquery
 ignore = templating
 fix_even_unparsable = True
 
-[sqlfluff:rules:L010]
+[sqlfluff:rules:capitalisation.keywords]
 capitalisation_policy = upper
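
As the config change above shows, per-rule sections are now addressed by rule name (capitalisation.keywords) rather than code (L010). Rule selectors accept the same names; a sketch using the public API (sqlfluff.lint exists in both series, while selecting by name is a 2.x feature):

    import sqlfluff

    # Select the rule by its 2.x name; it should resolve to the same
    # rule as the code CP01 (formerly L010).
    violations = sqlfluff.lint(
        "select 1 FROM tbl",
        dialect="ansi",
        rules=["capitalisation.keywords"],
    )
    print(violations)  # expect one CP01 finding for the inconsistent "select"
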
diff --git a/test/fixtures/linter/autofix/bigquery/006_fix_ignore_templating/after.sql b/test/fixtures/linter/autofix/bigquery/006_fix_ignore_templating/after.sql
index 6b5e5ee..74daa3c 100644
--- a/test/fixtures/linter/autofix/bigquery/006_fix_ignore_templating/after.sql
+++ b/test/fixtures/linter/autofix/bigquery/006_fix_ignore_templating/after.sql
@@ -1,17 +1,20 @@
-SELECT * EXCEPT({% include query %}) FROM
+SELECT * EXCEPT ({% include query %}) FROM
     (
         SELECT
             tbl1.*,
-            row_number() OVER (
-                PARTITION BY
-                    tbl1.the_name, {{ context_columns | join(', ') }}
-                ORDER BY created_at DESC
-            ) AS rnk
+            row_number()
+                OVER (
+                    PARTITION BY
+                        tbl1.the_name, {{ context_columns | join(', ') }}
+                    ORDER BY created_at DESC
+                )
+                AS rnk
             {% if context_columns | default("abc") == "abc" %}
             FROM tbl1
             {% endif %}
         INNER JOIN tbl2
-            ON tbl1.the_name = tbl2.the_name
+            ON
+                tbl1.the_name = tbl2.the_name
                 AND tbl1.run_id = tbl2.run_id
         WHERE {{ run_rnk }} = {% include "foobar.sql" %}
     )
diff --git a/test/fixtures/linter/autofix/bigquery/006_fix_ignore_templating/test-config.yml b/test/fixtures/linter/autofix/bigquery/006_fix_ignore_templating/test-config.yml
index 456af2d..0d45ff2 100644
--- a/test/fixtures/linter/autofix/bigquery/006_fix_ignore_templating/test-config.yml
+++ b/test/fixtures/linter/autofix/bigquery/006_fix_ignore_templating/test-config.yml
@@ -1,8 +1,8 @@
 test-config:
   rules:
-    - L003
-    - L008
-    - L010
-    - L012
-    - L016
-    - L036
+    - LT02
+    - LT01
+    - CP01
+    - AL02
+    - LT05
+    - LT09
diff --git a/test/fixtures/linter/autofix/snowflake/001_semi_structured/.sqlfluff b/test/fixtures/linter/autofix/snowflake/001_semi_structured/.sqlfluff
index f2abc85..5902c6d 100644
--- a/test/fixtures/linter/autofix/snowflake/001_semi_structured/.sqlfluff
+++ b/test/fixtures/linter/autofix/snowflake/001_semi_structured/.sqlfluff
@@ -1,2 +1,2 @@
-[sqlfluff:rules:L014]
+[sqlfluff:rules:capitalisation.identifiers]
 capitalisation_policy = lower
diff --git a/test/fixtures/linter/autofix/snowflake/001_semi_structured/test-config.yml b/test/fixtures/linter/autofix/snowflake/001_semi_structured/test-config.yml
index 9ad137c..7b2dc1d 100644
--- a/test/fixtures/linter/autofix/snowflake/001_semi_structured/test-config.yml
+++ b/test/fixtures/linter/autofix/snowflake/001_semi_structured/test-config.yml
@@ -1,3 +1,3 @@
 test-config:
   rules:
-    - L014
+    - CP02
diff --git a/test/fixtures/linter/autofix/snowflake/002_previously_parse_tree_damaging/.sqlfluff b/test/fixtures/linter/autofix/snowflake/002_previously_parse_tree_damaging/.sqlfluff
index 7d05caf..931ba33 100644
--- a/test/fixtures/linter/autofix/snowflake/002_previously_parse_tree_damaging/.sqlfluff
+++ b/test/fixtures/linter/autofix/snowflake/002_previously_parse_tree_damaging/.sqlfluff
@@ -1,8 +1,9 @@
 [sqlfluff]
 dialect = snowflake
 templater = jinja
-exclude_rules = L011,L016,L031,L034,L035
+exclude_rules = AL01,LT05,AL07,ST06,ST01
 output_line_length = 120
+max_line_length = 120
 
 [sqlfluff:layout:type:binary_operator]
 line_position = leading
@@ -10,19 +11,17 @@ line_position = leading
 [sqlfluff:layout:type:comparison_operator]
 line_position = leading
 
-# Some rules can be configured directly from the config common to other rules.
-[sqlfluff:rules]
+[sqlfluff:indentation]
 tab_space_size = 2
-max_line_length = 120
 
-[sqlfluff:rules:L010]  # Keywords
+[sqlfluff:rules:capitalisation.keywords]  # Keywords
 capitalisation_policy = upper
 
-[sqlfluff:rules:L013]  # Column expressions
+[sqlfluff:rules:AL03]  # Column expressions
 allow_scalar = False
 
-[sqlfluff:rules:L014]  # Unquoted identifiers
+[sqlfluff:rules:capitalisation.identifiers]  # Unquoted identifiers
 extended_capitalisation_policy = lower
 
-[sqlfluff:rules:L030]  # Function names
+[sqlfluff:rules:capitalisation.functions]  # Function names
 extended_capitalisation_policy = lower
diff --git a/test/fixtures/linter/autofix/snowflake/002_previously_parse_tree_damaging/after.sql b/test/fixtures/linter/autofix/snowflake/002_previously_parse_tree_damaging/after.sql
index 05f6a00..da2ecb4 100644
--- a/test/fixtures/linter/autofix/snowflake/002_previously_parse_tree_damaging/after.sql
+++ b/test/fixtures/linter/autofix/snowflake/002_previously_parse_tree_damaging/after.sql
@@ -6,8 +6,8 @@ USING (
     split(foo, '|')[2] REGEXP '^\\d+\\-\\d+\\-\\d+ \\d+\\:\\d+$'
     OR foo IN ('BAR', 'FOO')
 ) AS src
-ON
-  src.foo = tgt.foo
+  ON
+    src.foo = tgt.foo
 WHEN MATCHED THEN
   UPDATE SET
     tgt.foo = src.foo;
diff --git a/test/fixtures/linter/autofix/snowflake/002_previously_parse_tree_damaging/test-config.yml b/test/fixtures/linter/autofix/snowflake/002_previously_parse_tree_damaging/test-config.yml
index 95733da..b2c79b0 100644
--- a/test/fixtures/linter/autofix/snowflake/002_previously_parse_tree_damaging/test-config.yml
+++ b/test/fixtures/linter/autofix/snowflake/002_previously_parse_tree_damaging/test-config.yml
@@ -1,9 +1,8 @@
 test-config:
   rules:
-    - L001
-    - L003
-    - L007
-    - L008
-    - L010
-    - L036
-    - L052
+    - LT01
+    - LT02
+    - LT03
+    - CP01
+    - LT09
+    - CV06
diff --git a/test/fixtures/linter/autofix/snowflake/003_previously_parse_tree_damaging/.sqlfluff b/test/fixtures/linter/autofix/snowflake/003_previously_parse_tree_damaging/.sqlfluff
index b8b2845..b038b73 100644
--- a/test/fixtures/linter/autofix/snowflake/003_previously_parse_tree_damaging/.sqlfluff
+++ b/test/fixtures/linter/autofix/snowflake/003_previously_parse_tree_damaging/.sqlfluff
@@ -1,8 +1,5 @@
 [sqlfluff]
 dialect = snowflake
 
-[sqlfluff:rules]
+[sqlfluff:indentation]
 tab_space_size = 2
-
-[sqlfluff:rules:L003]
-hanging_indents = False
diff --git a/test/fixtures/linter/autofix/snowflake/003_previously_parse_tree_damaging/after.sql b/test/fixtures/linter/autofix/snowflake/003_previously_parse_tree_damaging/after.sql
index 469c07c..82c5510 100644
--- a/test/fixtures/linter/autofix/snowflake/003_previously_parse_tree_damaging/after.sql
+++ b/test/fixtures/linter/autofix/snowflake/003_previously_parse_tree_damaging/after.sql
@@ -1,3 +1,4 @@
-set cutoff = (select foo
+set cutoff = (
+  select foo
 
 );
diff --git a/test/fixtures/linter/autofix/snowflake/003_previously_parse_tree_damaging/test-config.yml b/test/fixtures/linter/autofix/snowflake/003_previously_parse_tree_damaging/test-config.yml
index 40c59de..d7a03d2 100644
--- a/test/fixtures/linter/autofix/snowflake/003_previously_parse_tree_damaging/test-config.yml
+++ b/test/fixtures/linter/autofix/snowflake/003_previously_parse_tree_damaging/test-config.yml
@@ -1,4 +1,4 @@
 test-config:
   rules:
-    - L003
-    - L036
+    - LT02
+    - LT09
diff --git a/test/fixtures/linter/column_references.sql b/test/fixtures/linter/column_references.sql
index 5c5eaa0..695028a 100644
--- a/test/fixtures/linter/column_references.sql
+++ b/test/fixtures/linter/column_references.sql
@@ -1,3 +1,3 @@
 select a, b.c, d.g, f as f1, f1 + 1 as f2
 from z as a JOIN d using(f) where f2 > 1
--- NB: `f` appears in the USING clause and so shouldn't fail on L027
+-- NB: `f` appears in the USING clause and so shouldn't fail on RF02
diff --git a/test/fixtures/linter/operator_errors_ignore.sql b/test/fixtures/linter/operator_errors_ignore.sql
index 136f8bb..550cff8 100644
--- a/test/fixtures/linter/operator_errors_ignore.sql
+++ b/test/fixtures/linter/operator_errors_ignore.sql
@@ -6,6 +6,6 @@ are still present. No errors should be found on line 8 at all. */
 SELECT
     a.a + a.b AS good,
     a.a-a.b AS bad_1,  -- noqa
-    a.a*a.b AS bad_2,  -- noqa: L007, L006
-    a.a*a.b AS bad_3  -- noqa: L007
+    a.a*a.b AS bad_2,  -- noqa: LT01, LT03
+    a.a*a.b AS bad_3  -- noqa: LT03
 FROM tbl AS a
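
The noqa directives above now name the 2.x codes. A quick sanity check of the suppression behaviour (a sketch; sqlfluff.lint returns a list of violation dicts):

    import sqlfluff

    sql = (
        "SELECT\n"
        "    a.a-a.b AS bad_1,  -- noqa\n"
        "    a.a*a.b AS bad_2  -- noqa: LT01, LT03\n"
        "FROM tbl AS a\n"
    )
    # The bare noqa suppresses everything on its line, and the targeted
    # noqa suppresses LT01/LT03, so no findings are expected here.
    print(sqlfluff.lint(sql, dialect="ansi", rules=["LT01", "LT03"]))
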
diff --git a/test/fixtures/rules/custom/L000.py b/test/fixtures/rules/custom/L000.py
index 86aa846..6b4bb22 100644
--- a/test/fixtures/rules/custom/L000.py
+++ b/test/fixtures/rules/custom/L000.py
@@ -1,8 +1,6 @@
 """Test std rule import."""
-from sqlfluff.core.rules.doc_decorators import document_groups
 
 
-@document_groups
 class Rule_L000:
     """Test std rule import."""
 
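
The import is dropped above because 2.x removed the rule doc decorators; the metadata that @document_groups used to surface is now read straight from class attributes. An illustrative shape only (the groups attribute is real in 2.x, but this fixture class is a bare import stub, not a working rule):

    class Rule_L000:
        """Test std rule import."""

        # In 2.x, group membership is a plain class attribute instead of
        # being attached via the removed @document_groups decorator.
        groups = ("all",)
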
diff --git a/test/fixtures/rules/std_rule_cases/L011.yml b/test/fixtures/rules/std_rule_cases/AL01.yml
similarity index 74%
rename from test/fixtures/rules/std_rule_cases/L011.yml
rename to test/fixtures/rules/std_rule_cases/AL01.yml
index 147347c..0490b9d 100644
--- a/test/fixtures/rules/std_rule_cases/L011.yml
+++ b/test/fixtures/rules/std_rule_cases/AL01.yml
@@ -1,4 +1,4 @@
-rule: L011
+rule: AL01
 
 test_fail_default_explicit:
   # Add whitespace when fixing implicit aliasing
@@ -11,7 +11,7 @@ test_fail_explicit:
   fix_str: select foo.bar from table1 AS foo
   configs:
     rules:
-      L011:
+      aliasing.table:
         aliasing: explicit
 
 test_fail_implicit:
@@ -20,7 +20,7 @@ test_fail_implicit:
   fix_str: select foo.bar from table1 foo
   configs:
     rules:
-      L011:
+      aliasing.table:
         aliasing: implicit
 
 test_fail_implicit_alias:
@@ -39,7 +39,7 @@ test_fail_implicit_alias_explicit:
   fix_str: select foo.bar from (select 1 as bar) AS foo
   configs:
     rules:
-      L011:
+      aliasing.table:
         aliasing: explicit
 
 test_fail_implicit_alias_implicit:
@@ -48,7 +48,7 @@ test_fail_implicit_alias_implicit:
   fix_str: select foo.bar from (select 1 as bar) foo
   configs:
     rules:
-      L011:
+      aliasing.table:
         aliasing: implicit
 
 test_fail_implicit_alias_implicit_multiple:
@@ -57,11 +57,12 @@ test_fail_implicit_alias_implicit_multiple:
   fix_str: select foo.bar from (select 1 as bar) bar, (select 1 as foo) foo
   configs:
     rules:
-      L011:
+      aliasing.table:
         aliasing: implicit
 
 test_fail_implicit_alias_implicit_newline:
-  # Add whitespace when fixing implicit aliasing
+  # NOTE: Even when removing by a newline, we should still remove any duplicate
+  # whitespace.
   fail_str: |
       select foo.bar from (select 1 as bar)
       AS foo
@@ -70,7 +71,7 @@ test_fail_implicit_alias_implicit_newline:
       foo
   configs:
     rules:
-      L011:
+      aliasing.table:
         aliasing: implicit
 
 test_fail_default_explicit_alias_merge:
@@ -109,7 +110,7 @@ test_fail_explicit_alias_merge:
     core:
       dialect: bigquery
     rules:
-      L011:
+      aliasing.table:
         aliasing: explicit
 
 test_pass_implicit_alias_merge:
@@ -124,5 +125,41 @@ test_pass_implicit_alias_merge:
     core:
       dialect: bigquery
     rules:
-      L011:
+      aliasing.table:
         aliasing: implicit
+
+test_alias_expression_4492:
+  # Test failing alias expressions
+  # https://github.com/sqlfluff/sqlfluff/issues/4492
+  fail_str:
+    SELECT
+        voo.a
+    FROM foo voo
+  fix_str:
+    SELECT
+        voo.a
+    FROM foo AS voo
+  configs:
+    core:
+      dialect: snowflake
+    layout:
+      type:
+        alias_expression:
+          spacing_before: align
+
+test_alias_expression_4089:
+  # Test failing alias expressions
+  # https://github.com/sqlfluff/sqlfluff/issues/4089
+  fail_str:
+    SELECT
+    RANK() OVER (PARTITION BY Id ORDER BY Id DESC) nr_rank
+    FROM (values ('Amsterdam', 1), ('London', 2)) Cities(Name, Id)
+  fix_str:
+    SELECT
+    RANK() OVER (PARTITION BY Id ORDER BY Id DESC) nr_rank
+    FROM (values ('Amsterdam', 1), ('London', 2)) AS Cities(Name, Id)
+  configs:
+    layout:
+      type:
+        alias_expression:
+          spacing_before: align
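
The cases above exercise AL01 (aliasing.table, formerly L011), whose default policy is explicit aliasing. Replicating the fix from the Python API (a sketch; sqlfluff.fix accepts a rules selector):

    import sqlfluff

    fixed = sqlfluff.fix(
        "select foo.bar from table1 foo",
        dialect="ansi",
        rules=["AL01"],
    )
    # Expected, per the fixtures above:
    # select foo.bar from table1 AS foo
    print(fixed)
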
diff --git a/test/fixtures/rules/std_rule_cases/L012.yml b/test/fixtures/rules/std_rule_cases/AL02.yml
similarity index 51%
rename from test/fixtures/rules/std_rule_cases/L012.yml
rename to test/fixtures/rules/std_rule_cases/AL02.yml
index 24a8b89..2bf31da 100644
--- a/test/fixtures/rules/std_rule_cases/L012.yml
+++ b/test/fixtures/rules/std_rule_cases/AL02.yml
@@ -1,4 +1,4 @@
-rule: L012
+rule: AL02
 
 issue_561:
   # Test for https://github.com/sqlfluff/sqlfluff/issues/561
@@ -23,7 +23,7 @@ test_fail_explicit_column_explicit:
   fix_str: select 1 AS bar from table1 b
   configs:
     rules:
-      L012:
+      aliasing.column:
         aliasing: explicit
 
 test_fail_explicit_column_implicit:
@@ -32,7 +32,7 @@ test_fail_explicit_column_implicit:
   fix_str: select 1 bar from table1 b
   configs:
     rules:
-      L012:
+      aliasing.column:
         aliasing: implicit
 
 test_pass_tsql_alternative_alias:
@@ -51,3 +51,62 @@ test_fail_alias_ending_raw_equals:
   # Test explicit column alias doesn't catch false positives
   fail_str: select col1 raw_equals
   fix_str: select col1 AS raw_equals
+
+
+test_alias_expression_align_4515_1:
+  # Test more failing alias expressions
+  fail_str: |
+    select
+        test a
+    from example_table
+  fix_str: |
+    select
+        test AS a
+    from example_table
+  configs:
+    layout:
+      type:
+        alias_expression:
+          spacing_before: align
+          align_within: select_clause
+          align_scope: bracketed
+
+test_alias_expression_align_4515_2:
+  # Test more failing alias expressions
+  fail_str: |
+    select
+        test a,
+        test b
+    from example_table
+  fix_str: |
+    select
+        test AS a,
+        test AS b
+    from example_table
+  configs:
+    layout:
+      type:
+        alias_expression:
+          spacing_before: align
+          align_within: select_clause
+          align_scope: bracketed
+
+test_alias_expression_align_4515_3:
+  # Test more failing alias expressions
+  fail_str: |
+    select
+        testy_testy_testy a,
+        test b
+    from example_table
+  fix_str: |
+    select
+        testy_testy_testy AS a,
+        test              AS b
+    from example_table
+  configs:
+    layout:
+      type:
+        alias_expression:
+          spacing_before: align
+          align_within: select_clause
+          align_scope: bracketed
diff --git a/test/fixtures/rules/std_rule_cases/L013.yml b/test/fixtures/rules/std_rule_cases/AL03.yml
similarity index 94%
rename from test/fixtures/rules/std_rule_cases/L013.yml
rename to test/fixtures/rules/std_rule_cases/AL03.yml
index 3c24557..55e6972 100644
--- a/test/fixtures/rules/std_rule_cases/L013.yml
+++ b/test/fixtures/rules/std_rule_cases/AL03.yml
@@ -1,10 +1,10 @@
-rule: L013
+rule: AL03
 
 test_pass_column_exp_without_alias_1:
   pass_str: SELECT *, foo from blah
 
 test_pass_column_exp_without_alias_2:
-  # L013 fix with https://github.com/sqlfluff/sqlfluff/issues/449
+  # AL03 fix with https://github.com/sqlfluff/sqlfluff/issues/449
   pass_str: select ps.*, pandgs.blah from ps join pandgs using(moo)
 
 test_pass_column_exp_without_alias_allow_scalar_true:
@@ -15,7 +15,7 @@ test_fail_column_exp_without_alias:
   fail_str: SELECT upper(foo), bar from blah
 
 # Casting (via "::TYPE" syntax) has no effect on column output naming
-# and L013 therefore shouldnt be applied
+# and AL03 therefore shouldnt be applied
 test_pass_column_exp_without_alias_if_only_cast:
   pass_str: SELECT foo_col::VARCHAR(28) , bar from blah
 
diff --git a/test/fixtures/rules/std_rule_cases/L020.yml b/test/fixtures/rules/std_rule_cases/AL04.yml
similarity index 99%
rename from test/fixtures/rules/std_rule_cases/L020.yml
rename to test/fixtures/rules/std_rule_cases/AL04.yml
index 339c25e..30a22cd 100644
--- a/test/fixtures/rules/std_rule_cases/L020.yml
+++ b/test/fixtures/rules/std_rule_cases/AL04.yml
@@ -1,4 +1,4 @@
-rule: L020
+rule: AL04
 
 test_fail_exactly_once_duplicated_aliases:
   # duplicate aliases
diff --git a/test/fixtures/rules/std_rule_cases/L025.yml b/test/fixtures/rules/std_rule_cases/AL05.yml
similarity index 95%
rename from test/fixtures/rules/std_rule_cases/L025.yml
rename to test/fixtures/rules/std_rule_cases/AL05.yml
index 4454e4d..2e638b8 100644
--- a/test/fixtures/rules/std_rule_cases/L025.yml
+++ b/test/fixtures/rules/std_rule_cases/AL05.yml
@@ -1,4 +1,4 @@
-rule: L025
+rule: AL05
 
 test_fail_table_alias_not_referenced_1:
   # Aliases not referenced.
@@ -17,11 +17,11 @@ test_pass_table_alias_referenced:
   pass_str: SELECT * FROM my_tbl AS foo JOIN other_tbl on other_tbl.x = foo.x
 
 test_pass_unaliased_table_referenced:
-  # L025 fix with https://github.com/sqlfluff/sqlfluff/issues/449
+  # AL05 fix with https://github.com/sqlfluff/sqlfluff/issues/449
   pass_str: select ps.*, pandgs.blah from ps join pandgs using(moo)
 
 test_ignore_bigquery_value_table_functions:
-  # L025 fix with https://github.com/sqlfluff/sqlfluff/issues/356
+  # AL05 fix with https://github.com/sqlfluff/sqlfluff/issues/356
   pass_str: |
     select *
     from unnest(generate_timestamp_array(
@@ -31,7 +31,7 @@ test_ignore_bigquery_value_table_functions:
       dialect: bigquery
 
 test_ignore_postgres_value_table_functions:
-  # L025 fix with https://github.com/sqlfluff/sqlfluff/issues/3051
+  # AL05 fix with https://github.com/sqlfluff/sqlfluff/issues/3051
   pass_str: |
     SELECT json_build_object(
         'name', 'ticket_status',
@@ -44,7 +44,7 @@ test_ignore_postgres_value_table_functions:
       dialect: postgres
 
 test_ignore_postgres_value_table_functions_generate_series:
-  # L025 fix with https://github.com/sqlfluff/sqlfluff/issues/3462
+  # AL05 fix with https://github.com/sqlfluff/sqlfluff/issues/3462
   pass_str: |
     SELECT
       date_trunc('day', dd):: timestamp with time zone
@@ -71,7 +71,7 @@ test_pass_subquery_alias_not_referenced:
 test_pass_bigquery_unaliased_table_with_hyphens:
   # Test non-quoted table name containing hyphens: https://github.com/sqlfluff/sqlfluff/issues/895
   # This is more of a smoke test to exercise the
-  # ObjectReferenceSegment.extract_reference() function, which is used by L025
+  # ObjectReferenceSegment.extract_reference() function, which is used by AL05
   # and in turn calls HyphenatedObjectReferenceSegment.iter_raw_references().
   pass_str: |
     select *
@@ -93,7 +93,7 @@ test_pass_bigquery_aliased_table_with_ticks_referenced:
 
 test_pass_tsql_object_reference_override:
   # T-SQL Overrides the ObjectReferenceSegment so needs to have the _level_to_int
-  # static method set (as a static method!) or rule L025 fails.
+  # static method set (as a static method!) or rule AL05 fails.
   # https://github.com/sqlfluff/sqlfluff/issues/1669
   pass_str: SELECT a FROM b
   configs:
diff --git a/test/fixtures/rules/std_rule_cases/L066.yml b/test/fixtures/rules/std_rule_cases/AL06.yml
similarity index 94%
rename from test/fixtures/rules/std_rule_cases/L066.yml
rename to test/fixtures/rules/std_rule_cases/AL06.yml
index 92958a7..12c3ce9 100644
--- a/test/fixtures/rules/std_rule_cases/L066.yml
+++ b/test/fixtures/rules/std_rule_cases/AL06.yml
@@ -1,4 +1,4 @@
-rule: L066
+rule: AL06
 
 test_pass_no_config:
   pass_str: |
@@ -20,7 +20,7 @@ test_fail_alias_too_short:
     JOIN orders as o on u.id = o.user_id;
   configs:
     rules:
-      L066:
+      aliasing.length:
         min_alias_length: 4
 
 test_fail_alias_too_long:
@@ -35,7 +35,7 @@ test_fail_alias_too_long:
     JOIN orders as o on u.id = o.user_id;
   configs:
     rules:
-      L066:
+      aliasing.length:
         max_alias_length: 10
 
 test_fail_alias_min_and_max:
@@ -50,7 +50,7 @@ test_fail_alias_min_and_max:
     JOIN orders as o on u.id = o.user_id;
   configs:
     rules:
-      L066:
+      aliasing.length:
         min_alias_length: 4
         max_alias_length: 10
 
@@ -66,6 +66,6 @@ test_pass_with_config:
     JOIN orders as latest_orders on users.id = latest_orders.user_id;
   configs:
     rules:
-      L066:
+      aliasing.length:
         min_alias_length: 10
         max_alias_length: 30
diff --git a/test/fixtures/rules/std_rule_cases/L031.yml b/test/fixtures/rules/std_rule_cases/AL07.yml
similarity index 66%
rename from test/fixtures/rules/std_rule_cases/L031.yml
rename to test/fixtures/rules/std_rule_cases/AL07.yml
index 52604ab..795a28d 100644
--- a/test/fixtures/rules/std_rule_cases/L031.yml
+++ b/test/fixtures/rules/std_rule_cases/AL07.yml
@@ -1,13 +1,17 @@
-rule: L031
+rule: AL07
 
 test_pass_allow_self_join_alias:
-  # L031 Allow self-joins
+  # AL07 Allow self-joins
   pass_str: |
     select
       x.a,
       x_2.b
     from x
     left join x as x_2 on x.foreign_key = x.foreign_key
+  configs:
+    rules:
+      aliasing.forbid:
+        force_enable: true
 
 test_fail_avoid_aliases_1:
   fail_str: |
@@ -29,9 +33,13 @@ test_fail_avoid_aliases_1:
     FROM users
     JOIN customers on users.id = customers.user_id
     JOIN orders on users.id = orders.user_id;
+  configs:
+    rules:
+      aliasing.forbid:
+        force_enable: true
 
 test_fail_avoid_aliases_2:
-  # L031 order by
+  # AL07 order by
   fail_str: |
     SELECT
       u.id,
@@ -53,9 +61,13 @@ test_fail_avoid_aliases_2:
     JOIN customers on users.id = customers.user_id
     JOIN orders on users.id = orders.user_id
     order by orders.user_id desc
+  configs:
+    rules:
+      aliasing.forbid:
+        force_enable: true
 
 test_fail_avoid_aliases_3:
-  # L031 order by identifier which is the same raw as an alias but refers to a column
+  # AL07 order by identifier which is the same raw as an alias but refers to a column
   fail_str: |
     SELECT
       u.id,
@@ -77,19 +89,35 @@ test_fail_avoid_aliases_3:
     JOIN customers on users.id = customers.user_id
     JOIN orders on users.id = orders.user_id
     order by o desc
+  configs:
+    rules:
+      aliasing.forbid:
+        force_enable: true
 
 alias_single_char_identifiers:
   fail_str: "select b from tbl as a"
   fix_str: "select b from tbl"
+  configs:
+    rules:
+      aliasing.forbid:
+        force_enable: true
 
 alias_with_wildcard_identifier:
   fail_str: "select * from tbl as a"
   fix_str: "select * from tbl"
+  configs:
+    rules:
+      aliasing.forbid:
+        force_enable: true
 
 select_from_values:
   pass_str: |
     select *
     from values(1, 2, 3)
+  configs:
+    rules:
+      aliasing.forbid:
+        force_enable: true
 
 select_from_table_generator:
   pass_str: |
@@ -103,6 +131,9 @@ select_from_table_generator:
   configs:
     core:
       dialect: snowflake
+    rules:
+      aliasing.forbid:
+        force_enable: true
 
 issue_635:
   pass_str: |
@@ -118,6 +149,9 @@ issue_635:
   configs:
     core:
       dialect: snowflake
+    rules:
+      aliasing.forbid:
+        force_enable: true
 
 # This query was causing a runtime error in the rule.
 issue_239:
@@ -157,6 +191,10 @@ issue_610:
     FROM aaaaaa
     JOIN bbbbbb AS b ON b.a = aaaaaa.id
     JOIN bbbbbb AS b2 ON b2.other = b.id
+  configs:
+    rules:
+      aliasing.forbid:
+        force_enable: true
 
 issue_1589:
   pass_str: |
@@ -168,6 +206,10 @@ issue_1589:
               rnd>=t.v
          order by rnd
          limit 1)
+  configs:
+    rules:
+      aliasing.forbid:
+        force_enable: true
 
 issue_1639:
   fail_str: |
@@ -193,6 +235,9 @@ issue_1639:
   configs:
     core:
       dialect: tsql
+    rules:
+      aliasing.forbid:
+        force_enable: true
 
 test_fail_no_copy_code_out_of_template:
   # The rule wants to replace "t" with "foobar", but
@@ -206,6 +251,9 @@ test_fail_no_copy_code_out_of_template:
       jinja:
         context:
           source_table: foobar
+    rules:
+      aliasing.forbid:
+        force_enable: true
 
 test_bigquery_skip_multipart_names:
   pass_str: |
@@ -230,5 +278,60 @@ test_bigquery_force_enable:
     core:
       dialect: bigquery
     rules:
-      L031:
+      aliasing.forbid:
+        force_enable: true
+
+test_violation_locations:
+  fail_str: |
+    SELECT
+        u.id,
+        c.first_name,
+        c.last_name,
+        COUNT(o.user_id)
+    FROM users as u
+    JOIN customers as c on u.id = c.user_id
+    JOIN orders as o on u.id = o.user_id;
+  fix_str: |
+    SELECT
+        users.id,
+        customers.first_name,
+        customers.last_name,
+        COUNT(orders.user_id)
+    FROM users
+    JOIN customers on users.id = customers.user_id
+    JOIN orders on users.id = orders.user_id;
+  configs:
+    rules:
+      aliasing.forbid:
+        force_enable: true
+  violations:
+    - code: AL07
+      description: Avoid aliases in from clauses and join conditions.
+      line_no: 6
+      line_pos: 15
+      name: aliasing.forbid
+    - code: AL07
+      description: Avoid aliases in from clauses and join conditions.
+      line_no: 7
+      line_pos: 19
+      name: aliasing.forbid
+    - code: AL07
+      description: Avoid aliases in from clauses and join conditions.
+      line_no: 8
+      line_pos: 16
+      name: aliasing.forbid
+
+test_fail_fix_command:
+  # Test originally from commands_test.py
+  fail_str: |
+    SELECT u.id, c.first_name, c.last_name, COUNT(o.user_id)
+    FROM users as u JOIN customers as c on u.id = c.user_id JOIN orders as o
+    on u.id = o.user_id;
+  fix_str: |
+    SELECT users.id, customers.first_name, customers.last_name, COUNT(orders.user_id)
+    FROM users JOIN customers on users.id = customers.user_id JOIN orders
+    on users.id = orders.user_id;
+  configs:
+    rules:
+      aliasing.forbid:
         force_enable: true
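
Most AL07 cases above gain force_enable: true because aliasing.forbid is now opt-in rather than on by default. Enabling it programmatically might look like this (a sketch; it assumes the fixture's nested "configs" block maps directly onto FluffConfig's configs dict, which matches its shape):

    from sqlfluff.core import FluffConfig, Linter

    cfg = FluffConfig(
        configs={
            "core": {"dialect": "ansi", "rules": "AL07"},
            "rules": {"aliasing.forbid": {"force_enable": True}},
        }
    )
    linted = Linter(config=cfg).lint_string("select b from tbl as a\n")
    # With force_enable on, the alias should be flagged, matching the
    # alias_single_char_identifiers case above.
    print([v.rule_code() for v in linted.get_violations()])  # expect ["AL07"]
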
diff --git a/test/fixtures/rules/std_rule_cases/L021.yml b/test/fixtures/rules/std_rule_cases/AM01.yml
similarity index 94%
rename from test/fixtures/rules/std_rule_cases/L021.yml
rename to test/fixtures/rules/std_rule_cases/AM01.yml
index 2a223af..8fcd45d 100644
--- a/test/fixtures/rules/std_rule_cases/L021.yml
+++ b/test/fixtures/rules/std_rule_cases/AM01.yml
@@ -1,4 +1,4 @@
-rule: L021
+rule: AM01
 
 test_pass_only_group_by:
   # check if using select distinct and group by
diff --git a/test/fixtures/rules/std_rule_cases/L033.yml b/test/fixtures/rules/std_rule_cases/AM02.yml
similarity index 99%
rename from test/fixtures/rules/std_rule_cases/L033.yml
rename to test/fixtures/rules/std_rule_cases/AM02.yml
index 4c41864..8d55afb 100644
--- a/test/fixtures/rules/std_rule_cases/L033.yml
+++ b/test/fixtures/rules/std_rule_cases/AM02.yml
@@ -1,4 +1,4 @@
-rule: L033
+rule: AM02
 
 test_pass_union_all:
   pass_str: |
diff --git a/test/fixtures/rules/std_rule_cases/L037.yml b/test/fixtures/rules/std_rule_cases/AM03.yml
similarity index 87%
rename from test/fixtures/rules/std_rule_cases/L037.yml
rename to test/fixtures/rules/std_rule_cases/AM03.yml
index cb68850..efd825b 100644
--- a/test/fixtures/rules/std_rule_cases/L037.yml
+++ b/test/fixtures/rules/std_rule_cases/AM03.yml
@@ -1,4 +1,4 @@
-rule: L037
+rule: AM03
 
 test_unspecified:
   pass_str: SELECT * FROM t ORDER BY a
@@ -29,3 +29,7 @@ test_desc_asc:
 test_nulls_last:
   fail_str: SELECT * FROM t ORDER BY a DESC, b NULLS LAST
   fix_str: SELECT * FROM t ORDER BY a DESC, b ASC NULLS LAST
+
+
+test_comment:
+  pass_str: SELECT * FROM t ORDER BY a /* Comment */ DESC, b ASC
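
The new test_comment case pins down that a comment between a column and its direction does not trip AM03. The existing test_nulls_last case above translates to the API like so (a sketch):

    import sqlfluff

    fixed = sqlfluff.fix(
        "SELECT * FROM t ORDER BY a DESC, b NULLS LAST",
        dialect="ansi",
        rules=["AM03"],
    )
    # Per test_nulls_last above, the bare "b" gains an explicit ASC:
    # SELECT * FROM t ORDER BY a DESC, b ASC NULLS LAST
    print(fixed)
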
diff --git a/test/fixtures/rules/std_rule_cases/L044.yml b/test/fixtures/rules/std_rule_cases/AM04.yml
similarity index 99%
rename from test/fixtures/rules/std_rule_cases/L044.yml
rename to test/fixtures/rules/std_rule_cases/AM04.yml
index e030b4f..38a5efb 100644
--- a/test/fixtures/rules/std_rule_cases/L044.yml
+++ b/test/fixtures/rules/std_rule_cases/AM04.yml
@@ -1,4 +1,4 @@
-rule: L044
+rule: AM04
 
 test_pass_known_number_of_result_columns_1:
   pass_str: select a, b from t
diff --git a/test/fixtures/rules/std_rule_cases/L051.yml b/test/fixtures/rules/std_rule_cases/AM05.yml
similarity index 90%
rename from test/fixtures/rules/std_rule_cases/L051.yml
rename to test/fixtures/rules/std_rule_cases/AM05.yml
index 22efb00..9bc40ff 100644
--- a/test/fixtures/rules/std_rule_cases/L051.yml
+++ b/test/fixtures/rules/std_rule_cases/AM05.yml
@@ -1,4 +1,4 @@
-rule: L051
+rule: AM05
 
 # Default config
 test_fail_lone_join_default:
@@ -39,7 +39,7 @@ test_fail_lone_join_inner:
   fix_str: "SELECT foo.a, bar.b FROM foo INNER JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: inner
 
 test_fail_lone_join_lowercase_inner:
@@ -47,42 +47,42 @@ test_fail_lone_join_lowercase_inner:
   fix_str: "SELECT foo.a, bar.b FROM foo inner join bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: inner
 
 test_pass_inner_join_inner:
   pass_str: "SELECT foo.a, bar.b FROM foo INNER JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: inner
 
 test_pass_left_join_inner:
   pass_str: "SELECT foo.a, bar.b FROM foo LEFT JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: inner
 
 test_pass_right_join_inner:
   pass_str: "SELECT foo.a, bar.b FROM foo RIGHT JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: inner
 
 test_pass_full_join_inner:
   pass_str: "SELECT foo.a, bar.b FROM foo FULL JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: inner
 
 test_pass_left_outer_join_inner:
   pass_str: "SELECT foo.a, bar.b FROM foo LEFT OUTER JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: inner
 
 
@@ -90,14 +90,14 @@ test_pass_right_outer_join_inner:
   pass_str: "SELECT foo.a, bar.b FROM foo RIGHT OUTER JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: inner
 
 test_pass_full_outer_join_inner:
   pass_str: "SELECT foo.a, bar.b FROM foo FULL OUTER JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: inner
 
 # Config = "outer"
@@ -105,14 +105,14 @@ test_pass_lone_join_outer:
   pass_str: "SELECT foo.a, bar.b FROM foo JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: outer
 
 test_pass_inner_join_outer:
   pass_str: "SELECT foo.a, bar.b FROM foo INNER JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: outer
 
 test_fail_left_join_outer:
@@ -120,7 +120,7 @@ test_fail_left_join_outer:
   fix_str: "SELECT foo.a, bar.b FROM foo LEFT OUTER JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: outer
 
 test_fail_right_join_outer:
@@ -128,7 +128,7 @@ test_fail_right_join_outer:
   fix_str: "SELECT foo.a, bar.b FROM foo RIGHT OUTER JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: outer
 
 test_fail_full_join_outer:
@@ -136,7 +136,7 @@ test_fail_full_join_outer:
   fix_str: "SELECT foo.a, bar.b FROM foo FULL OUTER JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: outer
 
 test_fail_full_join_lowercase_outer:
@@ -144,14 +144,14 @@ test_fail_full_join_lowercase_outer:
   fix_str: "SELECT foo.a, bar.b FROM foo full outer join bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: outer
 
 test_pass_left_outer_join_outer:
   pass_str: "SELECT foo.a, bar.b FROM foo LEFT OUTER JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: outer
 
 
@@ -159,14 +159,14 @@ test_pass_right_outer_join_outer:
   pass_str: "SELECT foo.a, bar.b FROM foo RIGHT OUTER JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: outer
 
 test_pass_full_outer_join_outer:
   pass_str: "SELECT foo.a, bar.b FROM foo FULL OUTER JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: outer
 
 # Config = "both"
@@ -175,7 +175,7 @@ test_fail_lone_join_both:
   fix_str: "SELECT foo.a, bar.b FROM foo INNER JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: both
 
 test_fail_lone_join_lowercase_both:
@@ -183,14 +183,14 @@ test_fail_lone_join_lowercase_both:
   fix_str: "SELECT foo.a, bar.b FROM foo inner join bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: both
 
 test_pass_inner_join_both:
   pass_str: "SELECT foo.a, bar.b FROM foo INNER JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: both
 
 test_fail_left_join_both:
@@ -198,7 +198,7 @@ test_fail_left_join_both:
   fix_str: "SELECT foo.a, bar.b FROM foo LEFT OUTER JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: both
 
 test_fail_right_join_both:
@@ -206,7 +206,7 @@ test_fail_right_join_both:
   fix_str: "SELECT foo.a, bar.b FROM foo RIGHT OUTER JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: both
 
 test_fail_full_join_both:
@@ -214,7 +214,7 @@ test_fail_full_join_both:
   fix_str: "SELECT foo.a, bar.b FROM foo FULL OUTER JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: both
 
 test_fail_full_join_lowercase_both:
@@ -222,14 +222,14 @@ test_fail_full_join_lowercase_both:
   fix_str: "SELECT foo.a, bar.b FROM foo full outer join bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: both
 
 test_pass_left_outer_join_both:
   pass_str: "SELECT foo.a, bar.b FROM foo LEFT OUTER JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: both
 
 
@@ -237,12 +237,12 @@ test_pass_right_outer_join_both:
   pass_str: "SELECT foo.a, bar.b FROM foo RIGHT OUTER JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: both
 
 test_pass_full_outer_join_both:
   pass_str: "SELECT foo.a, bar.b FROM foo FULL OUTER JOIN bar;\n"
   configs:
     rules:
-      L051:
+      ambiguous.join:
         fully_qualify_join_types: both
diff --git a/test/fixtures/rules/std_rule_cases/L054.yml b/test/fixtures/rules/std_rule_cases/AM06.yml
similarity index 91%
rename from test/fixtures/rules/std_rule_cases/L054.yml
rename to test/fixtures/rules/std_rule_cases/AM06.yml
index c67db70..03d187b 100644
--- a/test/fixtures/rules/std_rule_cases/L054.yml
+++ b/test/fixtures/rules/std_rule_cases/AM06.yml
@@ -1,4 +1,4 @@
-rule: L054
+rule: AM06
 
 test_pass_explicit_group_by_default:
   pass_str: |
@@ -127,7 +127,7 @@ test_pass_explicit_group_by_custom_explicit:
         foo, bar;
   configs:
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: explicit
 
 test_fail_implicit_group_by_custom_explicit:
@@ -141,7 +141,7 @@ test_fail_implicit_group_by_custom_explicit:
         1, 2;
   configs:
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: explicit
 
 test_fail_mix_group_by_custom_explicit:
@@ -155,7 +155,7 @@ test_fail_mix_group_by_custom_explicit:
         1, bar;
   configs:
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: explicit
 
 test_pass_explicit_order_by_custom_explicit:
@@ -168,7 +168,7 @@ test_pass_explicit_order_by_custom_explicit:
         foo, bar;
   configs:
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: explicit
 
 test_fail_implicit_order_by_custom_explicit:
@@ -181,7 +181,7 @@ test_fail_implicit_order_by_custom_explicit:
         1, 2;
   configs:
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: explicit
 
 test_pass_explicit_group_by_and_order_by_custom_explicit:
@@ -197,7 +197,7 @@ test_pass_explicit_group_by_and_order_by_custom_explicit:
         foo, bar;
   configs:
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: explicit
 
 test_fail_implicit_group_by_and_order_by_custom_explicit:
@@ -213,7 +213,7 @@ test_fail_implicit_group_by_and_order_by_custom_explicit:
         1, 2;
   configs:
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: explicit
 
 test_fail_within_line_mix_group_by_and_order_by_custom_explicit:
@@ -229,7 +229,7 @@ test_fail_within_line_mix_group_by_and_order_by_custom_explicit:
         foo, 2;
   configs:
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: explicit
 
 test_fail_across_line_mix_group_by_and_order_by_custom_explicit:
@@ -245,7 +245,7 @@ test_fail_across_line_mix_group_by_and_order_by_custom_explicit:
         foo, bar;
   configs:
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: explicit
 
 test_pass_explicit_expression_order_by_custom_explicit:
@@ -259,7 +259,7 @@ test_pass_explicit_expression_order_by_custom_explicit:
         foo, power(bar, 2)
   configs:
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: explicit
 
 test_fail_implicit_expression_order_by_custom_explicit:
@@ -273,7 +273,7 @@ test_fail_implicit_expression_order_by_custom_explicit:
         1, power(bar, 2)
   configs:
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: explicit
 
 test_pass_explicit_group_by_custom_implicit:
@@ -287,7 +287,7 @@ test_pass_explicit_group_by_custom_implicit:
         1, 2;
   configs:
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: implicit
 
 test_fail_implicit_group_by_custom_implicit:
@@ -301,7 +301,7 @@ test_fail_implicit_group_by_custom_implicit:
         foo, bar;
   configs:
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: implicit
 
 test_pass_explicit_order_by_custom_implicit:
@@ -314,7 +314,7 @@ test_pass_explicit_order_by_custom_implicit:
         1, 2;
   configs:
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: implicit
 
 test_fail_implicit_order_by_custom_implicit:
@@ -327,7 +327,7 @@ test_fail_implicit_order_by_custom_implicit:
         foo, bar;
   configs:
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: implicit
 
 test_fail_mix_group_by_custom_implicit:
@@ -341,7 +341,7 @@ test_fail_mix_group_by_custom_implicit:
         1, bar;
   configs:
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: implicit
 
 test_pass_implicit_group_by_and_order_by_custom_implicit:
@@ -357,7 +357,7 @@ test_pass_implicit_group_by_and_order_by_custom_implicit:
         1, 2;
   configs:
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: implicit
 
 test_fail_explicit_group_by_and_order_by_custom_implicit:
@@ -373,7 +373,7 @@ test_fail_explicit_group_by_and_order_by_custom_implicit:
         foo, bar;
   configs:
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: implicit
 
 test_fail_within_line_mix_group_by_and_order_by_custom_implicit:
@@ -389,7 +389,7 @@ test_fail_within_line_mix_group_by_and_order_by_custom_implicit:
         foo, 2;
   configs:
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: implicit
 
 test_fail_across_line_mix_group_by_and_order_by_custom_implicit:
@@ -405,7 +405,7 @@ test_fail_across_line_mix_group_by_and_order_by_custom_implicit:
         foo, bar;
   configs:
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: implicit
 
 test_fail_explicit_expression_order_by_custom_implicit:
@@ -419,7 +419,7 @@ test_fail_explicit_expression_order_by_custom_implicit:
         foo, power(bar, 2)
   configs:
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: implicit
 
 test_fail_implicit_expression_order_by_custom_implicit:
@@ -433,7 +433,7 @@ test_fail_implicit_expression_order_by_custom_implicit:
         1, power(bar, 2)
   configs:
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: implicit
 
 test_fail_consistent_snowflake:
@@ -449,7 +449,7 @@ test_fail_consistent_snowflake:
     core:
       dialect: snowflake
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: consistent
 
 test_fail_consistent_exasol:
@@ -465,7 +465,7 @@ test_fail_consistent_exasol:
     core:
       dialect: exasol
     rules:
-      L054:
+      ambiguous.column_references:
         group_by_and_order_by_style: consistent
 
 test_pass_window:
diff --git a/test/fixtures/rules/std_rule_cases/L068.yml b/test/fixtures/rules/std_rule_cases/AM07.yml
similarity index 99%
rename from test/fixtures/rules/std_rule_cases/L068.yml
rename to test/fixtures/rules/std_rule_cases/AM07.yml
index a596d96..f2e9786 100644
--- a/test/fixtures/rules/std_rule_cases/L068.yml
+++ b/test/fixtures/rules/std_rule_cases/AM07.yml
@@ -1,4 +1,4 @@
-rule: L068
+rule: AM07
 
 test_pass_known_number_of_result_columns_1:
   pass_str: |
diff --git a/test/fixtures/rules/std_rule_cases/L010.yml b/test/fixtures/rules/std_rule_cases/CP01.yml
similarity index 85%
rename from test/fixtures/rules/std_rule_cases/L010.yml
rename to test/fixtures/rules/std_rule_cases/CP01.yml
index 899b761..80a662d 100644
--- a/test/fixtures/rules/std_rule_cases/L010.yml
+++ b/test/fixtures/rules/std_rule_cases/CP01.yml
@@ -1,4 +1,4 @@
-rule: L010
+rule: CP01
 
 test_fail_inconsistent_capitalisation_1:
   # Test that we don't have the "inconsistent" bug
@@ -15,7 +15,7 @@ test_fail_capitalisation_policy_lower:
   fix_str: select * from MOO order by dt desc
   configs:
     rules:
-      L010:
+      capitalisation.keywords:
         capitalisation_policy: lower
 
 test_fail_capitalisation_policy_upper:
@@ -24,7 +24,7 @@ test_fail_capitalisation_policy_upper:
   fix_str: SELECT * FROM MOO ORDER BY dt DESC
   configs:
     rules:
-      L010:
+      capitalisation.keywords:
         capitalisation_policy: upper
 
 test_fail_capitalisation_policy_capitalise:
@@ -33,7 +33,7 @@ test_fail_capitalisation_policy_capitalise:
   fix_str: Select * From MOO Order By dt Desc
   configs:
     rules:
-      L010:
+      capitalisation.keywords:
         capitalisation_policy: capitalise
 
 test_fail_date_part_inconsistent_capitalisation:
@@ -47,7 +47,7 @@ test_fail_date_part_capitalisation_policy_lower:
   fix_str: select dt + interval 2 day, interval 3 hour
   configs:
     rules:
-      L010:
+      capitalisation.keywords:
         capitalisation_policy: lower
 
 test_fail_date_part_capitalisation_policy_upper:
@@ -56,7 +56,7 @@ test_fail_date_part_capitalisation_policy_upper:
   fix_str: SELECT dt + INTERVAL 2 DAY, INTERVAL 3 HOUR
   configs:
     rules:
-      L010:
+      capitalisation.keywords:
         capitalisation_policy: upper
 
 test_pass_date_part_consistent_capitalisation:
@@ -69,7 +69,7 @@ test_pass_data_type_inconsistent_capitalisation:
   pass_str: CREATE TABLE table1 (account_id bigint);
   configs:
     rules:
-      L010:
+      capitalisation.keywords:
         capitalisation_policy: upper
 
 test_pass_bigquery_date:
@@ -78,14 +78,14 @@ test_pass_bigquery_date:
     core:
       dialect: bigquery
     rules:
-      L010:
+      capitalisation.keywords:
         capitalisation_policy: upper
 
 test_pass_ignore_word:
   pass_str: SeleCT 1
   configs:
     rules:
-      L010:
+      capitalisation.keywords:
         capitalisation_policy: upper
         ignore_words: select
 
@@ -93,7 +93,7 @@ test_pass_ignore_words:
   pass_str: SeleCT 1
   configs:
     rules:
-      L010:
+      capitalisation.keywords:
         capitalisation_policy: upper
         ignore_words: select,from
 
@@ -101,7 +101,7 @@ test_pass_ignore_words_regex_simple:
   pass_str: SeleCT 1
   configs:
     rules:
-      L010:
+      capitalisation.keywords:
         capitalisation_policy: upper
         ignore_words_regex: ^Se
 
@@ -110,7 +110,7 @@ test_fail_ignore_words_regex_simple:
   fix_str: SeleCT 1 FROM t_table
   configs:
     rules:
-      L010:
+      capitalisation.keywords:
         capitalisation_policy: upper
         ignore_words_regex: ^Se
 
@@ -118,15 +118,31 @@ test_pass_ignore_words_complex:
   pass_str: SeleCT 1 FrOM t_table
   configs:
     rules:
-      L010:
+      capitalisation.keywords:
         capitalisation_policy: upper
         ignore_words_regex: (^Se|^Fr)
 
-test_pass_ignore_templated_code:
+test_pass_ignore_templated_code_true:
   pass_str: |
     {{ "select" }} a
     FROM foo
     WHERE 1
+  configs:
+    core:
+      ignore_templated_areas: true
+
+test_fail_ignore_templated_code_false:
+  fail_str: |
+    {{ "select" }} a
+    FROM foo
+    WHERE 1
+  fix_str: |
+    {{ "select" }} a
+    from foo
+    where 1
+  configs:
+    core:
+      ignore_templated_areas: false
 
 test_fail_snowflake_group_by_cube:
   fail_str: |
@@ -147,7 +163,7 @@ test_fail_snowflake_group_by_cube:
     core:
       dialect: snowflake
     rules:
-      L010:
+      capitalisation.keywords:
         capitalisation_policy: upper
 
 test_pass_ignore_null:
@@ -175,7 +191,7 @@ test_fail_bigquery_week:
     core:
       dialect: bigquery
     rules:
-      L010:
+      capitalisation.keywords:
         capitalisation_policy: upper
 
 test_fail_select_lower:
@@ -193,7 +209,7 @@ test_fail_select_lower:
     core:
       dialect: tsql
     rules:
-      L010:
+      capitalisation.keywords:
         capitalisation_policy: upper
 
 test_fail_select_lower_keyword_functions:
@@ -210,5 +226,5 @@ test_fail_select_lower_keyword_functions:
     core:
       dialect: tsql
     rules:
-      L010:
+      capitalisation.keywords:
         capitalisation_policy: upper
diff --git a/test/fixtures/rules/std_rule_cases/L014.yml b/test/fixtures/rules/std_rule_cases/CP02.yml
similarity index 89%
rename from test/fixtures/rules/std_rule_cases/L014.yml
rename to test/fixtures/rules/std_rule_cases/CP02.yml
index 1a002c6..1b730b9 100644
--- a/test/fixtures/rules/std_rule_cases/L014.yml
+++ b/test/fixtures/rules/std_rule_cases/CP02.yml
@@ -1,4 +1,4 @@
-rule: L014
+rule: CP02
 
 test_pass_consistent_capitalisation_1:
   pass_str: SELECT a, b
@@ -44,14 +44,14 @@ test_pass_consistent_capitalisation_policy_pascal_1:
   pass_str: SELECT PascalCase
   configs:
     rules:
-      L014:
+      capitalisation.identifiers:
         extended_capitalisation_policy: pascal
 
 test_pass_consistent_capitalisation_policy_pascal_2:
   pass_str: SELECT Pascalcase
   configs:
     rules:
-      L014:
+      capitalisation.identifiers:
         extended_capitalisation_policy: pascal
 
 test_pass_consistent_capitalisation_policy_pascal_3:
@@ -59,21 +59,21 @@ test_pass_consistent_capitalisation_policy_pascal_3:
   fix_str: SELECT PascalCase
   configs:
     rules:
-      L014:
+      capitalisation.identifiers:
         extended_capitalisation_policy: pascal
 
 test_pass_consistent_capitalisation_policy_pascal_4:
   pass_str: SELECT PasCalCaSe
   configs:
     rules:
-      L014:
+      capitalisation.identifiers:
         extended_capitalisation_policy: pascal
 
 test_pass_consistent_capitalisation_policy_pascal_5:
   pass_str: SELECT PAscalcase
   configs:
     rules:
-      L014:
+      capitalisation.identifiers:
         extended_capitalisation_policy: pascal
 
 test_pass_consistent_capitalisation_policy_pascal_6:
@@ -84,7 +84,7 @@ test_pass_consistent_capitalisation_policy_pascal_6:
   pass_str: SELECT PASCALCASE
   configs:
     rules:
-      L014:
+      capitalisation.identifiers:
         extended_capitalisation_policy: pascal
 
 test_fail_inconsistent_capitalisation_policy_pascal_1:
@@ -92,7 +92,7 @@ test_fail_inconsistent_capitalisation_policy_pascal_1:
   fix_str: SELECT Pascalcase
   configs:
     rules:
-      L014:
+      capitalisation.identifiers:
         extended_capitalisation_policy: pascal
 
 test_fail_inconsistent_capitalisation_policy_pascal_2:
@@ -100,7 +100,7 @@ test_fail_inconsistent_capitalisation_policy_pascal_2:
   fix_str: SELECT Pascal_Case
   configs:
     rules:
-      L014:
+      capitalisation.identifiers:
         extended_capitalisation_policy: pascal
 
 test_fail_inconsistent_capitalisation_policy_pascal_3:
@@ -110,7 +110,7 @@ test_fail_inconsistent_capitalisation_policy_pascal_3:
   fix_str: SELECT PASCAL_CASE
   configs:
     rules:
-      L014:
+      capitalisation.identifiers:
         extended_capitalisation_policy: pascal
 
 test_fail_inconsistent_capitalisation_policy_pascal_4:
@@ -118,7 +118,7 @@ test_fail_inconsistent_capitalisation_policy_pascal_4:
   fix_str: SELECT pascalcase
   configs:
     rules:
-      L014:
+      capitalisation.identifiers:
         extended_capitalisation_policy: lower
 
 test_fail_consistent_capitalisation_policy_pascal_5:
@@ -126,7 +126,7 @@ test_fail_consistent_capitalisation_policy_pascal_5:
   fix_str: SELECT PASCALCASENAME
   configs:
     rules:
-      L014:
+      capitalisation.identifiers:
         extended_capitalisation_policy: upper
 
 test_fail_inconsistent_capitalisation_pascal_v_capitalise:
@@ -138,14 +138,14 @@ test_pass_policy_unquoted_identifiers_aliases_1:
   pass_str: SELECT a,   B
   configs:
     rules:
-      L014:
+      capitalisation.identifiers:
         unquoted_identifiers_policy: aliases
 
 test_pass_policy_unquoted_identifiers_aliases_2:
   pass_str: SELECT B,   a
   configs:
     rules:
-      L014:
+      capitalisation.identifiers:
         unquoted_identifiers_policy: aliases
 
 test_pass_policy_unquoted_identifiers_aliases_3:
@@ -154,7 +154,7 @@ test_pass_policy_unquoted_identifiers_aliases_3:
   pass_str: SELECT PASCAL_CASE
   configs:
     rules:
-      L014:
+      capitalisation.identifiers:
         extended_capitalisation_policy: pascal
         unquoted_identifiers_policy: aliases
 
@@ -162,7 +162,7 @@ test_pass_policy_unquoted_identifiers_aliases_4:
   pass_str: SELECT UPPER_CASE AS low_case, PascalCase AS low_case
   configs:
     rules:
-      L014:
+      capitalisation.identifiers:
         unquoted_identifiers_policy: aliases
 
 test_policy_unquoted_identifiers_aliases_5:
@@ -170,7 +170,7 @@ test_policy_unquoted_identifiers_aliases_5:
   fix_str: SELECT UPPER_CASE AS PascalCase, PascalCase AS Lower_Case
   configs:
     rules:
-      L014:
+      capitalisation.identifiers:
         unquoted_identifiers_policy: aliases
 
 test_policy_unquoted_identifiers_aliases_6:
@@ -178,7 +178,7 @@ test_policy_unquoted_identifiers_aliases_6:
   fix_str: SELECT UPPER_CASE AS PASCALCASE, PascalCase AS LOWER_CASE
   configs:
     rules:
-      L014:
+      capitalisation.identifiers:
         extended_capitalisation_policy: upper
         unquoted_identifiers_policy: aliases
 
@@ -187,7 +187,7 @@ test_policy_unquoted_identifiers_aliases_7:
   fix_str: SELECT UPPER_CASE AS low_case, PascalCase AS low_case FROM UPPER_CASE AS upper_case
   configs:
     rules:
-      L014:
+      capitalisation.identifiers:
         unquoted_identifiers_policy: aliases
 
 test_policy_unquoted_identifiers_aliases_8:
@@ -195,14 +195,14 @@ test_policy_unquoted_identifiers_aliases_8:
   fix_str: SELECT UPPER_CASE AS PascalCase, PascalCase AS Lower_Case FROM lower_case AS Lower_Case
   configs:
     rules:
-      L014:
+      capitalisation.identifiers:
         unquoted_identifiers_policy: aliases
 
 test_policy_unquoted_identifiers_column_aliases_1:
   pass_str: SELECT UPPER_CASE AS low_case, PascalCase AS low_case FROM UPPER_CASE AS UPPER_CASE
   configs:
     rules:
-      L014:
+      capitalisation.identifiers:
         unquoted_identifiers_policy: column_aliases
 
 test_policy_unquoted_identifiers_aliases_2:
@@ -210,14 +210,14 @@ test_policy_unquoted_identifiers_aliases_2:
   fix_str: SELECT UPPER_CASE AS PascalCase, PascalCase AS Lower_Case FROM lower_case AS lower_case
   configs:
     rules:
-      L014:
+      capitalisation.identifiers:
         unquoted_identifiers_policy: column_aliases
 
 test_pass_ignore_word:
   pass_str: SELECT A, b
   configs:
     rules:
-      L014:
+      capitalisation.identifiers:
         capitalisation_policy: upper
         ignore_words: b
 
@@ -248,8 +248,8 @@ test_pass_bigquery_safe_does_not_trigger:
     core:
       dialect: bigquery
 
-test_pass_sparksql_case_sensitive_property:
+test_pass_databricks_case_sensitive_property:
   pass_str: SET spark.databricks.delta.properties.defaults.enableChangeDataFeed = true;
   configs:
     core:
-      dialect: sparksql
+      dialect: databricks
diff --git a/test/fixtures/rules/std_rule_cases/L030.yml b/test/fixtures/rules/std_rule_cases/CP03.yml
similarity index 79%
rename from test/fixtures/rules/std_rule_cases/L030.yml
rename to test/fixtures/rules/std_rule_cases/CP03.yml
index 1a24012..0e98be7 100644
--- a/test/fixtures/rules/std_rule_cases/L030.yml
+++ b/test/fixtures/rules/std_rule_cases/CP03.yml
@@ -1,4 +1,4 @@
-rule: L030
+rule: CP03
 
 # Inconsistent capitalisation of functions
 test_fail_inconsistent_function_capitalisation_1:
@@ -10,7 +10,7 @@ test_fail_inconsistent_function_capitalisation_2:
   fix_str: SELECT max(id), min(id) from table
   configs:
     rules:
-      L030:
+      capitalisation.functions:
         extended_capitalisation_policy: lower
 
 test_bare_functions:
@@ -18,7 +18,7 @@ test_bare_functions:
   fix_str: SELECT CURRENT_TIMESTAMP from table
   configs:
     rules:
-      L030:
+      capitalisation.functions:
         extended_capitalisation_policy: upper
 
 test_bare_functions_2:
@@ -26,7 +26,7 @@ test_bare_functions_2:
   fix_str: SELECT CURRENT_TIMESTAMP, MIN(a) from table
   configs:
     rules:
-      L030:
+      capitalisation.functions:
         extended_capitalisation_policy: upper
 
 test_bare_functions_3:
@@ -34,7 +34,7 @@ test_bare_functions_3:
   fix_str: SELECT Current_Timestamp, Min(a) from table
   configs:
     rules:
-      L030:
+      capitalisation.functions:
         extended_capitalisation_policy: pascal
 
 test_fail_capitalization_after_comma:
@@ -51,14 +51,30 @@ test_pass_ignore_word:
   pass_str: SELECT MAX(id), min(id) FROM TABLE1
   configs:
     rules:
-      L030:
+      capitalisation.functions:
         ignore_words: min
 
-test_pass_ignore_templated_code:
+test_pass_ignore_templated_code_true:
   pass_str: |
     SELECT
         {{ "greatest(a, b)" }},
         GREATEST(i, j)
+  configs:
+    core:
+      ignore_templated_areas: true
+
+test_fail_ignore_templated_code_false:
+  fail_str: |
+    SELECT
+        {{ "greatest(a, b)" }},
+        GREATEST(i, j)
+  fix_str: |
+    SELECT
+        {{ "greatest(a, b)" }},
+        greatest(i, j)
+  configs:
+    core:
+      ignore_templated_areas: false
 
 test_pass_func_name_templated_literal_mix:
   # Issue 3022. This was actually a bug in BaseSegment.iter_patches().
@@ -68,14 +84,14 @@ test_pass_ignore_words_regex_simple:
   pass_str: SELECT MAX(id), f_test_udf(id) FROM TABLE1
   configs:
     rules:
-      L030:
+      capitalisation.functions:
         ignore_words_regex: ^f_
 
 test_pass_ignore_words_regex_complex:
   pass_str: SELECT MAX(id), f_test_udf(id), g_test_udf(id) FROM TABLE1
   configs:
     rules:
-      L030:
+      capitalisation.functions:
         ignore_words_regex: (^f_|^g_)
 
 test_pass_ignore_words_regex_bigquery_simple:
@@ -84,7 +100,7 @@ test_pass_ignore_words_regex_bigquery_simple:
     core:
       dialect: bigquery
     rules:
-      L030:
+      capitalisation.functions:
         ignore_words_regex: ^_f_
 
 test_pass_ignore_words_regex_bigquery_complex:
@@ -93,5 +109,5 @@ test_pass_ignore_words_regex_bigquery_complex:
     core:
       dialect: bigquery
     rules:
-      L030:
+      capitalisation.functions:
         ignore_words_regex: (^_f_|\._f_)
diff --git a/test/fixtures/rules/std_rule_cases/L040.yml b/test/fixtures/rules/std_rule_cases/CP04.yml
similarity index 84%
rename from test/fixtures/rules/std_rule_cases/L040.yml
rename to test/fixtures/rules/std_rule_cases/CP04.yml
index e79c6e8..5da34e1 100644
--- a/test/fixtures/rules/std_rule_cases/L040.yml
+++ b/test/fixtures/rules/std_rule_cases/CP04.yml
@@ -1,4 +1,4 @@
-rule: L040
+rule: CP04
 
 test_fail_inconsistent_boolean_capitalisation:
   fail_str: SeLeCt true, FALSE, NULL
@@ -8,5 +8,5 @@ test_pass_ignore_word:
   pass_str: SELECT true, FALSE, NULL
   configs:
     rules:
-      L040:
+      capitalisation.literals:
         ignore_words: true
diff --git a/test/fixtures/rules/std_rule_cases/L063.yml b/test/fixtures/rules/std_rule_cases/CP05.yml
similarity index 92%
rename from test/fixtures/rules/std_rule_cases/L063.yml
rename to test/fixtures/rules/std_rule_cases/CP05.yml
index 26735db..511358a 100644
--- a/test/fixtures/rules/std_rule_cases/L063.yml
+++ b/test/fixtures/rules/std_rule_cases/CP05.yml
@@ -1,4 +1,4 @@
-rule: L063
+rule: CP05
 
 test_pass_default_consistent_lower:
   # Test that we don't have the "inconsistent" bug
@@ -42,7 +42,7 @@ test_fail_data_type_inconsistent_capitalisation_1:
   fix_str: CREATE TABLE table1 (account_id BIGINT);
   configs:
     rules:
-      L063:
+      capitalisation.types:
         extended_capitalisation_policy: upper
 
 test_fail_data_type_inconsistent_capitalisation_2:
@@ -50,7 +50,7 @@ test_fail_data_type_inconsistent_capitalisation_2:
   fix_str: CREATE TABLE table1 (account_id bigint);
   configs:
     rules:
-      L063:
+      capitalisation.types:
         extended_capitalisation_policy: lower
 
 test_fail_data_type_inconsistent_capitalisation_3:
@@ -58,7 +58,7 @@ test_fail_data_type_inconsistent_capitalisation_3:
   fix_str: CREATE TABLE table1 (account_id Bigint);
   configs:
     rules:
-      L063:
+      capitalisation.types:
         extended_capitalisation_policy: capitalise
 
 test_fail_data_type_capitalisation_policy_lower:
@@ -66,7 +66,7 @@ test_fail_data_type_capitalisation_policy_lower:
   fix_str: CREATE TABLE table1 (account_id bigint);
   configs:
     rules:
-      L063:
+      capitalisation.types:
         extended_capitalisation_policy: lower
 
 test_fail_data_type_capitalisation_policy_lower_2:
@@ -74,7 +74,7 @@ test_fail_data_type_capitalisation_policy_lower_2:
   fix_str: CREATE TABLE table1 (account_id bigint, column_two varchar(255));
   configs:
     rules:
-      L063:
+      capitalisation.types:
         extended_capitalisation_policy: lower
 
 test_fail_data_type_capitalisation_policy_upper:
@@ -82,7 +82,7 @@ test_fail_data_type_capitalisation_policy_upper:
   fix_str: CREATE TABLE table1 (account_id BIGINT);
   configs:
     rules:
-      L063:
+      capitalisation.types:
         extended_capitalisation_policy: upper
 
 test_fail_data_type_capitalisation_policy_upper_2:
@@ -90,7 +90,7 @@ test_fail_data_type_capitalisation_policy_upper_2:
   fix_str: CREATE TABLE table1 (account_id BIGINT, column_two VARCHAR(255));
   configs:
     rules:
-      L010:
+      capitalisation.types:
         extended_capitalisation_policy: upper
 
 test_fail_data_type_capitalisation_policy_capitalise:
@@ -99,7 +99,7 @@ test_fail_data_type_capitalisation_policy_capitalise:
   fix_str: CREATE TABLE table1 (account_id Bigint);
   configs:
     rules:
-      L063:
+      capitalisation.types:
         extended_capitalisation_policy: capitalise
 
 test_fail_data_type_capitalisation_policy_keywords_1:
@@ -109,7 +109,7 @@ test_fail_data_type_capitalisation_policy_keywords_1:
   fix_str: CREATE TABLE table1 (account_id BIGINT, column_two TIMESTAMP);
   configs:
     rules:
-      L063:
+      capitalisation.types:
         extended_capitalisation_policy: upper
 
 test_fail_data_type_capitalisation_policy_keywords_2:
@@ -117,7 +117,7 @@ test_fail_data_type_capitalisation_policy_keywords_2:
   fix_str: CREATE TABLE table1 (account_id BIGINT, column_two TIMESTAMP WITH TIME ZONE);
   configs:
     rules:
-      L063:
+      capitalisation.types:
         extended_capitalisation_policy: upper
 
 test_pass_sparksql_complex_data_types:
@@ -131,7 +131,7 @@ test_pass_sparksql_complex_data_types:
     core:
       dialect: sparksql
     rules:
-      L063:
+      capitalisation.types:
         extended_capitalisation_policy: upper
 
 test_pass_bigquery_struct_params:
@@ -144,7 +144,7 @@ test_pass_bigquery_struct_params:
     core:
       dialect: bigquery
     rules:
-      L063:
+      capitalisation.types:
         extended_capitalisation_policy: upper
 
 # See https://github.com/sqlfluff/sqlfluff/issues/3277
@@ -160,5 +160,5 @@ test_pass_typless_structs_dont_trigger_rule:
     core:
       dialect: bigquery
     rules:
-      L063:
+      capitalisation.types:
         extended_capitalisation_policy: upper
diff --git a/test/fixtures/rules/std_rule_cases/L061.yml b/test/fixtures/rules/std_rule_cases/CV01.yml
similarity index 98%
rename from test/fixtures/rules/std_rule_cases/L061.yml
rename to test/fixtures/rules/std_rule_cases/CV01.yml
index 1c12dcc..561632a 100644
--- a/test/fixtures/rules/std_rule_cases/L061.yml
+++ b/test/fixtures/rules/std_rule_cases/CV01.yml
@@ -1,4 +1,4 @@
-rule: L061
+rule: CV01
 
 test_pass_not_equal_to:
   pass_str: |
diff --git a/test/fixtures/rules/std_rule_cases/L060.yml b/test/fixtures/rules/std_rule_cases/CV02.yml
similarity index 97%
rename from test/fixtures/rules/std_rule_cases/L060.yml
rename to test/fixtures/rules/std_rule_cases/CV02.yml
index d7060ee..626c01b 100644
--- a/test/fixtures/rules/std_rule_cases/L060.yml
+++ b/test/fixtures/rules/std_rule_cases/CV02.yml
@@ -1,4 +1,4 @@
-rule: L060
+rule: CV02
 
 test_pass_coalesce:
   pass_str: |
diff --git a/test/fixtures/rules/std_rule_cases/CV03.yml b/test/fixtures/rules/std_rule_cases/CV03.yml
new file mode 100644
index 0000000..dec12d6
--- /dev/null
+++ b/test/fixtures/rules/std_rule_cases/CV03.yml
@@ -0,0 +1,57 @@
+rule: CV03
+
+test_require_pass:
+  pass_str: SELECT a, b, FROM foo
+  configs:
+    rules:
+      convention.select_trailing_comma:
+        select_clause_trailing_comma: require
+
+test_require_fail:
+  fail_str: SELECT a, b FROM foo
+  fix_str: SELECT a, b, FROM foo
+  configs:
+    rules:
+      convention.select_trailing_comma:
+        select_clause_trailing_comma: require
+
+
+test_forbid_pass:
+  pass_str: SELECT a, b FROM foo
+  configs:
+    rules:
+      convention.select_trailing_comma:
+        select_clause_trailing_comma: forbid
+
+test_forbid_fail:
+  fail_str: SELECT a, b, FROM foo
+  fix_str: SELECT a, b FROM foo
+  configs:
+    rules:
+      convention.select_trailing_comma:
+        select_clause_trailing_comma: forbid
+
+test_fail_templated:
+  # NOTE: Check no fix, because it's not safe.
+  fail_str: |
+    SELECT
+        {% for col in ['a', 'b', 'c'] %}
+            {{col}},
+        {% endfor %}
+    FROM tbl
+  fix_str: |
+    SELECT
+        {% for col in ['a', 'b', 'c'] %}
+            {{col}},
+        {% endfor %}
+    FROM tbl
+  violations_after_fix:
+  - code: CV03
+    description: Trailing comma in select statement forbidden
+    line_no: 3
+    line_pos: 16
+    name: "convention.select_trailing_comma"
+  configs:
+    rules:
+      convention.select_trailing_comma:
+        select_clause_trailing_comma: forbid
diff --git a/test/fixtures/rules/std_rule_cases/L047.yml b/test/fixtures/rules/std_rule_cases/CV04.yml
similarity index 96%
rename from test/fixtures/rules/std_rule_cases/L047.yml
rename to test/fixtures/rules/std_rule_cases/CV04.yml
index 3a7f9ae..4653529 100644
--- a/test/fixtures/rules/std_rule_cases/L047.yml
+++ b/test/fixtures/rules/std_rule_cases/CV04.yml
@@ -1,4 +1,4 @@
-rule: L047
+rule: CV04
 
 passes_on_count_star:
   pass_str: |
@@ -20,7 +20,7 @@ passes_on_count_1:
 
   configs: &prefer_count_1
     rules:
-      L047:
+      convention.count_rows:
         prefer_count_1: true
 
 changes_count_0_to_count_star:
@@ -51,7 +51,7 @@ passes_on_count_0:
 
   configs: &prefer_count_0
     rules:
-      L047:
+      convention.count_rows:
         prefer_count_0: true
 
 passes_on_count_1_if_both_present:
@@ -65,7 +65,7 @@ passes_on_count_1_if_both_present:
 
   configs: &prefer_both
     rules:
-      L047:
+      convention.count_rows:
         prefer_count_0: true
         prefer_count_1: true
 
diff --git a/test/fixtures/rules/std_rule_cases/L049.yml b/test/fixtures/rules/std_rule_cases/CV05.yml
similarity index 82%
rename from test/fixtures/rules/std_rule_cases/L049.yml
rename to test/fixtures/rules/std_rule_cases/CV05.yml
index bdba108..980c6f8 100644
--- a/test/fixtures/rules/std_rule_cases/L049.yml
+++ b/test/fixtures/rules/std_rule_cases/CV05.yml
@@ -1,4 +1,4 @@
-rule: L049
+rule: CV05
 
 test_is_null:
   pass_str: |
@@ -77,6 +77,15 @@ test_set_clause:
     UPDATE table1 SET col = NULL
     WHERE col = ""
 
+test_bigquery_set_options:
+  pass_str: |
+    ALTER TABLE table
+    SET OPTIONS (expiration_timestamp = NULL)
+    ;
+  configs:
+    core:
+      dialect: bigquery
+
 test_tsql_exec_clause:
   pass_str: |
     exec something
@@ -96,3 +105,10 @@ test_tsql_alternate_alias_syntax:
   configs:
     core:
       dialect: tsql
+
+test_exclude_constraint:
+  pass_str: |
+    alter table abc add constraint xyz exclude (field WITH =);
+  configs:
+    core:
+      dialect: postgres
diff --git a/test/fixtures/rules/std_rule_cases/L052.yml b/test/fixtures/rules/std_rule_cases/CV06.yml
similarity index 86%
rename from test/fixtures/rules/std_rule_cases/L052.yml
rename to test/fixtures/rules/std_rule_cases/CV06.yml
index efaab7a..c6c6436 100644
--- a/test/fixtures/rules/std_rule_cases/L052.yml
+++ b/test/fixtures/rules/std_rule_cases/CV06.yml
@@ -1,4 +1,4 @@
-rule: L052
+rule: CV06
 
 test_pass_semi_colon_same_line_default:
   pass_str: |
@@ -9,7 +9,7 @@ test_pass_semi_colon_custom_newline:
     SELECT a FROM foo;
   configs:
     rules:
-      L052:
+      convention.terminator:
         multiline_newline: true
 
 test_fail_semi_colon_same_line_custom_newline:
@@ -22,7 +22,7 @@ test_fail_semi_colon_same_line_custom_newline:
     ;
   configs:
     rules:
-      L052:
+      convention.terminator:
         multiline_newline: true
 
 test_pass_no_semi_colon_default:
@@ -34,7 +34,7 @@ test_pass_no_semi_colon_custom_newline:
     SELECT a FROM foo
   configs:
     rules:
-      L052:
+      convention.terminator:
         multiline_newline: true
 
 test_fail_no_semi_colon_custom_require:
@@ -44,7 +44,7 @@ test_fail_no_semi_colon_custom_require:
     SELECT a FROM foo;
   configs:
     rules:
-      L052:
+      convention.terminator:
         require_final_semicolon: true
 
 test_fail_no_semi_colon_custom_require_oneline:
@@ -54,7 +54,7 @@ test_fail_no_semi_colon_custom_require_oneline:
     SELECT a FROM foo;
   configs:
     rules:
-      L052:
+      convention.terminator:
         require_final_semicolon: true
         multiline_newline: true
 
@@ -68,7 +68,7 @@ test_fail_no_semi_colon_custom_require_multiline:
     ;
   configs:
     rules:
-      L052:
+      convention.terminator:
         require_final_semicolon: true
         multiline_newline: true
 
@@ -97,7 +97,7 @@ test_fail_multi_statement_semi_colon_custom_multiline:
     ;
   configs:
     rules:
-      L052:
+      convention.terminator:
         multiline_newline: true
 
 test_pass_multi_statement_no_trailing_semi_colon_default:
@@ -114,7 +114,7 @@ test_pass_multi_statement_no_trailing_semi_colon_custom_require:
     SELECT b FROM bar;
   configs:
     rules:
-      L052:
+      convention.terminator:
         require_final_semicolon: true
 
 test_fail_multi_statement_no_trailing_semi_colon_custom_require_oneline:
@@ -126,7 +126,7 @@ test_fail_multi_statement_no_trailing_semi_colon_custom_require_oneline:
     SELECT b FROM bar;
   configs:
     rules:
-      L052:
+      convention.terminator:
         require_final_semicolon: true
         multiline_newline: true
 
@@ -145,7 +145,7 @@ test_fail_multi_statement_no_trailing_semi_colon_custom_require_multiline:
     ;
   configs:
     rules:
-      L052:
+      convention.terminator:
         require_final_semicolon: true
         multiline_newline: true
 
@@ -169,7 +169,7 @@ test_pass_newline_semi_colon_custom_newline:
     ;
   configs:
     rules:
-      L052:
+      convention.terminator:
         multiline_newline: true
 
 test_fail_multi_statement_semi_colon_default:
@@ -199,7 +199,7 @@ test_fail_multi_statement_semi_colon_custom_require_multiline:
     ;
   configs:
     rules:
-      L052:
+      convention.terminator:
         require_final_semicolon: true
         multiline_newline: true
 
@@ -215,7 +215,7 @@ test_fail_multiple_newlines_semi_colon_custom_require_newline:
     ;
   configs:
     rules:
-      L052:
+      convention.terminator:
         require_final_semicolon: true
         multiline_newline: true
 
@@ -226,7 +226,7 @@ test_fail_final_semi_colon_same_line_inline_comment:
     SELECT a FROM foo; -- inline comment
   configs:
     rules:
-      L052:
+      convention.terminator:
         require_final_semicolon: true
 
 test_fail_final_semi_colon_same_line_inline_comment_custom_oneline:
@@ -236,7 +236,7 @@ test_fail_final_semi_colon_same_line_inline_comment_custom_oneline:
     SELECT a FROM foo; -- inline comment
   configs:
     rules:
-      L052:
+      convention.terminator:
         require_final_semicolon: true
         multiline_newline: true
 
@@ -250,7 +250,7 @@ test_fail_final_semi_colon_newline_inline_comment_custom_multiline:
     ;
   configs:
     rules:
-      L052:
+      convention.terminator:
         require_final_semicolon: true
         multiline_newline: true
 
@@ -279,7 +279,7 @@ test_pass_newline_inline_comment:
     ;
   configs:
     rules:
-      L052:
+      convention.terminator:
         multiline_newline: true
 
 test_fail_newline_inline_comment:
@@ -294,7 +294,7 @@ test_fail_newline_inline_comment:
     ;
   configs:
     rules:
-      L052:
+      convention.terminator:
         multiline_newline: true
 
 test_fail_newline_multiple_inline_comments_custom_oneline:
@@ -310,7 +310,7 @@ test_fail_newline_multiple_inline_comments_custom_oneline:
     -- inline comment #2
   configs:
     rules:
-      L052:
+      convention.terminator:
         multiline_newline: true
 
 test_fail_newline_multiple_inline_comments_custom_multiline:
@@ -329,7 +329,7 @@ test_fail_newline_multiple_inline_comments_custom_multiline:
     -- inline comment #2
   configs:
     rules:
-      L052:
+      convention.terminator:
         multiline_newline: true
 
 test_fail_newline_trailing_inline_comment:
@@ -342,7 +342,7 @@ test_fail_newline_trailing_inline_comment:
     ;
   configs:
     rules:
-      L052:
+      convention.terminator:
         multiline_newline: true
 
 test_fail_newline_preceding_block_comment_custom_oneline:
@@ -357,7 +357,7 @@ test_fail_newline_preceding_block_comment_custom_oneline:
     */
   configs:
     rules:
-      L052:
+      convention.terminator:
         multiline_newline: true
 
 test_fail_newline_preceding_block_comment_custom_multiline:
@@ -375,7 +375,7 @@ test_fail_newline_preceding_block_comment_custom_multiline:
     */
   configs:
     rules:
-      L052:
+      convention.terminator:
         multiline_newline: true
 
 test_fail_newline_trailing_block_comment:
@@ -392,7 +392,7 @@ test_fail_newline_trailing_block_comment:
     */
   configs:
     rules:
-      L052:
+      convention.terminator:
         multiline_newline: true
 
 test_fail_newline_block_comment_semi_colon_before:
@@ -411,7 +411,7 @@ test_fail_newline_block_comment_semi_colon_before:
     */
   configs:
     rules:
-      L052:
+      convention.terminator:
         multiline_newline: true
 
 test_fail_newline_block_comment_semi_colon_after:
@@ -431,7 +431,7 @@ test_fail_newline_block_comment_semi_colon_after:
     */
   configs:
     rules:
-      L052:
+      convention.terminator:
         multiline_newline: true
 
 test_fail_newline_create_table:
@@ -447,7 +447,7 @@ test_fail_newline_create_table:
     ;
   configs:
     rules:
-      L052:
+      convention.terminator:
         multiline_newline: true
 
 test_fail_newline_create_table_inline_comment:
@@ -462,7 +462,7 @@ test_fail_newline_create_table_inline_comment:
     ;
   configs:
     rules:
-      L052:
+      convention.terminator:
         multiline_newline: true
 
 test_fail_whitespace_after_simple_select:
@@ -492,5 +492,27 @@ test_fail_templated_fix_crosses_block_boundary:
     ;
   configs:
     rules:
-      L052:
+      convention.terminator:
+        require_final_semicolon: true
+
+test_pass_empty_file:
+  pass_str: ""
+
+test_pass_empty_file_with_require_final_semicolon:
+  pass_str: ""
+  configs:
+    rules:
+      convention.terminator:
+        require_final_semicolon: true
+
+test_pass_file_with_only_comments:
+  pass_str: |
+    -- just an empty file
+
+test_pass_file_with_only_comments_with_require_final_semicolon:
+  pass_str: |
+    -- just an empty file
+  configs:
+    rules:
+      convention.terminator:
         require_final_semicolon: true
diff --git a/test/fixtures/rules/std_rule_cases/L053.yml b/test/fixtures/rules/std_rule_cases/CV07.yml
similarity index 97%
rename from test/fixtures/rules/std_rule_cases/L053.yml
rename to test/fixtures/rules/std_rule_cases/CV07.yml
index 80f6c75..157a81b 100644
--- a/test/fixtures/rules/std_rule_cases/L053.yml
+++ b/test/fixtures/rules/std_rule_cases/CV07.yml
@@ -1,4 +1,4 @@
-rule: L053
+rule: CV07
 
 test_pass_no_outer_brackets:
   pass_str: |
@@ -69,12 +69,12 @@ test_pass_begin_end_statement_brackets_tsql:
 test_fail_leading_trailing_whitespace:
   # This previously caused the post-fix parse check to fail.
   fail_str: "(\n    SELECT\n        foo,\n        bar,\n        baz\n    FROM mycte2\n);\n"
-  # Yes, the formatting looks bad, but that's because we're only running L053
+  # Yes, the formatting looks bad, but that's because we're only running CV07
   # here. In the real world, other rules will tidy up the formatting.
   fix_str: "\n    SELECT\n        foo,\n        bar,\n        baz\n    FROM mycte2\n;\n"
 
 test_fail_leading_whitespace_and_comment:
   fail_str: "( -- This\n    SELECT\n        foo,\n        bar,\n        baz\n    FROM mycte2\n)\n"
-  # Yes, the formatting looks bad, but that's because we're only running L053
+  # Yes, the formatting looks bad, but that's because we're only running CV07
   # here. In the real world, other rules will tidy up the formatting.
   fix_str: " -- This\n    SELECT\n        foo,\n        bar,\n        baz\n    FROM mycte2\n\n"
diff --git a/test/fixtures/rules/std_rule_cases/L055.yml b/test/fixtures/rules/std_rule_cases/CV08.yml
similarity index 99%
rename from test/fixtures/rules/std_rule_cases/L055.yml
rename to test/fixtures/rules/std_rule_cases/CV08.yml
index 784065d..5aebcf8 100644
--- a/test/fixtures/rules/std_rule_cases/L055.yml
+++ b/test/fixtures/rules/std_rule_cases/CV08.yml
@@ -1,4 +1,4 @@
-rule: L055
+rule: CV08
 
 test_fail_right_join:
   fail_str: |
diff --git a/test/fixtures/rules/std_rule_cases/L062.yml b/test/fixtures/rules/std_rule_cases/CV09.yml
similarity index 71%
rename from test/fixtures/rules/std_rule_cases/L062.yml
rename to test/fixtures/rules/std_rule_cases/CV09.yml
index 7fdd0af..37141dd 100644
--- a/test/fixtures/rules/std_rule_cases/L062.yml
+++ b/test/fixtures/rules/std_rule_cases/CV09.yml
@@ -1,4 +1,4 @@
-rule: L062
+rule: CV09
 
 test_pass_default_none:
   pass_str: |
@@ -9,7 +9,7 @@ test_fail_deny_word:
     SELECT col1 FROM deprecated_table
   configs:
     rules:
-      L062:
+      convention.blocked_words:
         blocked_words: deprecated_table
 
 test_fail_deny_word_case_difference1:
@@ -17,7 +17,7 @@ test_fail_deny_word_case_difference1:
     SELECT col1 FROM deprecated_table
   configs:
     rules:
-      L062:
+      convention.blocked_words:
         blocked_words: Deprecated_Table
 
 test_fail_deny_word_case_difference2:
@@ -25,7 +25,7 @@ test_fail_deny_word_case_difference2:
     SELECT col1 FROM Deprecated_Table
   configs:
     rules:
-      L062:
+      convention.blocked_words:
         blocked_words: deprecated_table
 
 test_fail_multiple_deny_words1:
@@ -33,7 +33,7 @@ test_fail_multiple_deny_words1:
     SELECT myOldFunction(col1) FROM table1
   configs:
     rules:
-      L062:
+      convention.blocked_words:
         blocked_words: deprecated_table,myoldFunction
 
 test_fail_multiple_deny_words2:
@@ -41,7 +41,7 @@ test_fail_multiple_deny_words2:
     SELECT col1 FROM deprecated_table
   configs:
     rules:
-      L062:
+      convention.blocked_words:
         blocked_words: deprecated_table,myoldFunction
 
 test_pass_not_complete_match:
@@ -49,7 +49,7 @@ test_pass_not_complete_match:
     SELECT col1 FROM deprecated_table1
   configs:
     rules:
-      L062:
+      convention.blocked_words:
         blocked_words: deprecated_table
 
 test_pass_is_comment:
@@ -58,7 +58,7 @@ test_pass_is_comment:
     SELECT col1 FROM new_table
   configs:
     rules:
-      L062:
+      convention.blocked_words:
         blocked_words: deprecated_table
 
 test_pass_in_comment:
@@ -67,7 +67,7 @@ test_pass_in_comment:
     SELECT col1 FROM new_table
   configs:
     rules:
-      L062:
+      convention.blocked_words:
         blocked_words: deprecated_table
 
 test_fail_bool:
@@ -77,7 +77,7 @@ test_fail_bool:
     core:
       dialect: exasol
     rules:
-      L062:
+      convention.blocked_words:
         blocked_words: bool
 
 test_pass_bool:
@@ -87,7 +87,7 @@ test_pass_bool:
     core:
       dialect: exasol
     rules:
-      L062:
+      convention.blocked_words:
         blocked_words: bool
 
 test_pass_bigquery:
@@ -98,7 +98,7 @@ test_pass_bigquery:
     core:
       dialect: bigquery
     rules:
-      L062:
+      convention.blocked_words:
         blocked_regex: ^.*(2022_06_01|2022_05_01).*$
 
 test_fail_bigquery:
@@ -109,7 +109,7 @@ test_fail_bigquery:
     core:
       dialect: bigquery
     rules:
-      L062:
+      convention.blocked_words:
         blocked_regex: ^.*(2022_06_01|2022_05_01).*$
 
 test_fail_bigquery2:
@@ -120,7 +120,7 @@ test_fail_bigquery2:
     core:
       dialect: bigquery
     rules:
-      L062:
+      convention.blocked_words:
         blocked_regex: .*(2022_06_01|2022_05_01).*
 
 test_fail_bigquery3:
@@ -131,7 +131,7 @@ test_fail_bigquery3:
     core:
       dialect: bigquery
     rules:
-      L062:
+      convention.blocked_words:
         blocked_regex: (2022_06_01|2022_05_01)
 
 test_pass_comment_word1:
@@ -143,7 +143,7 @@ test_pass_comment_word1:
     core:
       dialect: bigquery
     rules:
-      L062:
+      convention.blocked_words:
         blocked_words: TABLESAMPLE
 
 test_pass_comment_word2:
@@ -155,7 +155,7 @@ test_pass_comment_word2:
     core:
       dialect: bigquery
     rules:
-      L062:
+      convention.blocked_words:
         blocked_words: TABLESAMPLE
 
 test_pass_comment_word3:
@@ -169,7 +169,7 @@ test_pass_comment_word3:
     core:
       dialect: bigquery
     rules:
-      L062:
+      convention.blocked_words:
         blocked_words: TABLESAMPLE
 
 test_pass_comment_regex1:
@@ -181,7 +181,7 @@ test_pass_comment_regex1:
     core:
       dialect: bigquery
     rules:
-      L062:
+      convention.blocked_words:
         blocked_regex: (TABLESAMPLE)
 
 test_pass_comment_regex2:
@@ -193,7 +193,7 @@ test_pass_comment_regex2:
     core:
       dialect: bigquery
     rules:
-      L062:
+      convention.blocked_words:
         blocked_regex: (TABLESAMPLE)
 
 test_pass_comment_regex3:
@@ -207,5 +207,41 @@ test_pass_comment_regex3:
     core:
       dialect: bigquery
     rules:
-      L062:
+      convention.blocked_words:
         blocked_regex: (TABLESAMPLE)
+
+test_pass_match_source1:
+  pass_str: |
+    SELECT * FROM {{ ref('deprecated_table') }}
+  configs:
+    core:
+      templater: jinja
+      ignore_templated_areas: true
+    rules:
+      convention.blocked_words:
+        blocked_regex: ref\('deprecated_
+        match_source: true
+
+test_pass_match_source2:
+  pass_str: |
+    SELECT * FROM {{ ref('deprecated_table') }}
+  configs:
+    core:
+      templater: jinja
+      ignore_templated_areas: false
+    rules:
+      convention.blocked_words:
+        blocked_regex: ref\('deprecated_
+        match_source: false
+
+test_fail_match_source1:
+  fail_str: |
+    SELECT * FROM {{ ref('deprecated_table') }}
+  configs:
+    core:
+      templater: jinja
+      ignore_templated_areas: false
+    rules:
+      convention.blocked_words:
+        blocked_regex: ref\('deprecated_
+        match_source: true
diff --git a/test/fixtures/rules/std_rule_cases/L064.yml b/test/fixtures/rules/std_rule_cases/CV10.yml
similarity index 90%
rename from test/fixtures/rules/std_rule_cases/L064.yml
rename to test/fixtures/rules/std_rule_cases/CV10.yml
index b6e0980..0a9ef50 100644
--- a/test/fixtures/rules/std_rule_cases/L064.yml
+++ b/test/fixtures/rules/std_rule_cases/CV10.yml
@@ -1,4 +1,4 @@
-rule: L064
+rule: CV10
 
 test_fail_result_of_fix_is_valid_bigquery:
   fail_str: |
@@ -59,7 +59,7 @@ test_pass_preferred_tripple_quotes:
     core:
       dialect: bigquery
     rules:
-      L064:
+      convention.quoted_literals:
         preferred_quoted_literal_style: double_quotes
 
 test_fail_alternate_tripple_quotes:
@@ -71,7 +71,7 @@ test_fail_alternate_tripple_quotes:
     core:
       dialect: bigquery
     rules:
-      L064:
+      convention.quoted_literals:
         preferred_quoted_literal_style: double_quotes
 
 test_fail_unnecessary_escaping:
@@ -87,7 +87,7 @@ test_fail_unnecessary_escaping:
     core:
       dialect: bigquery
     rules:
-      L064:
+      convention.quoted_literals:
         preferred_quoted_literal_style: double_quotes
 
 test_fail_bigquery_string_prefixes:
@@ -107,7 +107,7 @@ test_fail_bigquery_string_prefixes:
     core:
       dialect: bigquery
     rules:
-      L064:
+      convention.quoted_literals:
         preferred_quoted_literal_style: double_quotes
 
 test_fail_bigquery_string_prefixes_when_style_is_consistent:
@@ -149,7 +149,7 @@ test_fail_tripple_quoted_strings_dont_remove_escapes_single_quotes:
     core:
       dialect: bigquery
     rules:
-      L064:
+      convention.quoted_literals:
         preferred_quoted_literal_style: single_quotes
 
 test_fail_tripple_quoted_strings_dont_remove_escapes_double_quotes:
@@ -165,7 +165,7 @@ test_fail_tripple_quoted_strings_dont_remove_escapes_double_quotes:
     core:
       dialect: bigquery
     rules:
-      L064:
+      convention.quoted_literals:
         preferred_quoted_literal_style: double_quotes
 
 test_fail_edge_case_tripple_quoted_string_ending_with_double_quote:
@@ -182,7 +182,7 @@ test_fail_edge_case_tripple_quoted_string_ending_with_double_quote:
     core:
       dialect: bigquery
     rules:
-      L064:
+      convention.quoted_literals:
         preferred_quoted_literal_style: double_quotes
 
 test_pass_lots_of_quotes:
@@ -195,7 +195,7 @@ test_pass_lots_of_quotes:
     core:
       dialect: bigquery
     rules:
-      L064:
+      convention.quoted_literals:
         preferred_quoted_literal_style: double_quotes
 
 test_fail_lots_of_quotes:
@@ -208,7 +208,7 @@ test_fail_lots_of_quotes:
     core:
       dialect: bigquery
     rules:
-      L064:
+      convention.quoted_literals:
         preferred_quoted_literal_style: double_quotes
 
 test_fail_quote_replace_in_raw_strings:
@@ -225,7 +225,7 @@ test_fail_quote_replace_in_raw_strings:
     core:
       dialect: bigquery
     rules:
-      L064:
+      convention.quoted_literals:
         preferred_quoted_literal_style: double_quotes
 
 test_pass_dollar_quoted_strings_are_ignored:
@@ -238,7 +238,7 @@ test_pass_dollar_quoted_strings_are_ignored:
     core:
       dialect: postgres
     rules:
-      L064:
+      convention.quoted_literals:
         force_enable: true
         preferred_quoted_literal_style: single_quotes
 
@@ -256,7 +256,7 @@ test_pass_date_constructor_strings_are_ignored_2:
         DATE'some string'
   configs:
     rules:
-      L064:
+      convention.quoted_literals:
         force_enable: true
         preferred_quoted_literal_style: double_quotes
 
@@ -267,7 +267,7 @@ test_pass_empty_string:
     core:
       dialect: bigquery
     rules:
-      L064:
+      convention.quoted_literals:
         preferred_quoted_literal_style: double_quotes
 
 test_fail_empty_string:
@@ -279,7 +279,7 @@ test_fail_empty_string:
     core:
       dialect: bigquery
     rules:
-      L064:
+      convention.quoted_literals:
         preferred_quoted_literal_style: double_quotes
 
 test_pass_partially_templated_quoted_literals_simple:
@@ -289,7 +289,7 @@ test_pass_partially_templated_quoted_literals_simple:
     core:
       dialect: bigquery
     rules:
-      L064:
+      convention.quoted_literals:
         preferred_quoted_literal_style: double_quotes
 
 test_fail_partially_templated_quoted_literals_simple:
@@ -299,7 +299,7 @@ test_fail_partially_templated_quoted_literals_simple:
     core:
       dialect: bigquery
     rules:
-      L064:
+      convention.quoted_literals:
         preferred_quoted_literal_style: double_quotes
 
 test_pass_partially_templated_quoted_literals_complex:
@@ -309,7 +309,7 @@ test_pass_partially_templated_quoted_literals_complex:
     core:
       dialect: bigquery
     rules:
-      L064:
+      convention.quoted_literals:
         preferred_quoted_literal_style: double_quotes
 
 test_fail_partially_templated_quoted_literals_complex:
@@ -319,7 +319,7 @@ test_fail_partially_templated_quoted_literals_complex:
     core:
       dialect: bigquery
     rules:
-      L064:
+      convention.quoted_literals:
         preferred_quoted_literal_style: double_quotes
 
 test_pass_partially_templated_quoted_literals_with_multiple_templates:
@@ -329,7 +329,7 @@ test_pass_partially_templated_quoted_literals_with_multiple_templates:
     core:
       dialect: bigquery
     rules:
-      L064:
+      convention.quoted_literals:
         preferred_quoted_literal_style: double_quotes
 
 test_fail_partially_templated_quoted_literals_with_multiple_templates:
@@ -339,7 +339,7 @@ test_fail_partially_templated_quoted_literals_with_multiple_templates:
     core:
       dialect: bigquery
     rules:
-      L064:
+      convention.quoted_literals:
         preferred_quoted_literal_style: double_quotes
 
 test_fail_partially_templated_quoted_literals_inside_blocks:
@@ -352,7 +352,7 @@ test_fail_partially_templated_quoted_literals_inside_blocks:
     core:
       dialect: bigquery
     rules:
-      L064:
+      convention.quoted_literals:
         preferred_quoted_literal_style: double_quotes
 
 test_pass_fully_templated_quoted_literals_are_ignored:
@@ -362,7 +362,7 @@ test_pass_fully_templated_quoted_literals_are_ignored:
     core:
       dialect: bigquery
     rules:
-      L064:
+      convention.quoted_literals:
         preferred_quoted_literal_style: double_quotes
 
 test_pass_partially_templated_literals_are_ignored_when_some_quotes_are_inside_the_template_1:
@@ -372,7 +372,7 @@ test_pass_partially_templated_literals_are_ignored_when_some_quotes_are_inside_t
     core:
       dialect: bigquery
     rules:
-      L064:
+      convention.quoted_literals:
         preferred_quoted_literal_style: double_quotes
 
 test_pass_partially_templated_literals_are_ignored_when_some_quotes_are_inside_the_template_2:
@@ -382,7 +382,7 @@ test_pass_partially_templated_literals_are_ignored_when_some_quotes_are_inside_t
     core:
       dialect: bigquery
     rules:
-      L064:
+      convention.quoted_literals:
         preferred_quoted_literal_style: double_quotes
 
 test_pass_prefix_chars_are_correctly_detected_as_unlintable:
@@ -394,5 +394,5 @@ test_pass_prefix_chars_are_correctly_detected_as_unlintable:
     core:
       dialect: bigquery
     rules:
-      L064:
+      convention.quoted_literals:
         preferred_quoted_literal_style: double_quotes
diff --git a/test/fixtures/rules/std_rule_cases/L067.yml b/test/fixtures/rules/std_rule_cases/CV11.yml
similarity index 80%
rename from test/fixtures/rules/std_rule_cases/L067.yml
rename to test/fixtures/rules/std_rule_cases/CV11.yml
index 6183e20..1351d0e 100644
--- a/test/fixtures/rules/std_rule_cases/L067.yml
+++ b/test/fixtures/rules/std_rule_cases/CV11.yml
@@ -1,4 +1,4 @@
-rule: L067
+rule: CV11
 
 test_pass_cast:
   pass_str: |
@@ -48,7 +48,7 @@ test_fail_cast_with_comment_when_config_is_set_to_convert:
           */ as int) as bar;
   configs:
     rules:
-      L067:
+      convention.casting_style:
         preferred_type_casting_style: convert
 
 test_fail_cast_with_comment_when_config_is_set_to_shorthand:
@@ -59,7 +59,7 @@ test_fail_cast_with_comment_when_config_is_set_to_shorthand:
           */ as int) as bar;
   configs:
     rules:
-      L067:
+      convention.casting_style:
         preferred_type_casting_style: shorthand
 
 test_fail_3_argument_convert_when_config_is_set_to_cast:
@@ -68,7 +68,7 @@ test_fail_3_argument_convert_when_config_is_set_to_cast:
     from foo;
   configs:
     rules:
-      L067:
+      convention.casting_style:
         preferred_type_casting_style: cast
 
 test_fail_3_argument_convert_when_config_is_set_to_shorthand:
@@ -77,7 +77,7 @@ test_fail_3_argument_convert_when_config_is_set_to_shorthand:
     from foo;
   configs:
     rules:
-      L067:
+      convention.casting_style:
         preferred_type_casting_style: shorthand
 
 test_fail_inconsistent_type_casting_prior_convert:
@@ -226,7 +226,7 @@ test_fail_inconsistent_type_casting_when_config_cast:
     from foo;
   configs:
     rules:
-      L067:
+      convention.casting_style:
         preferred_type_casting_style: cast
 
 test_fail_inconsistent_type_casting_3_arguments_convert_when_config_cast:
@@ -243,14 +243,15 @@ test_fail_inconsistent_type_casting_3_arguments_convert_when_config_cast:
         cast(10 as text) as coo
     from foo;
   violations_after_fix:
-  - code: L067
+  - code: CV11
     description: Used type casting style is different from the preferred type
                  casting style.
     line_no: 2
     line_pos: 5
+    name: "convention.casting_style"
   configs:
     rules:
-      L067:
+      convention.casting_style:
         preferred_type_casting_style: cast
 
 test_fail_inconsistent_type_casting_when_config_convert:
@@ -268,7 +269,7 @@ test_fail_inconsistent_type_casting_when_config_convert:
     from foo;
   configs:
     rules:
-      L067:
+      convention.casting_style:
         preferred_type_casting_style: convert
 
 test_fail_inconsistent_type_casting_when_config_shorthand:
@@ -286,7 +287,7 @@ test_fail_inconsistent_type_casting_when_config_shorthand:
     from foo;
   configs:
     rules:
-      L067:
+      convention.casting_style:
         preferred_type_casting_style: shorthand
 
 test_fail_inconsistent_type_casting_3_arguments_convert_when_config_shorthand:
@@ -303,14 +304,15 @@ test_fail_inconsistent_type_casting_3_arguments_convert_when_config_shorthand:
         10::text as coo
     from foo;
   violations_after_fix:
-  - code: L067
+  - code: CV11
     description: Used type casting style is different from the preferred type
                  casting style.
     line_no: 2
     line_pos: 5
+    name: "convention.casting_style"
   configs:
     rules:
-      L067:
+      convention.casting_style:
         preferred_type_casting_style: shorthand
 
 test_pass_when_dialect_is_teradata:
@@ -320,3 +322,50 @@ test_pass_when_dialect_is_teradata:
   configs:
     core:
       dialect: teradata
+
+test_fail_parenthesize_expression_when_config_shorthand_from_cast:
+  fail_str: |
+    select
+        id::int,
+        cast(calendar_date||' 11:00:00' as timestamp) as calendar_datetime
+    from foo;
+  fix_str: |
+    select
+        id::int,
+        (calendar_date||' 11:00:00')::timestamp as calendar_datetime
+    from foo;
+  configs:
+    rules:
+      convention.casting_style:
+        preferred_type_casting_style: shorthand
+
+test_fail_parenthesize_expression_when_config_shorthand_from_convert:
+  fail_str: |
+    select
+        id::int,
+        convert(timestamp, calendar_date||' 11:00:00') as calendar_datetime
+    from foo;
+  fix_str: |
+    select
+        id::int,
+        (calendar_date||' 11:00:00')::timestamp as calendar_datetime
+    from foo;
+  configs:
+    rules:
+      convention.casting_style:
+        preferred_type_casting_style: shorthand
+
+test_fail_snowflake_semi_structured_cast_4453:
+  # https://github.com/sqlfluff/sqlfluff/issues/4453
+  fail_str: |
+    select (trim(value:Longitude::varchar))::double as longitude;
+    select col:a.b:c::varchar as bar;
+  fix_str: |
+    select cast((trim(cast(value:Longitude as varchar))) as double) as longitude;
+    select cast(col:a.b:c as varchar) as bar;
+  configs:
+    core:
+      dialect: snowflake
+    rules:
+      convention.casting_style:
+        preferred_type_casting_style: cast
diff --git a/test/fixtures/rules/std_rule_cases/L046.yml b/test/fixtures/rules/std_rule_cases/JJ01.yml
similarity index 99%
rename from test/fixtures/rules/std_rule_cases/L046.yml
rename to test/fixtures/rules/std_rule_cases/JJ01.yml
index 119a076..c940b7a 100644
--- a/test/fixtures/rules/std_rule_cases/L046.yml
+++ b/test/fixtures/rules/std_rule_cases/JJ01.yml
@@ -1,4 +1,4 @@
-rule: L046
+rule: JJ01
 
 test_simple:
   pass_str: SELECT 1 from {{ ref('foo') }}
diff --git a/test/fixtures/rules/std_rule_cases/L002.yml b/test/fixtures/rules/std_rule_cases/L002.yml
deleted file mode 100644
index 66f8640..0000000
--- a/test/fixtures/rules/std_rule_cases/L002.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-rule: L002
-
-test_fail_mixed_tabs_and_spaces:
-  fail_str: "    \t    \t    SELECT 1"
-  fix_str: "                    SELECT 1"
diff --git a/test/fixtures/rules/std_rule_cases/L004.yml b/test/fixtures/rules/std_rule_cases/L004.yml
deleted file mode 100644
index c53719b..0000000
--- a/test/fixtures/rules/std_rule_cases/L004.yml
+++ /dev/null
@@ -1,133 +0,0 @@
-rule: L004
-
-spaces_pass_default:
-  pass_str: "    \nSELECT 1"
-
-
-spaces_fail:
-  fail_str: "    \nSELECT 1"
-  fix_str: "\t\nSELECT 1"
-  configs:
-    rules:
-      indent_unit: tab
-
-
-spaces_fail_custom_tab_space_size:
-  fail_str: "    \nSELECT 1"
-  fix_str: "\t\t\nSELECT 1"
-  configs:
-    rules:
-      indent_unit: tab
-      tab_space_size: 2
-
-
-spaces_fail_custom_tab_space_size_not_multiple:
-  fail_str: "     \nSELECT 1"
-  violations:
-  - description: 'Incorrect indentation type found in file. The number of spaces is
-    not a multiple of '
-    line_no: 1
-    line_pos: 1
-  configs:
-    rules:
-      indent_unit: tab
-      tab_space_size: 2
-
-
-tabs_fail_default:
-  fail_str: "\t\tSELECT 1\n"
-  fix_str: "        SELECT 1\n"
-
-
-tabs_fail_default_set_tab_space_size:
-  fail_str: "\t\tSELECT 1\n"
-  fix_str: "    SELECT 1\n"
-  configs:
-    rules:
-      tab_space_size: 2
-
-
-tabs_pass:
-  pass_str: "\tSELECT 1"
-  configs:
-    rules:
-      indent_unit: tab
-
-
-mixed_indent_fail_default_tab_space_size:
-  fail_str: |2
-      	 select 1
-  fix_str: |2
-           select 1
-
-
-mixed_indent_fail_custom_tab_space_size:
-  fail_str: |2
-      	 select 1
-  fix_str: |2
-         select 1
-  configs:
-    rules:
-      tab_space_size: 2
-
-
-indented_comments:
-  pass_str: |
-    SELECT
-        a,         -- Some comment
-        longer_col -- A lined up comment
-    FROM spam
-
-
-indented_comments_default_config:
-  fail_str: |
-    SELECT
-    	a,			-- Some comment
-    	longer_col	-- A lined up comment
-    FROM spam
-  # The rule will only fix the indent before the select targets.
-  # Here tab indent is replaced with spaces.
-  fix_str: |
-    SELECT
-        a,			-- Some comment
-        longer_col	-- A lined up comment
-    FROM spam
-  # Lint fails after fixing due to the spaces that come before the comments.
-  # That is left unfixable for now, as explained in:
-  # https://github.com/sqlfluff/sqlfluff/pull/590#issuecomment-739484190
-  violations_after_fix:
-    - description: Incorrect indentation type found in file. The indent occurs after other
-        text, so a manual fix is needed.
-      line_no: 2
-      line_pos: 7
-    - description: Incorrect indentation type found in file. The indent occurs after other
-        text, so a manual fix is needed.
-      line_no: 3
-      line_pos: 15
-
-
-indented_comments_tab_config:
-  fail_str: |
-    SELECT
-        a,         -- Some comment
-        longer_col -- A lined up comment
-    FROM spam
-  # The rule will only fix the indent before the select targets.
-  # Here spaces indent is replaced with tab.
-  fix_str: |
-    SELECT
-    	a,         -- Some comment
-    	longer_col -- A lined up comment
-    FROM spam
-  # Lint fails after fixing due to the spaces that come before the comments.
-  # That is left unfixable for now, as explained in:
-  # https://github.com/sqlfluff/sqlfluff/pull/590#issuecomment-739484190
-  violations_after_fix:
-  - code: L004
-    description: Incorrect indentation type found in file. The indent occurs after other
-      text, so a manual fix is needed.
-    line_no: 2
-    line_pos: 4
-  configs:
-    rules:
-      indent_unit: tab
diff --git a/test/fixtures/rules/std_rule_cases/L005.yml b/test/fixtures/rules/std_rule_cases/L005.yml
deleted file mode 100644
index 2d683da..0000000
--- a/test/fixtures/rules/std_rule_cases/L005.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-rule: L005
-
-test_fail_whitespace_before_comma:
-  fail_str: SELECT 1 ,4
-  fix_str: SELECT 1,4
diff --git a/test/fixtures/rules/std_rule_cases/L008.yml b/test/fixtures/rules/std_rule_cases/L008.yml
deleted file mode 100644
index 54a219e..0000000
--- a/test/fixtures/rules/std_rule_cases/L008.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-rule: L008
-
-test_pass_single_whitespace_after_comma:
-  pass_str: SELECT 1, 4
-
-test_fail_multiple_whitespace_after_comma:
-  fail_str: SELECT 1,   4
-  fix_str: SELECT 1, 4
-
-test_fail_no_whitespace_after_comma:
-  fail_str: SELECT 1,4
-  fix_str: SELECT 1, 4
-
-test_fail_no_whitespace_after_comma_2:
-  fail_str: SELECT FLOOR(dt) ,count(*) FROM test
-  fix_str: SELECT FLOOR(dt) , count(*) FROM test
-
-test_pass_bigquery_trailing_comma:
-  pass_str: SELECT 1, 2,
diff --git a/test/fixtures/rules/std_rule_cases/L016.yml b/test/fixtures/rules/std_rule_cases/L016.yml
deleted file mode 100644
index da51c29..0000000
--- a/test/fixtures/rules/std_rule_cases/L016.yml
+++ /dev/null
@@ -1,339 +0,0 @@
-rule: L016
-
-test_pass_line_too_long_config_override:
-  # Long lines (with config override)
-  pass_str: "SELECT COUNT(*) FROM tbl\n"
-  configs:
-    rules:
-      max_line_length: 30
-
-test_fail_line_too_long_with_comments_1:
-  # Check we move comments correctly
-  fail_str: "SELECT 1 -- Some Comment\n"
-
-  fix_str: "-- Some Comment\nSELECT 1\n"
-  configs:
-    rules:
-      max_line_length: 18
-
-test_fail_line_too_long_with_comments_2:
-  # Check we can add newlines after dedents (with an indent)
-  fail_str: "    SELECT COUNT(*) FROM tbl\n"
-  fix_str: "    SELECT\n        COUNT(*)\n    FROM tbl\n"
-  configs:
-    rules:
-      max_line_length: 20
-
-test_fail_line_too_long_with_comments_3:
-  # Check priority of fixes
-  fail_str: "SELECT COUNT(*) FROM tbl -- Some Comment\n"
-  fix_str: "-- Some Comment\nSELECT\n    COUNT(*)\nFROM tbl\n"
-  configs:
-    rules:
-      max_line_length: 18
-
-test_fail_line_too_long_with_comments_4:
-  # In this case, the inline comment is NOT on a line by itself (note the
-  # leading comma), but even if we move it onto a line by itself, it's still
-  # too long. In this case, the rule should do nothing, otherwise it triggers
-  # an endless cycle of "fixes" that simply keeps adding blank lines.
-  fail_str: |
-    SELECT
-    c1
-    ,--  the "y variable" and uses_small_subject_line to be the "x variable" in terms of the regression line.
-    c2
-  configs:
-    rules:
-      max_line_length: 80
-
-test_pass_line_too_long_with_comments_ignore_comment_lines:
-  # Same case as above, but should pass as ignore_comment_lines is set to true
-  pass_str: |
-    SELECT
-    c1
-    ,--  the "y variable" and uses_small_subject_line to be the "x variable" in terms of the regression line.
-    c2
-  configs:
-    rules:
-      max_line_length: 80
-      L016:
-        ignore_comment_lines: true
-
-test_fail_line_too_long_only_comments:
-  # Check long lines that are only comments are linted correctly
-  fail_str: "-- Some really long comments on their own line\n\nSELECT 1"
-  configs:
-    rules:
-      max_line_length: 18
-
-test_fail_line_too_long_handling_indents:
-  # Check we handle indents nicely
-  fail_str: "SELECT 12345\n"
-  fix_str: "SELECT\n    12345\n"
-  configs:
-    rules:
-      max_line_length: 10
-
-test_pass_line_too_long_ignore_comments_true:
-  # Check we can ignore comments if we want
-  pass_str: "SELECT 1\n-- Some long comment over 10 characters\n"
-  configs:
-    rules:
-      max_line_length: 10
-      L016:
-        ignore_comment_lines: true
-
-test_pass_line_too_long_ignore_comments_false:
-  # Check we still pick up long comments if we don't want to ignore
-  fail_str: "SELECT 1\n-- Some long comment over 10 characters\n"
-  configs:
-    rules:
-      max_line_length: 10
-      L016:
-        ignore_comment_lines: false
-
-test_compute_line_length_before_template_expansion_1:
-  # Line 3 is fine before expansion. Too long after expansion is NOT considered
-  # a violation.
-  pass_str: |
-    SELECT user_id
-    FROM
-        `{{bi_ecommerce_orders}}` {{table_at_job_start}}
-  configs:
-    core:
-      dialect: bigquery
-    templater:
-      jinja:
-        context:
-          table_at_job_start: FOR SYSTEM_TIME AS OF CAST('2021-03-02T01:22:59+00:00' AS TIMESTAMP)
-          bi_ecommerce_orders: bq-business-intelligence.user.ecommerce_orders
-
-
-test_compute_line_length_before_template_expansion_2:
-  # Line 3 is too long before expansion. It's fine after expansion, but the rule
-  # does not look at that.
-  fail_str: |
-    SELECT user_id
-    FROM
-        `{{bi_ecommerce_orders_bi_ecommerce_orders}}` AS {{table_alias_table_alias_table_alias_table_alias_table_alias_table_alias}}
-  configs:
-    core:
-      dialect: bigquery
-    templater:
-      jinja:
-        context:
-          bi_ecommerce_orders_bi_ecommerce_orders: bq-business-intelligence.user.ecommerce_orders
-          table_alias_table_alias_table_alias_table_alias_table_alias_table_alias: t
-
-
-test_long_jina_comment:
-  fail_str: |
-    SELECT *
-    {# comment #}
-    {# ........................................................................... #}
-    FROM table
-  configs:
-    rules:
-      max_line_length: 80
-      L016:
-        ignore_comment_lines: false
-
-
-test_long_jina_comment_ignore:
-  # A Jinja comment is not seen as a SQL comment (perhaps it should be?) so should still fail
-  fail_str: |
-    SELECT *
-    {# comment #}
-    {# ........................................................................... #}
-    FROM table
-  configs:
-    rules:
-      max_line_length: 80
-      L016:
-        ignore_comment_lines: true
-
-
-test_for_loop:
-  # A Jinja for loop
-  pass_str: |
-    {% for elem in 'foo' %}
-    SELECT '{{ elem }}' FROM table1;
-    SELECT '{{ elem }}' FROM table2;
-    {% endfor %}
-
-
-test_for_loop_repeating_elements_starts_with_literal:
-  # A Jinja for loop with repeating elements (that are difficult to match)
-  # but starting with a literal that can be used to match
-  pass_str: |
-    {% set elements = 'foo' %}
-    SELECT
-        CASE
-            {% for elem in elements %}
-            WHEN '{{ elem }}' = '' THEN 1
-            WHEN '{{ elem }}' = '' THEN 1
-            {% endfor %}
-        END
-
-
-test_for_loop_starting_with_templated_piece:
-  # A Jinja for loop starting with non-literals
-  # But unique parts can be used to match
-  pass_str: |
-    {% set elements = 'foo' %}
-    {% set when = 'WHEN' %}
-    SELECT
-        CASE
-            {% for elem in elements %}
-            {{ when }} '{{ elem }}' = '' THEN 1
-            {{ when }} '{{ elem }}' = '' THEN 2
-            {% endfor %}
-        END
-
-test_for_loop_fail_complex_match:
-  # A Jinja for loop starting with non-literals
-  # But non-unique parts which therefore cannot
-  # be used to match
-  pass_str: |
-    {% set elements = 'foo' %}
-    {% set when = 'WHEN' %}
-    SELECT
-        CASE
-            {% for elem in elements %}
-            {{ when }} '{{ elem }}' = '' THEN 1
-            {{ when }} '{{ elem }}' = '' THEN 1
-            {% endfor %}
-        END
-
-test_for_loop_fail_simple_match:
-  # If for loop only contains literals it should still pass
-  pass_str: |
-    {% set elements = 'foo' %}
-    SELECT
-        CASE
-            {% for elem in elements %}
-            WHEN 'f' THEN a
-            {% endfor %}
-        END
-
-
-test_set_statement:
-  # A Jinja set statement
-  pass_str: |
-    {% set statement = "SELECT 1 from table1;" %}
-    {{ statement }}{{ statement }}
-  configs:
-    rules:
-      max_line_length: 80
-
-
-test_issue_1666_line_too_long_unfixable_jinja:
-  # Note the trailing space at the end of line 1. This is a necessary part of
-  # the test, because the space (which is passed through to the output) was
-  # "tricking" L016 into trying to split the line, then encountering an internal
-  # error.
-  fail_str: "{{ config (schema='bronze', materialized='view', sort =['id','number'], dist = 'all', tags =['longlonglonglonglong']) }} \n\nselect 1\n"
-
-test_fail_ignore_comment_clauses_1:
-  # Too long, comment clause not ignored
-  fail_str: |
-    CREATE OR REPLACE TABLE mytable (
-        col1 NUMBER COMMENT 'col1 comment',
-        col2 BOOLEAN COMMENT 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length',
-        col3 NUMBER COMMENT 'col3 comment'
-    )
-
-test_fail_ignore_comment_clauses_2:
-  # Too long even after ignoring comment clause
-  fail_str: |
-    CREATE OR REPLACE TABLE mytable (
-        col1 NUMBER COMMENT 'col1 comment',
-        colaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbcccccccccccccccddddddddddddddddeeeeeeeeeeeeeee2 BOOLEAN COMMENT 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length',
-        col3 NUMBER COMMENT 'col3 comment'
-    )
-  configs:
-    rules:
-      L016:
-        ignore_comment_clauses: true
-
-test_pass_ignore_comment_clauses:
-  pass_str: |
-    CREATE OR REPLACE TABLE mytable (
-        col1 NUMBER COMMENT 'col1 comment',
-        col2 BOOLEAN COMMENT 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length',
-        col3 NUMBER COMMENT 'col3 comment'
-    )
-  configs:
-    rules:
-      L016:
-        ignore_comment_clauses: true
-
-test_pass_ignore_comment_clauses_teradata:
-  pass_str: |
-    comment on table sandbox_db.Org_Descendant is 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length';
-  configs:
-    core:
-      dialect: teradata
-    rules:
-      L016:
-        ignore_comment_clauses: true
-
-test_pass_ignore_comment_clauses_exasol:
-  pass_str: |
-    CREATE TABLE IF NOT EXISTS SCHEM.TAB (
-        ID DECIMAL(18, 0) IDENTITY CONSTRAINT PRIMARY KEY DISABLE COMMENT IS 'without constraint name'
-    ) COMMENT IS 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length';
-  configs:
-    core:
-      dialect: exasol
-    rules:
-      L016:
-        ignore_comment_clauses: true
-
-test_pass_ignore_comment_clauses_snowflake:
-  pass_str: |
-    CREATE TABLE foo_table (bar INTEGER) COMMENT = 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length'
-  configs:
-    core:
-      dialect: snowflake
-    rules:
-      L016:
-        ignore_comment_clauses: true
-
-test_pass_ignore_comment_clauses_postgres:
-  pass_str: |
-    CREATE TABLE IF NOT EXISTS foo
-    ( id UUID DEFAULT uuid_generate_v4() PRIMARY KEY,
-      name TEXT NOT NULL
-    );
-
-    COMMENT ON TABLE foo IS 'Windows Phone 8, however, was never able to overcome a long string of disappointments for Microsoft. ';
-  configs:
-    core:
-      dialect: postgres
-    rules:
-      L016:
-        ignore_comment_clauses: true
-
-test_fail_templated_comment_clause:
-  fail_str: |
-    SELECT *
-        {# ........................................................................... #}
-    FROM table
-  configs:
-    templater:
-      jinja:
-        context: {}
-
-test_pass_ignore_templated_comment_clause:
-  pass_str: |
-    SELECT *
-        {# ........................................................................... #}
-    FROM table
-  configs:
-    rules:
-      L016:
-        ignore_comment_clauses: true
-    templater:
-      jinja:
-        context: {}
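
L016 (line length) is deleted here with no direct LT01 successor; in the 2.x rule bundle long lines are checked by LT05 (layout.long_lines), and max_line_length moves from the rules block to the core config. A hedged sketch, assuming the simple API and that rule code:

import sqlfluff

# A line comfortably over the default 80-character limit.
sql = (
    "SELECT first_very_long_column_name, second_very_long_column_name,"
    " third_very_long_column_name FROM some_table\n"
)
for violation in sqlfluff.lint(sql, rules=["LT05"]):
    print(violation["code"], violation["line_no"], violation["description"])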
diff --git a/test/fixtures/rules/std_rule_cases/L024.yml b/test/fixtures/rules/std_rule_cases/L024.yml
deleted file mode 100644
index 2204f8c..0000000
--- a/test/fixtures/rules/std_rule_cases/L024.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-rule: L024
-
-test_fail_no_space_after_using_clause:
-  fail_str: select * from a JOIN b USING(x)
-  fix_str: select * from a JOIN b USING (x)
-
-test_pass_newline_after_using_clause:
-  # Check L024 passes if there's a newline between
-  pass_str: |
-    select * from a JOIN b USING
-    (x)
diff --git a/test/fixtures/rules/std_rule_cases/L038.yml b/test/fixtures/rules/std_rule_cases/L038.yml
deleted file mode 100644
index d8ab7cf..0000000
--- a/test/fixtures/rules/std_rule_cases/L038.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-rule: L038
-
-test_require_pass:
-  pass_str: SELECT a, b, FROM foo
-  configs:
-    rules:
-      L038:
-        select_clause_trailing_comma: require
-
-test_require_fail:
-  fail_str: SELECT a, b FROM foo
-  fix_str: SELECT a, b, FROM foo
-  configs:
-    rules:
-      L038:
-        select_clause_trailing_comma: require
-
-
-test_forbid_pass:
-  pass_str: SELECT a, b FROM foo
-  configs:
-    rules:
-      L038:
-        select_clause_trailing_comma: forbid
-
-test_forbid_fail:
-  fail_str: SELECT a, b, FROM foo
-  fix_str: SELECT a, b FROM foo
-  configs:
-    rules:
-      L038:
-        select_clause_trailing_comma: forbid
diff --git a/test/fixtures/rules/std_rule_cases/L039.yml b/test/fixtures/rules/std_rule_cases/L039.yml
deleted file mode 100644
index c0a5fe2..0000000
--- a/test/fixtures/rules/std_rule_cases/L039.yml
+++ /dev/null
@@ -1,234 +0,0 @@
-rule: L039
-
-test_basic:
-  pass_str: SELECT 1
-
-test_basic_fix:
-  fail_str: SELECT     1
-  fix_str: SELECT 1
-
-test_simple_fix:
-  fail_str: |
-    select
-        1 + 2     + 3     + 4        -- Comment
-    from     foo
-  fix_str: |
-    select
-        1 + 2 + 3 + 4        -- Comment
-    from foo
-
-test_identifier_fix:
-  fail_str: |
-    SELECT [thistable] . [col]
-    FROM [thisdatabase] . [thisschema]
-            . [thistable]
-  fix_str: |
-    SELECT [thistable].[col]
-    FROM [thisdatabase].[thisschema].[thistable]
-  configs:
-    core:
-      dialect: tsql
-
-test_comparison_operator_fix:
-  fail_str: |
-    SELECT foo
-    FROM bar
-    WHERE baz > = 10;
-  fix_str: |
-    SELECT foo
-    FROM bar
-    WHERE baz >= 10;
-  configs:
-    core:
-      dialect: tsql
-
-test_comparison_operator_pass:
-  pass_str: |
-    SELECT foo
-    FROM bar
-    WHERE baz >= 10;
-  configs:
-    core:
-      dialect: tsql
-
-test_casting_operator_fix:
-  fail_str: |
-    SELECT '1' :: INT;
-  fix_str: |
-    SELECT '1'::INT;
-  configs:
-    core:
-      dialect: postgres
-
-test_casting_operator_pass:
-  pass_str: |
-    SELECT '1'::INT;
-  configs:
-    core:
-      dialect: postgres
-
-test_fix_tsql_spaced_chars:
-  fail_str: |
-    SELECT col1 FROM table1 WHERE 1 > = 1
-  fix_str: |
-    SELECT col1 FROM table1 WHERE 1 >= 1
-  configs:
-    core:
-      dialect: tsql
-
-# Check CASE Statement parses with newlines properly
-# See https://github.com/sqlfluff/sqlfluff/issues/2495
-test_pass_postgres_case_statement:
-  pass_str: |
-    SELECT
-        a,
-        CASE
-            WHEN 1 THEN 'one'
-            WHEN 2 THEN 'two'
-            ELSE 'other'
-        END AS b
-    FROM test;
-  configs:
-    core:
-      dialect: postgres
-
-test_excess_space_cast:
-  fail_str: |
-    select
-        '1'    ::   INT as id1,
-        '2'::int as id2
-    from table_a
-  fix_str: |
-    select
-        '1'::INT as id1,
-        '2'::int as id2
-    from table_a
-
-test_redshift_at_time_zone:
-  pass_str: |
-    SELECT
-    date_w_tz [0] AT TIME ZONE 'Etc/UTC' AS bar
-    FROM foo
-  configs:
-    core:
-      dialect: redshift
-
-test_excess_space_without_align_alias:
-  fail_str: |
-        SELECT
-            a    AS first_column,
-            b      AS second_column,
-            (a + b) / 2 AS third_column
-        FROM foo
-  fix_str: |
-    SELECT
-        a AS first_column,
-        b AS second_column,
-        (a + b) / 2 AS third_column
-    FROM foo
-  configs:
-    # This is the default config but we're being explicit
-    # here for testing.
-    layout:
-      type:
-        alias_expression:
-          spacing_before: single
-
-test_excess_space_with_align_alias:
-  # NOTE: The config here shouldn't move the table alias
-  fail_str: |
-        SELECT
-            a    AS first_column,
-            b      AS second_column,
-            (a + b) / 2 AS third_column
-        FROM foo   AS bar
-  fix_str: |
-    SELECT
-        a           AS first_column,
-        b           AS second_column,
-        (a + b) / 2 AS third_column
-    FROM foo AS bar
-  configs: &align_alias
-    layout:
-      type:
-        alias_expression:
-          spacing_before: align
-          align_within: select_clause
-          align_scope: bracketed
-
-test_missing_keyword_with_align_alias:
-  fail_str: |
-        SELECT
-            a    first_column,
-            b      AS second_column,
-            (a + b) / 2 AS third_column
-        FROM foo
-  fix_str: |
-    SELECT
-        a           first_column,
-        b           AS second_column,
-        (a + b) / 2 AS third_column
-    FROM foo
-  configs: *align_alias
-
-test_skip_alias_with_align_alias:
-  fail_str: |
-        SELECT
-            a   ,
-            b   ,
-            (a   +   b) /   2
-        FROM foo
-  fix_str: |
-    SELECT
-        a,
-        b,
-        (a + b) / 2
-    FROM foo
-  configs: *align_alias
-
-test_excess_space_with_align_alias_wider:
-  # NOTE: The config here SHOULD move the table alias
-  fail_str: |
-    SELECT
-        a    AS first_column,
-        b      AS second_column,
-        (a      +      b)      /      2 AS third_column
-    FROM foo   AS first_table
-    JOIN my_tbl AS second_table USING(a)
-  fix_str: |
-    SELECT
-        a           AS first_column,
-        b           AS second_column,
-        (a + b) / 2 AS third_column
-    FROM foo        AS first_table
-    JOIN my_tbl     AS second_table USING(a)
-  configs: &align_alias_wider
-    layout:
-      type:
-        alias_expression:
-          spacing_before: align
-          align_within: select_statement
-          align_scope: bracketed
-
-test_align_alias_boundary:
-  # The alias inside the expression shouldn't move.
-  fail_str: |
-    SELECT
-        a    AS first_column,
-        (SELECT b AS c)      AS second_column
-  fix_str: |
-    SELECT
-        a               AS first_column,
-        (SELECT b AS c) AS second_column
-  configs: *align_alias
-
-test_align_alias_inline_pass:
-  # The aliases on the same line shouldn't panic.
-  pass_str: SELECT a AS b, c AS d FROM tbl
-  configs: *align_alias
-
-test_align_alias_inline_fail:
-  # The aliases on the same line shouldn't panic.
-  fail_str: SELECT a   AS   b  ,   c   AS   d    FROM tbl
-  fix_str: SELECT a AS b, c AS d FROM tbl
-  configs: *align_alias
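
Note the configs: &align_alias / configs: *align_alias pairs in the file deleted above (and in its LT01-excessive.yml successor below): these are ordinary YAML anchors and aliases, letting several cases share one layout configuration block. The simplest case carries over unchanged; a sketch, assuming the simple API:

import sqlfluff

# The basic excess-whitespace case (formerly L039), now under LT01.
print(sqlfluff.fix("SELECT     1", rules=["LT01"]))
# expected: SELECT 1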
diff --git a/test/fixtures/rules/std_rule_cases/L071.yml b/test/fixtures/rules/std_rule_cases/L071.yml
deleted file mode 100644
index 1063baf..0000000
--- a/test/fixtures/rules/std_rule_cases/L071.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-rule: L071
-
-test_pass_parenthesis_block_isolated:
-  pass_str: |
-    SELECT * FROM (SELECT 1 AS C1) AS T1;
-
-
-test_pass_parenthesis_block_not_isolated:
-  fail_str: |
-    SELECT * FROM(SELECT 1 AS C1)AS T1;
-  fix_str: |
-    SELECT * FROM (SELECT 1 AS C1) AS T1;
-
-test_pass_parenthesis_function:
-  pass_str: |
-    SELECT foo(5) FROM T1;
diff --git a/test/fixtures/rules/std_rule_cases/LT01-brackets.yml b/test/fixtures/rules/std_rule_cases/LT01-brackets.yml
new file mode 100644
index 0000000..69240e0
--- /dev/null
+++ b/test/fixtures/rules/std_rule_cases/LT01-brackets.yml
@@ -0,0 +1,29 @@
+rule: LT01
+
+test_pass_parenthesis_block_isolated:
+  pass_str: |
+    SELECT * FROM (SELECT 1 AS C1) AS T1;
+
+test_pass_parenthesis_block_isolated_template:
+  pass_str: |
+    {{ 'SELECT * FROM (SELECT 1 AS C1) AS T1;' }}
+  configs:
+    core:
+      ignore_templated_areas: false
+
+test_fail_parenthesis_block_not_isolated:
+  fail_str: |
+    SELECT * FROM(SELECT 1 AS C1)AS T1;
+  fix_str: |
+    SELECT * FROM (SELECT 1 AS C1) AS T1;
+
+test_fail_parenthesis_block_not_isolated_templated:
+  fail_str: |
+    {{ 'SELECT * FROM(SELECT 1 AS C1)AS T1;' }}
+  configs:
+    core:
+      ignore_templated_areas: false
+
+test_pass_parenthesis_function:
+  pass_str: |
+    SELECT foo(5) FROM T1;
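
The new _template variants in this file set ignore_templated_areas: false, which is what makes spacing faults inside rendered Jinja reportable at all; under the default of true they are masked. A sketch of that default, assuming the simple API (whose default config uses the jinja templater):

import sqlfluff

# With the default ignore_templated_areas=true, the fault lives
# entirely inside templated output and is not reported.
templated = "{{ 'SELECT * FROM(SELECT 1 AS C1)AS T1;' }}\n"
print(sqlfluff.lint(templated, rules=["LT01"]))  # expected: []

# The same fault in literal SQL is reported and fixable.
print(sqlfluff.fix("SELECT * FROM(SELECT 1 AS C1)AS T1;\n", rules=["LT01"]))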
diff --git a/test/fixtures/rules/std_rule_cases/LT01-commas.yml b/test/fixtures/rules/std_rule_cases/LT01-commas.yml
new file mode 100644
index 0000000..acf9c01
--- /dev/null
+++ b/test/fixtures/rules/std_rule_cases/LT01-commas.yml
@@ -0,0 +1,53 @@
+rule: LT01
+
+test_fail_whitespace_before_comma:
+  fail_str: SELECT 1 ,4
+  fix_str: SELECT 1, 4
+
+test_fail_whitespace_before_comma_template:
+  fail_str: |
+    {{ 'SELECT 1 ,4' }}
+  configs:
+    core:
+      ignore_templated_areas: false
+
+test_pass_errors_only_in_templated_and_ignore:
+  pass_str: |
+    {{ 'SELECT 1 ,4' }}, 5, 6
+  configs:
+    core:
+      ignore_templated_areas: true
+
+test_fail_errors_only_in_non_templated_and_ignore:
+  fail_str: |
+    {{ 'SELECT 1, 4' }}, 5 , 6
+  fix_str: |
+    {{ 'SELECT 1, 4' }}, 5, 6
+  configs:
+    core:
+      ignore_templated_areas: true
+
+test_pass_single_whitespace_after_comma:
+  pass_str: SELECT 1, 4
+
+test_pass_single_whitespace_after_comma_template:
+  pass_str: |
+    {{ 'SELECT 1, 4' }}
+  configs:
+    core:
+      ignore_templated_areas: false
+
+test_fail_multiple_whitespace_after_comma:
+  fail_str: SELECT 1,   4
+  fix_str: SELECT 1, 4
+
+test_fail_no_whitespace_after_comma:
+  fail_str: SELECT 1,4
+  fix_str: SELECT 1, 4
+
+test_fail_no_whitespace_after_comma_2:
+  fail_str: SELECT FLOOR(dt) ,count(*) FROM test
+  fix_str: SELECT FLOOR(dt), count(*) FROM test
+
+test_pass_bigquery_trailing_comma:
+  pass_str: SELECT 1, 2,
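
test_fail_errors_only_in_non_templated_and_ignore above is the interesting addition: with ignore_templated_areas left at its default of true, only the fault in the literal tail (5 , 6) is fixed, while the templated SELECT 1, 4 is left untouched. Sketched with the simple API:

import sqlfluff

sql = "{{ 'SELECT 1, 4' }}, 5 , 6\n"
print(sqlfluff.fix(sql, rules=["LT01"]))
# expected: {{ 'SELECT 1, 4' }}, 5, 6   (per the fixture's fix_str)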
diff --git a/test/fixtures/rules/std_rule_cases/LT01-excessive.yml b/test/fixtures/rules/std_rule_cases/LT01-excessive.yml
new file mode 100644
index 0000000..1278a06
--- /dev/null
+++ b/test/fixtures/rules/std_rule_cases/LT01-excessive.yml
@@ -0,0 +1,466 @@
+rule: LT01
+
+test_basic:
+  pass_str: SELECT 1
+
+test_basic_template:
+  pass_str: |
+    {{ 'SELECT 1' }}
+  configs:
+    core:
+      ignore_templated_areas: false
+
+test_basic_fix:
+  fail_str: SELECT     1
+  fix_str: SELECT 1
+
+test_basic_fail_template:
+  fail_str: |
+    {{ 'SELECT     1' }}
+  configs:
+    core:
+      ignore_templated_areas: false
+
+test_simple_fix:
+  fail_str: |
+    select
+        1 + 2     + 3     + 4        -- Comment
+    from     foo
+  fix_str: |
+    select
+        1 + 2 + 3 + 4        -- Comment
+    from foo
+
+test_identifier_fix:
+  fail_str: |
+    SELECT [thistable] . [col]
+    FROM [thisdatabase] . [thisschema]
+            . [thistable]
+  fix_str: |
+    SELECT [thistable].[col]
+    FROM [thisdatabase].[thisschema].[thistable]
+  configs:
+    core:
+      dialect: tsql
+
+test_comparison_operator_fix:
+  fail_str: |
+    SELECT foo
+    FROM bar
+    WHERE baz > = 10;
+  fix_str: |
+    SELECT foo
+    FROM bar
+    WHERE baz >= 10;
+  configs:
+    core:
+      dialect: tsql
+
+test_comparison_operator_pass:
+  pass_str: |
+    SELECT foo
+    FROM bar
+    WHERE baz >= 10;
+  configs:
+    core:
+      dialect: tsql
+
+test_casting_operator_fix:
+  fail_str: |
+    SELECT '1' :: INT;
+  fix_str: |
+    SELECT '1'::INT;
+  configs:
+    core:
+      dialect: postgres
+
+test_casting_operator_pass:
+  pass_str: |
+    SELECT '1'::INT;
+  configs:
+    core:
+      dialect: postgres
+
+test_fix_tsql_spaced_chars:
+  fail_str: |
+    SELECT col1 FROM table1 WHERE 1 > = 1
+  fix_str: |
+    SELECT col1 FROM table1 WHERE 1 >= 1
+  configs:
+    core:
+      dialect: tsql
+
+# Check CASE Statement parses with newlines properly
+# See https://github.com/sqlfluff/sqlfluff/issues/2495
+test_pass_postgres_case_statement:
+  pass_str: |
+    SELECT
+        a,
+        CASE
+            WHEN 1 THEN 'one'
+            WHEN 2 THEN 'two'
+            ELSE 'other'
+        END AS b
+    FROM test;
+  configs:
+    core:
+      dialect: postgres
+
+test_excess_space_cast:
+  fail_str: |
+    select
+        '1'    ::   INT as id1,
+        '2'::int as id2
+    from table_a
+  fix_str: |
+    select
+        '1'::INT as id1,
+        '2'::int as id2
+    from table_a
+
+test_redshift_at_time_zone:
+  pass_str: |
+    SELECT
+    date_w_tz[0] AT TIME ZONE 'Etc/UTC' AS bar
+    FROM foo
+  configs:
+    core:
+      dialect: redshift
+
+test_excess_space_without_align_alias:
+  fail_str: |
+        SELECT
+            a    AS first_column,
+            b      AS second_column,
+            (a + b) / 2 AS third_column
+        FROM foo
+  fix_str: |
+    SELECT
+        a AS first_column,
+        b AS second_column,
+        (a + b) / 2 AS third_column
+    FROM foo
+  configs:
+    # This is the default config but we're being explicit
+    # here for testing.
+    layout:
+      type:
+        alias_expression:
+          spacing_before: single
+
+test_excess_space_with_align_alias:
+  # NOTE: The config here shouldn't move the table alias
+  fail_str: |
+        SELECT
+            a    AS first_column,
+            b      AS second_column,
+            (a + b) / 2 AS third_column
+        FROM foo   AS bar
+  fix_str: |
+    SELECT
+        a           AS first_column,
+        b           AS second_column,
+        (a + b) / 2 AS third_column
+    FROM foo AS bar
+  configs: &align_alias
+    layout:
+      type:
+        alias_expression:
+          spacing_before: align
+          align_within: select_clause
+          align_scope: bracketed
+
+test_missing_keyword_with_align_alias:
+  fail_str: |
+        SELECT
+            a    first_column,
+            b      AS second_column,
+            (a + b) / 2 AS third_column
+        FROM foo
+  fix_str: |
+    SELECT
+        a           first_column,
+        b           AS second_column,
+        (a + b) / 2 AS third_column
+    FROM foo
+  configs: *align_alias
+
+test_skip_alias_with_align_alias:
+  fail_str: |
+        SELECT
+            a   ,
+            b   ,
+            (a   +   b) /   2
+        FROM foo
+  fix_str: |
+    SELECT
+        a,
+        b,
+        (a + b) / 2
+    FROM foo
+  configs: *align_alias
+
+test_excess_space_with_align_alias_wider:
+  # NOTE: The config here SHOULD move the table alias
+  # NOTE: The combined LT01 also fixes the missing space
+  # between `USING` and `(a)`.
+  fail_str: |
+    SELECT
+        a    AS first_column,
+        b      AS second_column,
+        (a      +      b)      /      2 AS third_column
+    FROM foo   AS first_table
+    JOIN my_tbl AS second_table USING(a)
+  fix_str: |
+    SELECT
+        a           AS first_column,
+        b           AS second_column,
+        (a + b) / 2 AS third_column
+    FROM foo        AS first_table
+    JOIN my_tbl     AS second_table USING (a)
+  configs: &align_alias_wider
+    layout:
+      type:
+        alias_expression:
+          spacing_before: align
+          align_within: select_statement
+          align_scope: bracketed
+
+test_align_alias_boundary:
+  # The alias inside the expression shouldn't move.
+  fail_str: |
+    SELECT
+        a    AS first_column,
+        (SELECT b AS c)      AS second_column
+  fix_str: |
+    SELECT
+        a               AS first_column,
+        (SELECT b AS c) AS second_column
+  configs: *align_alias
+
+test_align_alias_inline_pass:
+  # Aliases on the same line shouldn't cause the linter to panic.
+  pass_str: SELECT a AS b, c AS d FROM tbl
+  configs: *align_alias
+
+test_align_alias_inline_fail:
+  # Aliases on the same line shouldn't cause the linter to panic.
+  fail_str: SELECT a   AS   b  ,   c   AS   d    FROM tbl
+  fix_str: SELECT a AS b, c AS d FROM tbl
+  configs: *align_alias
+
+test_pass_snowflake_semi_structured:
+  pass_str: "SELECT to_array(a.b:c) FROM d"
+  configs:
+    core:
+      dialect: snowflake
+
+test_fail_snowflake_semi_structured_single:
+  fail_str: |
+    SELECT
+      to_array(a.b : c) as d,
+      e : f : g::string as h
+    FROM j
+  fix_str: |
+    SELECT
+      to_array(a.b:c) as d,
+      e:f:g::string as h
+    FROM j
+  configs:
+    core:
+      dialect: snowflake
+
+test_fail_snowflake_semi_structured_multi:
+  fail_str: |
+    SELECT
+      to_array(a.b    :    c) as d,
+      e    :    f    :    g::string as h
+    FROM j
+  fix_str: |
+    SELECT
+      to_array(a.b:c) as d,
+      e:f:g::string as h
+    FROM j
+  configs:
+    core:
+      dialect: snowflake
+
+test_pass_bigquery_specific:
+  # Test that a selection of bigquery-specific spacings work.
+  # Specifically EXCEPT & qualified functions.
+  pass_str: |
+    SELECT * EXCEPT (order_id);
+    SELECT NET.HOST(LOWER(url)) AS host FROM urls;
+  configs:
+    core:
+      dialect: bigquery
+
+test_pass_bigquery_specific_arrays_1:
+  # An example of _no whitespace_ after an array type
+  pass_str: |
+    SELECT ARRAY<FLOAT64>[1, 2, 3] AS floats;
+  configs:
+    core:
+      dialect: bigquery
+
+test_pass_bigquery_specific_arrays_2:
+  # An example of _whitespace_ after an array type
+  pass_str: |
+    CREATE TEMPORARY FUNCTION DoSomething(param1 STRING, param2 STRING)
+    RETURNS ARRAY<STRING> LANGUAGE js AS """Some JS""";
+
+    SELECT DoSomething(col1) FROM table1
+  configs:
+    core:
+      dialect: bigquery
+
+test_pass_bigquery_specific_structs:
+  # Test spacing of complex STRUCT brackets
+  pass_str: |
+    create table testing.array_struct_tbl (
+        address_array_of_nested_structs
+        ARRAY<STRUCT<coll STRUCT<col1_1 STRING, col1_2 INT64>, col2 STRING>>
+    )
+  configs:
+    core:
+      dialect: bigquery
+
+test_pass_bigquery_specific_struct_access:
+  # Test spacing of function access
+  pass_str: |
+    SELECT
+      testFunction(a).b AS field,
+      testFunction(a).* AS wildcard,
+      testFunction(a).b.c AS field_with_field,
+      testFunction(a).b.* AS field_with_wildcard,
+      testFunction(a)[OFFSET(0)].* AS field_with_offset_wildcard,
+      testFunction(a)[SAFE_OFFSET(0)].* AS field_with_safe_offset_wildcard,
+      testFunction(a)[ORDINAL(1)].* AS field_with_ordinal_wildcard,
+      testFunction(a)[ORDINAL(1)].a AS field_with_ordinal_field
+    FROM table1
+  configs:
+    core:
+      dialect: bigquery
+
+test_postgres_datatype:
+  # https://github.com/sqlfluff/sqlfluff/issues/4521
+  # https://github.com/sqlfluff/sqlfluff/issues/4565
+  pass_str: |
+    select
+        1::NUMERIC(3, 1),
+        2::double precision,
+        '2020-01-01'::timestamp with time zone,
+        'foo'::character varying,
+        B'10101'::bit(3),
+        B'10101'::bit varying(3),
+        B'10101'::bit varying
+  configs:
+    core:
+      dialect: postgres
+
+test_redshift_datatype:
+  pass_str: |
+    select
+        1::NUMERIC(3, 1),
+        2::double precision,
+        '2020-01-01'::timestamp with time zone,
+        'foo'::character varying,
+        'foo'::character varying(MAX),
+        'foo'::character varying(255),
+        '10101'::binary varying(6)
+  configs:
+    core:
+      dialect: redshift
+
+test_bigquery_datatype:
+  pass_str: |
+      select 1::NUMERIC(3, 1)
+  configs:
+    core:
+      dialect: bigquery
+
+test_athena_datatype:
+  pass_str: |
+      select
+          1::DECIMAL(3, 1),
+          'foo'::VARCHAR(4),
+          'bar'::CHAR(3),
+          col1::STRUCT<foo: int>,
+          col2::ARRAY<int>,
+          '2020-01-01'::timestamp with time zone
+  configs:
+    core:
+      dialect: athena
+
+test_hive_datatype:
+  pass_str: |
+      select
+          1::DECIMAL(3, 1),
+          1::DEC(3, 1),
+          1::NUMERIC(3, 1),
+          col1::STRUCT<foo: int>,
+          col2::ARRAY<int>,
+          col3::ARRAY<int>[4]
+  configs:
+    core:
+      dialect: hive
+
+test_sqlite_datatype:
+  pass_str: |
+      select
+          1::double precision,
+          1::DECIMAL(10, 5),
+          1::unsigned big int,
+          'foo'::varying character(255),
+          'foo'::character(20),
+          'foo'::nvarchar(200)
+  configs:
+    core:
+      dialect: sqlite
+
+test_sparksql_datatype:
+  pass_str: |
+      select
+          1::DECIMAL(3, 1),
+          1::DEC(3, 1),
+          1::NUMERIC(3, 1),
+          'bar'::CHAR(3),
+          col1::STRUCT<foo: int>,
+          col2::ARRAY<int>
+  configs:
+    core:
+      dialect: sparksql
+
+test_exasol_datatype:
+  pass_str: |
+      select
+          1::double precision,
+          1::DECIMAL(3, 1),
+          1::NUMERIC(3, 1),
+          'bar'::VARCHAR(2000 CHAR),
+          col1::INTERVAL DAY(2) TO SECOND(1)
+  configs:
+    core:
+      dialect: exasol
+
+test_teradata_datatype:
+  pass_str: |
+      select
+          1::DECIMAL(3, 1),
+          1::DEC(3, 1),
+          1::NUMERIC(3, 1),
+          'bar'::CHAR(3)
+  configs:
+    core:
+      dialect: teradata
+
+test_tsql_datatype:
+  pass_str: |
+      select
+          1::DECIMAL(3, 1),
+          1::DEC(3, 1),
+          1::NUMERIC(3, 1),
+          'bar'::character varying(3)
+  configs:
+    core:
+      dialect: tsql
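
The long dialect-specific tail of this file (postgres through tsql) pins down one behaviour: datatype spellings such as '1'::NUMERIC(3, 1) must not be flagged, while a spaced-out :: still is. A sketch of the casting case, assuming the simple API:

import sqlfluff

# test_casting_operator_fix, replayed for two of the dialects above.
for dialect in ("postgres", "redshift"):
    fixed = sqlfluff.fix("SELECT '1' :: INT;\n", dialect=dialect, rules=["LT01"])
    print(dialect, fixed)
# expected in both cases: SELECT '1'::INT;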
diff --git a/test/fixtures/rules/std_rule_cases/L048.yml b/test/fixtures/rules/std_rule_cases/LT01-literals.yml
similarity index 79%
rename from test/fixtures/rules/std_rule_cases/L048.yml
rename to test/fixtures/rules/std_rule_cases/LT01-literals.yml
index b402980..77fc8ae 100644
--- a/test/fixtures/rules/std_rule_cases/L048.yml
+++ b/test/fixtures/rules/std_rule_cases/LT01-literals.yml
@@ -1,4 +1,4 @@
-rule: L048
+rule: LT01
 
 test_pass_simple_select:
   pass_str: "SELECT 'foo'"
@@ -8,15 +8,13 @@ test_pass_expression:
   pass_str: "SELECT ('foo' || 'bar') as buzz"
 
 test_fail_as:
-  # NOTE: we should fix the _missing_ whitespace but not the excess.
-  # That's covered in L039.
   fail_str: |
     SELECT
         'foo'AS   bar
     FROM foo
   fix_str: |
     SELECT
-        'foo' AS   bar
+        'foo' AS bar
     FROM foo
 
 test_fail_expression:
@@ -125,3 +123,23 @@ test_pass_sparksql_multi_units_interval_minus:
   configs:
     core:
       dialect: sparksql
+
+test_fail_old_python_test:
+  fail_str: SELECT a +'b'+'c' FROM tbl;
+  fix_str: SELECT a + 'b' + 'c' FROM tbl;
+  violations:
+    - code: LT01
+      description: Expected single whitespace between binary operator '+' and quoted literal.
+      line_no: 1
+      line_pos: 11
+      name: layout.spacing
+    - code: LT01
+      description: Expected single whitespace between quoted literal and binary operator '+'.
+      line_no: 1
+      line_pos: 14
+      name: layout.spacing
+    - code: LT01
+      description: Expected single whitespace between binary operator '+' and quoted literal.
+      line_no: 1
+      line_pos: 15
+      name: layout.spacing
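
The violations block added above (code / description / line_no / line_pos / name) mirrors the record shape the linter reports, so the migrated "old python test" can also be replayed directly. A sketch, assuming the simple API returns dictionaries with exactly those keys:

import sqlfluff

for v in sqlfluff.lint("SELECT a +'b'+'c' FROM tbl;", rules=["LT01"]):
    print(v["line_no"], v["line_pos"], v["code"], v["name"])
    print("   ", v["description"])
# expected: three layout.spacing records at positions 11, 14 and 15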
diff --git a/test/fixtures/rules/std_rule_cases/L023.yml b/test/fixtures/rules/std_rule_cases/LT01-missing.yml
similarity index 75%
rename from test/fixtures/rules/std_rule_cases/L023.yml
rename to test/fixtures/rules/std_rule_cases/LT01-missing.yml
index 427ad2e..2a93d78 100644
--- a/test/fixtures/rules/std_rule_cases/L023.yml
+++ b/test/fixtures/rules/std_rule_cases/LT01-missing.yml
@@ -1,4 +1,14 @@
-rule: L023
+rule: LT01
+
+test_fail_no_space_after_using_clause:
+  fail_str: select * from a JOIN b USING(x)
+  fix_str: select * from a JOIN b USING (x)
+
+test_pass_newline_after_using_clause:
+  # Check LT01 passes if there's a newline between USING and the brackets
+  pass_str: |
+    select * from a JOIN b USING
+    (x)
 
 test_fail_cte_no_space_after_as:
   # Check fixing of single space rule when space is missing
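
LT01-missing.yml absorbs the deleted L024 fixture (space after USING) alongside the renamed L023 CTE cases. A sketch of the USING case, assuming the simple API:

import sqlfluff

print(sqlfluff.fix("select * from a JOIN b USING(x)", rules=["LT01"]))
# expected: select * from a JOIN b USING (x)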
diff --git a/test/fixtures/rules/std_rule_cases/L006.yml b/test/fixtures/rules/std_rule_cases/LT01-operators.yml
similarity index 75%
rename from test/fixtures/rules/std_rule_cases/L006.yml
rename to test/fixtures/rules/std_rule_cases/LT01-operators.yml
index 390df15..01fc4df 100644
--- a/test/fixtures/rules/std_rule_cases/L006.yml
+++ b/test/fixtures/rules/std_rule_cases/LT01-operators.yml
@@ -1,4 +1,4 @@
-rule: L006
+rule: LT01
 
 test_pass_brackets:
   # Test that we don't fail * operators in brackets
@@ -48,17 +48,15 @@ test_pass_newline_£:
 test_pass_sign_indicators:
   pass_str: SELECT 1, +2, -4
 
+test_pass_tilde:
+  pass_str: SELECT ~1
+
 # -------------------
 
 fail_simple:
   fail_str: "SELECT 1+2"
   fix_str: "SELECT 1 + 2"
 
-dont_fail_on_too_much_whitespace:
-  # Too much whitespace should be caught by L039
-  pass_str: "SELECT 1   +   2"
-
-
 pass_bigquery_hyphen:
   # hyphenated table reference should not fail
   pass_str: SELECT col_foo FROM foo-bar.foo.bar
@@ -89,3 +87,21 @@ pass_tsql_assignment_operator:
 
 pass_concat_string:
   pass_str: SELECT 'barry' || 'pollard'
+
+test_pass_placeholder_spacing:
+  # Test for spacing issues around placeholders
+  # https://github.com/sqlfluff/sqlfluff/issues/4253
+  pass_str: |
+    {% set is_dev_environment = true %}
+
+    SELECT *
+    FROM table
+    WHERE
+        some_col IS TRUE
+        {% if is_dev_environment %}
+            AND created_at >= DATE_SUB(CURRENT_DATE, INTERVAL 7 DAY)
+        {% else %}
+            AND created_at >= DATE_SUB(CURRENT_DATE, INTERVAL 30 DAY)
+        {% endif %}
+        AND TRUE
+    ;
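
The deleted dont_fail_on_too_much_whitespace case summarises this whole block of renames: excess whitespace used to be deferred to L039, but the merged LT01 now handles missing and excess spacing in a single pass. A sketch, assuming the simple API:

import sqlfluff

print(sqlfluff.fix("SELECT 1+2", rules=["LT01"]))
# expected: SELECT 1 + 2

# Excess whitespace is now flagged by the same rule rather than deferred.
print(sqlfluff.lint("SELECT 1   +   2", rules=["LT01"]))  # expected: non-empty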
diff --git a/test/fixtures/rules/std_rule_cases/L001.yml b/test/fixtures/rules/std_rule_cases/LT01-trailing.yml
similarity index 98%
rename from test/fixtures/rules/std_rule_cases/L001.yml
rename to test/fixtures/rules/std_rule_cases/LT01-trailing.yml
index 1d780cd..510a402 100644
--- a/test/fixtures/rules/std_rule_cases/L001.yml
+++ b/test/fixtures/rules/std_rule_cases/LT01-trailing.yml
@@ -1,4 +1,4 @@
-rule: L001
+rule: LT01
 
 test_fail_trailing_whitespace:
   fail_str: "SELECT 1     \n"
diff --git a/test/fixtures/rules/std_rule_cases/L003.yml b/test/fixtures/rules/std_rule_cases/LT02-indent.yml
similarity index 56%
rename from test/fixtures/rules/std_rule_cases/L003.yml
rename to test/fixtures/rules/std_rule_cases/LT02-indent.yml
index 14fb870..b1253cd 100644
--- a/test/fixtures/rules/std_rule_cases/L003.yml
+++ b/test/fixtures/rules/std_rule_cases/LT02-indent.yml
@@ -1,11 +1,17 @@
-rule: L003
+rule: LT02
 
 test_fail_reindent_first_line_1:
   fail_str: "     SELECT 1"
   fix_str: SELECT 1
+  violations:
+    - code: LT02
+      description: First line should not be indented.
+      line_no: 1
+      line_pos: 1
+      name: layout.indent
 
 test_fail_reindent_first_line_2:
-  # Github Bug #99. Python2 Issues with fixing L003
+  # GitHub Bug #99. Python 2 issues with fixing LT02
   fail_str: "  select 1 from tbl;"
   fix_str: select 1 from tbl;
 
@@ -48,8 +54,14 @@ test_fail_tab_indentation:
     	b
     FROM my_tbl
   configs:
-    rules:
+    indentation:
       indent_unit: tab
+  violations:
+    - code: LT02
+      description: Expected indent of 1 tabs.
+      line_no: 3
+      line_pos: 1
+      name: layout.indent
 
 test_pass_indented_joins_default:
   # Configurable indents work.
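
The hunk above also captures a config migration: indent_unit moves from the generic rules block to the dedicated indentation block, i.e. the [sqlfluff:indentation] section of a .sqlfluff file in 2.x. A sketch of the same override through the simple API, assuming its config_path parameter and that an arbitrary config filename is accepted:

import pathlib
import sqlfluff

# Hypothetical throwaway config file reproducing the fixture's override.
pathlib.Path("indent.sqlfluff").write_text("[sqlfluff:indentation]\nindent_unit = tab\n")
print(repr(sqlfluff.fix("SELECT\n    1\n", rules=["LT02"], config_path="indent.sqlfluff")))
# expected: 'SELECT\n\t1\n'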
@@ -93,6 +105,12 @@ test_fail_indented_joins_true_fix:
   configs:
     indentation:
       indented_joins: true
+  violations:
+    - code: LT02
+      description: Expected indent of 4 spaces.
+      line_no: 3
+      line_pos: 1
+      name: layout.indent
 
 test_fail_indented_joins_false_fix:
   # e) specific False, and failing
@@ -227,12 +245,13 @@ test_pass_indented_on_contents_default:
         s.b
     FROM r
     JOIN s
-        ON r.a = s.a
+        ON
+            r.a = s.a
             AND true
 
 test_pass_indented_on_contents_true:
   # Test indented_on_contents when true (default)
-  pass_str: |
+  fail_str: |
     SELECT
         r.a,
         s.b
@@ -240,6 +259,15 @@ test_pass_indented_on_contents_true:
     JOIN s
         ON r.a = s.a
             AND true
+  fix_str: |
+    SELECT
+        r.a,
+        s.b
+    FROM r
+    JOIN s
+        ON
+            r.a = s.a
+            AND true
   configs:
     indentation:
       indented_on_contents: true
@@ -257,7 +285,8 @@ test_pass_indented_on_contents_false:
   configs:
     indentation:
       indented_on_contents: false
-test_fail_indented_on_contents_default_fix:
+
+test_fail_indented_on_contents_default_fix_a:
   # Default config for indented_on_contents is true
   fail_str: |
     SELECT *
@@ -269,9 +298,25 @@ test_fail_indented_on_contents_default_fix:
     SELECT *
     FROM t1
     JOIN t2
-        ON true
+        ON
+            true
             AND true
 
+test_fail_indented_on_contents_default_fix_b:
+  # Default config for indented_on_contents is true.
+  # This is an alternate interpretation of untaken indents.
+  fail_str: |
+    SELECT *
+    FROM t1
+    JOIN t2 ON true
+    AND true
+  fix_str: |
+    SELECT *
+    FROM t1
+    JOIN t2 ON
+        true
+        AND true
+
 test_fail_indented_on_contents_false_fix:
   fail_str: |
     SELECT
@@ -339,7 +384,7 @@ test_fail_indented_multi_line_comment:
 
 test_jinja_with_disbalanced_pairs:
   # The range(3) -%} results in swallowing the \n
-  # N.B. The way L003 handles this is questionable,
+  # N.B. The way LT02 handles this is questionable,
   # and this test seals in that behaviour.
   pass_str: |
     SELECT
@@ -351,27 +396,31 @@ test_jinja_with_disbalanced_pairs:
     FROM orders
 
 test_fail_attempted_hanger_fix:
-  # It's almost a hanger and so should be corrected to one.
+  # Check messy hanger correction.
   fail_str: |
     SELECT coalesce(foo,
                   bar)
        FROM tbl
   fix_str: |
-    SELECT coalesce(foo,
-                    bar)
+    SELECT
+        coalesce(
+            foo,
+            bar
+        )
     FROM tbl
 
 test_fail_possible_hanger_fix:
-  # It's not even close to a hanging indent, but we should
-  # still attempt this, although this is potentially a questionable
-  # fix and could be an approach we change in the future.
+  # Same note as above, but with a messier example.
   fail_str: |
     SELECT coalesce(foo,
      bar)
        FROM tbl
   fix_str: |
-    SELECT coalesce(foo,
-                    bar)
+    SELECT
+        coalesce(
+            foo,
+            bar
+        )
     FROM tbl
 
 test_fail_consecutive_hangers:
@@ -385,28 +434,59 @@ test_fail_consecutive_hangers:
       and e like 'e%'
       and f like 'f%'
   fix_str: |
+    select *
+    from foo
+    where
+        a like 'a%'
+        and b like 'b%'
+        and c like 'c%'
+        and d like 'd%'
+        and e like 'e%'
+        and f like 'f%'
+
+test_fail_consecutive_hangers_implicit:
+  # NOTE: The implicit indent in the WHERE clause is allowed here,
+  # but implicit indents aren't enabled by default.
+  fail_str: |
     select *
     from foo
     where a like 'a%'
-          and b like 'b%'
-          and c like 'c%'
-          and d like 'd%'
-          and e like 'e%'
-          and f like 'f%'
+      and b like 'b%'
+      and c like 'c%'
+      and d like 'd%'
+      and e like 'e%'
+      and f like 'f%'
+  fix_str: |
+    select *
+    from foo
+    where a like 'a%'
+        and b like 'b%'
+        and c like 'c%'
+        and d like 'd%'
+        and e like 'e%'
+        and f like 'f%'
+  configs:
+    indentation:
+      allow_implicit_indents: true
 
 test_fail_clean_reindent_fix:
   # A "clean" indent is where the previous line ends with an
   # indent token (as per this example). We should use the
   # default approach and indent by 1 step.
+  # NOTE: Because the indent opened before "coalesce"
+  # isn't closed before the end of the line, we force an
+  # additional indent before it.
   fail_str: |
     SELECT coalesce(
     foo,
                     bar)
        FROM tbl
   fix_str: |
-    SELECT coalesce(
-        foo,
-        bar)
+    SELECT
+        coalesce(
+            foo,
+            bar
+        )
     FROM tbl
 
 # https://github.com/sqlfluff/sqlfluff/issues/643
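
allow_implicit_indents, exercised in the hunk above, is the other recurring 2.x theme in this file: with the flag on, a condition may stay on the WHERE line itself, and its continuation lines then indent one step. A sketch using the same config-file approach as before (the section name is taken from the fixture's configs block; config_path is assumed as above):

import pathlib
import sqlfluff

pathlib.Path("implicit.sqlfluff").write_text(
    "[sqlfluff:indentation]\nallow_implicit_indents = True\n"
)
sql = "select *\nfrom foo\nwhere a like 'a%'\n    and b like 'b%'\n"
print(sqlfluff.lint(sql, rules=["LT02"], config_path="implicit.sqlfluff"))
# expected: []  (this is the shape of the fixture's fix_str)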
@@ -714,16 +794,20 @@ test_tsql_function:
     AS
     BEGIN
         DECLARE @ISOweek int;
-        SET @ISOweek = DATEPART(wk, @DATE) + 1
+        SET
+            @ISOweek = DATEPART(wk, @DATE) + 1
             - DATEPART(wk, CAST(DATEPART(yy, @DATE) AS char(4)) + '0104');
         --Special cases Jan 1-3 may belong to the previous year
         IF (@ISOweek = 0)
-            SET @ISOweek = dbo.ISOWEEK(CAST(DATEPART(yy, @DATE) - 1
+            SET @ISOweek = dbo.ISOWEEK(CAST(
+                DATEPART(yy, @DATE) - 1
                 AS char(4)
-                ) + '12' + CAST(24 + DATEPART(day, @DATE) AS char(2))) + 1;
+            ) + '12' + CAST(24 + DATEPART(day, @DATE) AS char(2))) + 1;
         --Special case Dec 29-31 may belong to the next year
-        IF ((DATEPART(mm, @DATE) = 12)
-            AND ((DATEPART(dd, @DATE) - DATEPART(dw, @DATE)) >= 28))
+        IF (
+            (DATEPART(mm, @DATE) = 12)
+            AND ((DATEPART(dd, @DATE) - DATEPART(dw, @DATE)) >= 28)
+        )
             SET @ISOweek = 1;
         RETURN(@ISOweek);
     END;
@@ -790,7 +874,7 @@ test_fail_ignore_templated_whitespace_3:
 
 test_pass_ignore_templated_whitespace_4:
   # Note the newline after c2. This causes "AS other_id" to be on a different
-  # line in templated space, but not raw space. L003 should ignore lines like
+  # line in templated space, but not raw space. LT02 should ignore lines like
   # this.
   pass_str: |
     SELECT
@@ -848,25 +932,33 @@ test_fail_fix_template_indentation_2:
 test_pass_tsql_update_indent:
   pass_str: |
     update Extracts.itt_parm_base
-        set DateF = convert(varchar, @from_date, 112),
-            DateT = convert(varchar, @to_date, 112)
+    set
+        DateF = convert(varchar, @from_date, 112),
+        DateT = convert(varchar, @to_date, 112)
   configs:
     core:
       dialect: tsql
 
 test_pass_tsql_declare_indent:
-  pass_str: |
+  fail_str: |
     DECLARE @prv_qtr_1st_dt DATETIME,
             @last_qtr INT,
             @last_qtr_first_mn INT,
             @last_qtr_yr INT;
+  fix_str: |
+    DECLARE
+        @prv_qtr_1st_dt DATETIME,
+        @last_qtr INT,
+        @last_qtr_first_mn INT,
+        @last_qtr_yr INT;
   configs:
     core:
       dialect: tsql
 
 test_pass_tsql_set_indent:
   pass_str: |
-    SET @prv_qtr_1st_dt = CAST(@last_qtr_yr AS VARCHAR(4)) + '-' +
+    SET
+        @prv_qtr_1st_dt = CAST(@last_qtr_yr AS VARCHAR(4)) + '-' +
         CAST(@last_qtr_first_mn AS VARCHAR(2)) + '-01'
   configs:
     core:
@@ -874,7 +966,8 @@ test_pass_tsql_set_indent:
 
 test_pass_tsql_set_indent_multiple_params:
   pass_str: |
-    SET @param1 = 1,
+    SET
+        @param1 = 1,
         @param2 = 2
   configs:
     core:
@@ -882,7 +975,8 @@ test_pass_tsql_set_indent_multiple_params:
 
 test_pass_tsql_if_indent:
   pass_str: |
-    IF 1 > 1 AND
+    IF
+        1 > 1 AND
         2 < 2
         SELECT 1;
   configs:
@@ -993,8 +1087,8 @@ test_fail_snowflake_merge_statement:
             OR
             foo IN ('BAR','FOO')
     ) as src
-    on
-        src.foo = tgt.foo
+        on
+            src.foo = tgt.foo
     when matched then
         update set
             tgt.foo = src.foo
@@ -1003,14 +1097,8 @@ test_fail_snowflake_merge_statement:
     core:
       dialect: snowflake
 
-test_pass_hanging_indents_allowed_in_default:
-  pass_str: |
-    SELECT
-        a.line + (a.with
-                  + a.hanging_indent) as actually_ok,
-    FROM tbl as a
-
 test_fail_hanging_indents_convert_to_normal_indent:
+  # This takes advantage of new indent treatment in 2.0.x
   fail_str: |
     SELECT
         a.line + (a.with
@@ -1018,15 +1106,14 @@ test_fail_hanging_indents_convert_to_normal_indent:
     FROM tbl as a
   fix_str: |
     SELECT
-        a.line + (a.with
-            + a.hanging_indent) as actually_not_ok,
+        a.line + (
+            a.with
+            + a.hanging_indent
+        ) as actually_not_ok,
     FROM tbl as a
-  configs:
-    rules:
-      L003:
-        hanging_indents: false
 
 test_fail_hanging_indents_fix_mixed_indents:
+  # The tab is removed.
   fail_str: |
     SELECT
         a.line + (
@@ -1051,10 +1138,6 @@ test_fail_hanging_indents_fix_mixed_indents:
             )
         ) as some_harder_problems
     FROM tbl as a
-  configs:
-    rules:
-      L003:
-        hanging_indents: false
 
 test_pass_indented_procedure_parameters:
   pass_str: |
@@ -1081,7 +1164,7 @@ test_fail_unindented_procedure_parameters:
       dialect: tsql
 
 test_tsql_bubble_up_newline_after_fix:
-  # Tests issue 3303, where an L003 fix leaves a newline as the final child
+  # Tests issue 3303, where an LT02 fix leaves a newline as the final child
   # segment that has to be "bubbled up" two levels to avoid violating the
   # _is_code_or_meta() check in core/parser/segments/base.py.
   fail_str: |
@@ -1179,3 +1262,682 @@ test_tsql_outer_apply_indentation_fix:
   configs:
     core:
       dialect: tsql
+
+test_fail_consuming_whitespace_a:
+  # Test that this works even with tags which consume whitespace.
+  fail_str: |
+    {% for item in [1, 2] -%}
+    SELECT *
+    FROM some_table
+    {{ 'UNION ALL\n' if not loop.last }}
+    {%- endfor %}
+  fix_str: |
+    {% for item in [1, 2] -%}
+        SELECT *
+        FROM some_table
+        {{ 'UNION ALL\n' if not loop.last }}
+    {%- endfor %}
+
+test_fail_consuming_whitespace_b:
+  # Additional test to make sure that crazy things don't happen
+  # with the first newline.
+  fail_str: |
+    {% for item in [1, 2] -%}
+        SELECT *
+    FROM some_table
+    {{ 'UNION ALL\n' if not loop.last }}
+    {%- endfor %}
+  fix_str: |
+    {% for item in [1, 2] -%}
+        SELECT *
+        FROM some_table
+        {{ 'UNION ALL\n' if not loop.last }}
+    {%- endfor %}
+
+test_pass_consuming_whitespace_stable:
+  # Test for stability in fixes with loops and consuming tags.
+  # https://github.com/sqlfluff/sqlfluff/issues/3185
+  pass_str: |
+    {% for item in [1, 2] -%}
+        SELECT *
+        FROM some_table
+        {{ 'UNION ALL\n' if not loop.last }}
+    {%- endfor %}
+
+test_fail_trailing_comments:
+  # Check that trailing comments are reindented back to the
+  # start of the line.
+  fail_str: |
+    SELECT 1
+        -- foo
+            -- bar
+  fix_str: |
+    SELECT 1
+    -- foo
+    -- bar
+
+test_fail_case_statement:
+  # Test for issue with case statement indentation:
+  # https://github.com/sqlfluff/sqlfluff/issues/3836
+  fail_str: |
+    SELECT
+    foo
+    , CASE
+    WHEN 1 = 1
+    THEN 2
+    END AS example
+    FROM tbl
+  fix_str: |
+    SELECT
+      foo
+      , CASE
+        WHEN 1 = 1
+          THEN 2
+      END AS example
+    FROM tbl
+  configs:
+    indentation:
+      tab_space_size: 2
+
+test_pass_templated_case_statement:
+  # Test for template block in case statement indentation
+  # https://github.com/sqlfluff/sqlfluff/issues/3988
+  pass_str: |
+    {%- set json_keys = ["a", "b", "c"] -%}
+
+    with
+    dummy as (
+        select
+            {% for json_key in json_keys -%}
+                case
+                    when 1 = 1
+                        {% if json_key in ["b"] %}
+                            then 0
+                        {% else %}
+                            then 1
+                        {% endif %}
+                    else null
+                end as {{ json_key }}_suffix{% if not loop.last %}, {% endif %}
+            {% endfor %}
+    )
+
+    select *
+    from dummy
+
+test_pass_jinja_tag_multiline:
+  # Test that jinja block tags which contain newlines
+  # aren't linted, because we can't reliably fix them.
+  # The default fixing routine would only moving the
+  # start of the tag, which is ok but potentially strange.
+  # TODO: At some point we should find a better solution for
+  # this.
+  pass_str: |
+    SELECT
+        1,
+    {{
+            "my_jinja_tag_with_odd_indents"
+          }},
+        2,
+          {%
+      if True
+    %}
+            3,  -- NOTE: indented because within block
+    {%       endif
+    %}
+        4
+
+test_pass_trailing_inline_noqa:
+  pass_str: |
+    SELECT
+        col1,
+        col2
+    FROM
+        table1 -- noqa: CV09
+
+test_pass_implicit_indent:
+  # Test for ImplicitIndent.
+  # The theoretical indent between WHERE and "a" is implicit.
+  pass_str: |
+    SELECT *
+    FROM foo
+    WHERE a
+        AND b
+  configs:
+    indentation:
+      allow_implicit_indents: true
+
+test_fail_deny_implicit_indent:
+  # Test for ImplicitIndent.
+  # The theoretical indent between WHERE and "a" is implicit.
+  fail_str: |
+    SELECT *
+    FROM foo
+    WHERE a
+        AND b
+  fix_str: |
+    SELECT *
+    FROM foo
+    WHERE
+        a
+        AND b
+  configs:
+    indentation:
+      allow_implicit_indents: false
+
+test_pass_templated_newlines:
+  # NOTE: The macro has many newlines in it,
+  # and the call to it is indented. Check that
+  # this doesn't crash the linter.
+  pass_str: |
+    {% macro my_macro() %}
+
+      macro
+      + with_newlines
+
+    {% endmacro %}
+
+    SELECT
+        {{ my_macro() }} as awkward_indentation
+    FROM foo
+
+test_fail_fix_beside_templated:
+  # Check that templated code checks aren't too aggressive.
+  # https://github.com/sqlfluff/sqlfluff/issues/4215
+  fail_str: |
+    {% if False %}
+    SELECT 1
+    {% else %}
+    SELECT c
+    FROM t
+    WHERE c < 0
+    {% endif %}
+  fix_str: |
+    {% if False %}
+    SELECT 1
+    {% else %}
+        SELECT c
+        FROM t
+        WHERE c < 0
+    {% endif %}
+
+test_pass_block_comment:
+  # Check that subsequent block comment lines are ok to be indented.
+  # https://github.com/sqlfluff/sqlfluff/issues/4224
+  pass_str: |
+    SELECT
+        /* This comment
+           is unusually indented
+              - and contains
+              - even more indents
+        */
+        foo
+    FROM bar
+
+test_fix_block_comment:
+  # Check other comments are still fixed.
+  # https://github.com/sqlfluff/sqlfluff/issues/4224
+  fail_str: |
+    SELECT
+      -- bad
+        -- good
+        foo,
+      /* bad */
+           foo_bad,
+        /* long
+           comment which should keep indent
+              - including this
+        */
+        good_foo,
+        /*
+            and this
+        this is ok
+    this is NOT ok
+        */
+        bar
+    FROM tbl
+  fix_str: |
+    SELECT
+        -- bad
+        -- good
+        foo,
+        /* bad */
+        foo_bad,
+        /* long
+           comment which should keep indent
+              - including this
+        */
+        good_foo,
+        /*
+            and this
+        this is ok
+        this is NOT ok
+        */
+        bar
+    FROM tbl
+
+test_fail_case_else_end_clause:
+  # Checks linting of missing newline in CASE statement.
+  # More specifically this is a case of a multi-dedent
+  # not being handled properly when one of the indents
+  # it covers is taken, but the other is untaken.
+  # https://github.com/sqlfluff/sqlfluff/issues/4222
+  fail_str: |
+    select
+        case
+            when a then 'abc'
+            when b then 'def'
+            else 'ghi' end as field,
+        bar
+    from foo
+  fix_str: |
+    select
+        case
+            when a then 'abc'
+            when b then 'def'
+            else 'ghi'
+        end as field,
+        bar
+    from foo
+
+test_fail_hard_templated_indents:
+  # Test for consumed initial indents and consumed line indents.
+  # https://github.com/sqlfluff/sqlfluff/issues/4230
+  # NOTE: We're using a block indentation indicator because the
+  # test query has initial leading whitespace.
+  # https://yaml.org/spec/1.2.2/#8111-block-indentation-indicator
+  fail_str: |2
+      {%- if true -%}
+    SELECT * FROM {{ "t1" }}
+      {%- endif %}
+  fix_str: |2
+    {%- if true -%}
+        SELECT * FROM {{ "t1" }}
+    {%- endif %}
+
+test_fail_fix_consistency_around_comments:
+  # Check that comments don't make fixes inconsistent.
+  # https://github.com/sqlfluff/sqlfluff/issues/4223
+  fail_str: |
+    select
+        case
+            when a
+            then b
+        end as foo,
+        case
+            when a -- bar
+            then b
+        end as bar
+    from c
+  fix_str: |
+    select
+        case
+            when a
+                then b
+        end as foo,
+        case
+            when a -- bar
+                then b
+        end as bar
+    from c
+
+test_fail_coverage_indent_trough:
+  # This test primarily tests the handling of closing trough indents
+  fail_str: |
+    WITH bar as (SELECT 1
+        FROM foo)
+    SELECT a FROM bar
+  fix_str: |
+    WITH bar as (
+        SELECT 1
+        FROM foo
+    )
+    SELECT a FROM bar
+
+test_pass_combined_comment_impulses:
+  # This tests issue #4252
+  # https://github.com/sqlfluff/sqlfluff/issues/4252
+  pass_str: |
+    WITH cte AS (
+        SELECT *
+        FROM (
+            SELECT *
+            FROM table
+            WHERE
+                NOT bool_column AND NOT bool_column
+                AND some_column >= 1  -- This is a comment
+        )
+    ),
+
+    SELECT *
+    FROM cte
+    ;
+
+    SELECT *
+    FROM table3
+    ;
+
+test_indented_comment_tsql:
+  # TSQL redefines the block_comment. This checks that is done correctly.
+  # https://github.com/sqlfluff/sqlfluff/issues/4249
+  pass_str: |
+    /*
+
+        Author:                     tester
+        Create date:                2021-03-16
+
+    */
+
+    SELECT 1 AS a
+  configs:
+    core:
+      dialect: tsql
+
+test_pass_join_comment_indents_1:
+  # https://github.com/sqlfluff/sqlfluff/issues/4291
+  pass_str: |
+    select * from a
+    left join b
+        -- comment
+        on (a.x = b.x)
+test_pass_join_comment_indents_2:
+  # https://github.com/sqlfluff/sqlfluff/issues/4291
+  pass_str: |
+    select * from a
+    left join b -- comment
+        on (a.x = b.x)
+
+test_comment_effect_indents_default:
+  # https://github.com/sqlfluff/sqlfluff/issues/4294
+  fail_str: |
+    SELECT *
+    FROM table
+    WHERE TRUE -- comment
+    AND TRUE
+  fix_str: |
+    SELECT *
+    FROM table
+    WHERE
+        TRUE -- comment
+        AND TRUE
+
+test_comment_effect_indents_implicit:
+  # https://github.com/sqlfluff/sqlfluff/issues/4294
+  fail_str: |
+    SELECT *
+    FROM table
+    WHERE TRUE -- comment
+    AND TRUE
+  fix_str: |
+    SELECT *
+    FROM table
+    WHERE TRUE -- comment
+        AND TRUE
+  configs:
+    indentation:
+      allow_implicit_indents: true
+
+test_untaken_negative_1:
+  # https://github.com/sqlfluff/sqlfluff/issues/4234
+  fail_str: |
+    CREATE TABLE mytable
+    AS
+    (SELECT
+        id,
+        user_id
+    FROM another_table
+    )
+    ;
+  fix_str: |
+    CREATE TABLE mytable
+    AS
+    (
+        SELECT
+            id,
+            user_id
+        FROM another_table
+    )
+    ;
+
+test_untaken_negative_2:
+  # https://github.com/sqlfluff/sqlfluff/issues/4234
+  fail_str: |
+    WITH m AS (SELECT
+      firstCol
+      , secondCol
+    FROM dbo.myTable
+    )
+
+    SELECT * FROM m
+  fix_str: |
+    WITH m AS (
+        SELECT
+            firstCol
+            , secondCol
+        FROM dbo.myTable
+    )
+
+    SELECT * FROM m
+
+test_untaken_negative_implicit:
+  # NOTE: Check that implicit indents don't
+  # apply before single brackets.
+  pass_str: |
+    SELECT *
+    FROM foo
+    WHERE (
+        a = b
+    )
+    GROUP BY a
+  configs:
+    indentation:
+      allow_implicit_indents: true
+
+test_fail_mixed_tabs_and_spaces:
+  # NOTE: This used to be L002 (rather than L003)
+  fail_str: "SELECT\n \t 1"
+  fix_str: "SELECT\n    1"
+
+test_fix_implicit_indents_4467_a:
+  # https://github.com/sqlfluff/sqlfluff/issues/4467
+  fail_str: |
+    SELECT *
+    FROM d
+    LEFT JOIN l
+      ON d.a = l.a
+        AND d.b = l.b
+  fix_str: |
+    SELECT *
+    FROM d
+    LEFT JOIN l
+        ON d.a = l.a
+            AND d.b = l.b
+  configs:
+    indentation:
+      allow_implicit_indents: true
+
+test_fix_implicit_indents_4467_b:
+  # https://github.com/sqlfluff/sqlfluff/issues/4467
+  pass_str: |
+    SELECT *
+    FROM d
+    LEFT JOIN l
+      ON d.a = l.a
+        AND d.b = l.b
+  configs:
+    indentation:
+      allow_implicit_indents: true
+      tab_space_size: 2
+
+test_fix_macro_indents_4367:
+  # https://github.com/sqlfluff/sqlfluff/issues/4367
+  fail_str: |
+    {% macro my_macro(col) %}
+        {{ col }}
+    {% endmacro %}
+    SELECT
+        something,
+    {{ my_macro("mycol") }},
+        something_else
+    FROM mytable
+  fix_str: |
+    {% macro my_macro(col) %}
+        {{ col }}
+    {% endmacro %}
+    SELECT
+        something,
+        {{ my_macro("mycol") }},
+        something_else
+    FROM mytable
+
+test_fix_untaken_positive_4433:
+  # https://github.com/sqlfluff/sqlfluff/issues/4433
+  fail_str: |
+    CREATE TABLE mytable
+    AS
+    (SELECT
+        id,
+        user_id
+    FROM another_table
+    WHERE
+        TRUE
+    )
+    ;
+  fix_str: |
+    CREATE TABLE mytable
+    AS
+    (
+        SELECT
+            id,
+            user_id
+        FROM another_table
+        WHERE
+            TRUE
+    )
+    ;
+
+test_implicit_case_4542:
+  # https://github.com/sqlfluff/sqlfluff/issues/4542
+  pass_str: |
+    select
+        a,
+        case when b is null then 0 else 1 end as c
+    from my_table;
+  configs:
+    indentation:
+      allow_implicit_indents: true
+
+test_indented_joins_4484:
+  # https://github.com/sqlfluff/sqlfluff/issues/4484
+  pass_str: |
+    select *
+    from table_1
+        inner join table_2
+            on table_1.key = table_2.key
+        inner join table_3
+            on table_2.key = table_3.key
+  configs:
+    indentation:
+      indented_joins: true
+
+test_tsql_where_implicit_4559:
+  # https://github.com/sqlfluff/sqlfluff/issues/4559
+  pass_str: |
+    SELECT t.col1
+    WHERE t.col2 = 'foo'
+        AND t.col3 = 'bar'
+  configs:
+    core:
+      dialect: tsql
+    indentation:
+      allow_implicit_indents: true
+
+test_jinja_nested_tracking:
+  # This tests the caching features of BlockTracker
+  # in the lexer. If that's not functioning properly,
+  # the indentation of the nested jinja blocks in this
+  # query will likely fail.
+  pass_str: |
+    SELECT *
+    FROM
+    {% for action in ['a', 'b'] %}
+        {% if loop.first %}
+            {{action}}_var
+        {% else %}
+        JOIN
+            {{action}}_var
+            USING
+                (c, d, e)
+        {% endif %}
+    {% endfor %}
+
+test_configure_no_indent_before_then_4589:
+  # THEN can be configured to not be indented
+  pass_str: |
+    SELECT
+        a,
+        CASE
+            WHEN b >= 42 THEN
+                1
+            ELSE 0
+        END AS c
+    FROM some_table
+  configs:
+    core:
+      dialect: ansi
+    indentation:
+      indented_then: false
+
+test_bigquery_insert_statement_values_clause:
+  pass_str: |
+    INSERT dataset.inventory (product, quantity)
+    VALUES("top load washer", 10);
+  configs:
+    core:
+      dialect: bigquery
+
+test_bigquery_merge_statement_values_clause:
+  fail_str: |
+    MERGE dataset.detailedinventory AS t
+    USING dataset.inventory AS s
+        ON t.product = s.product
+    WHEN NOT MATCHED AND quantity < 20 THEN
+        INSERT (product, quantity, supply_constrained)
+            VALUES (product, quantity, TRUE)
+    WHEN NOT MATCHED THEN
+        INSERT (product, quantity, supply_constrained)
+            VALUES (product, quantity, FALSE);
+  fix_str: |
+    MERGE dataset.detailedinventory AS t
+    USING dataset.inventory AS s
+        ON t.product = s.product
+    WHEN NOT MATCHED AND quantity < 20 THEN
+        INSERT (product, quantity, supply_constrained)
+        VALUES (product, quantity, TRUE)
+    WHEN NOT MATCHED THEN
+        INSERT (product, quantity, supply_constrained)
+        VALUES (product, quantity, FALSE);
+  configs:
+    core:
+      dialect: bigquery
+
+test_fail_issue_4680:
+  # NOTE: It doesn't reindent the second clause, but the important
+  # thing is that we don't get an exception.
+  fail_str: |
+    SELECT col1
+    FROM table
+    WHERE
+      {% if true %}
+        col1 > 1
+      {% else %}
+        col1 > 0
+      {% endif %}
+  fix_str: |
+    SELECT col1
+    FROM table
+    WHERE
+        {% if true %}
+            col1 > 1
+        {% else %}
+        col1 > 0
+      {% endif %}
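
A note on the fixture schema used throughout these files: each top-level YAML key names a test case. The sketch below is inferred from the cases in this diff rather than from upstream docs (case names are hypothetical; the violations entry copies the shape of an LT05 case further down):

    rule: LT02                  # rule exercised by every case in the file
    some_passing_case:
      # pass_str: SQL that must lint clean under the rule.
      pass_str: |
        SELECT
            1
    some_failing_case:
      # fail_str: SQL that must trigger the rule; fix_str: the expected
      # output of `sqlfluff fix` (omitted when a case is unfixable).
      fail_str: |
        SELECT
          1
      fix_str: |
        SELECT
            1
      # violations_after_fix: violations still expected after fixing
      # (entry shape copied from an LT05 case further down).
      violations_after_fix:
        - description: Line is too long (86 > 80).
          line_no: 4
          line_pos: 9
          name: layout.long_lines
      # configs: per-case overrides of the default configuration.
      configs:
        core:
          dialect: ansi
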
diff --git a/test/fixtures/rules/std_rule_cases/LT02-tab-space.yml b/test/fixtures/rules/std_rule_cases/LT02-tab-space.yml
new file mode 100644
index 0000000..160f602
--- /dev/null
+++ b/test/fixtures/rules/std_rule_cases/LT02-tab-space.yml
@@ -0,0 +1,82 @@
+rule: LT02
+
+spaces_pass_default:
+  pass_str: "SELECT\n    1"
+
+
+spaces_fail:
+  fail_str: "SELECT\n    1"
+  fix_str: "SELECT\n\t1"
+  configs:
+    rules:
+      indent_unit: tab
+
+
+spaces_fail_custom_tab_space_size:
+  fail_str: "SELECT\n  MAX(\n    a\n  )"
+  fix_str: "SELECT\n\tMAX(\n\t\ta\n\t)"
+  configs:
+    rules:
+      indent_unit: tab
+      tab_space_size: 2
+
+
+tabs_fail_default:
+  fail_str: "SELECT\n\t\t1\n"
+  fix_str: "SELECT\n    1\n"
+
+
+tabs_fail_default_set_tab_space_size:
+  fail_str: "SELECT\n\t\t1\n"
+  fix_str: "SELECT\n  1\n"
+  configs:
+    rules:
+      tab_space_size: 2
+
+
+tabs_pass:
+  pass_str: "SELECT\n\t1"
+  configs:
+    rules:
+      indent_unit: tab
+
+
+indented_comments:
+  pass_str: |
+    SELECT
+        a,         -- Some comment
+        longer_col -- A lined up comment
+    FROM spam
+
+
+indented_comments_default_config:
+  fail_str: |
+    SELECT
+    	a,			-- Some comment
+    	longer_col	-- A lined up comment
+    FROM spam
+  # The rule will only fix the indent before the select targets.
+  # Here the tab indent is replaced with spaces.
+  fix_str: |
+    SELECT
+        a,			-- Some comment
+        longer_col	-- A lined up comment
+    FROM spam
+
+
+indented_comments_tab_config:
+  fail_str: |
+    SELECT
+        a,         -- Some comment
+        longer_col -- A lined up comment
+    FROM spam
+  # The rule will only fix the indent before the select targets.
+  # Here the space indent is replaced with tabs.
+  fix_str: |
+    SELECT
+    	a,         -- Some comment
+    	longer_col -- A lined up comment
+    FROM spam
+  configs:
+    rules:
+      indent_unit: tab
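
Note the two encodings these fixtures use for whitespace-sensitive SQL: double-quoted scalars with escapes pin down the exact control characters (tab vs space), while block scalars (|) favour readable literal layout. For example, the `spaces_fail` case above reads as:

    spaces_fail:
      # The four-space indent fails; the fix replaces it with one tab.
      fail_str: "SELECT\n    1"
      fix_str: "SELECT\n\t1"
      configs:
        rules:
          indent_unit: tab
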
diff --git a/test/fixtures/rules/std_rule_cases/L007.yml b/test/fixtures/rules/std_rule_cases/LT03.yml
similarity index 92%
rename from test/fixtures/rules/std_rule_cases/L007.yml
rename to test/fixtures/rules/std_rule_cases/LT03.yml
index cbdd0b8..b82e87d 100644
--- a/test/fixtures/rules/std_rule_cases/L007.yml
+++ b/test/fixtures/rules/std_rule_cases/LT03.yml
@@ -1,4 +1,4 @@
-rule: L007
+rule: LT03
 
 passes_on_before_default:
   pass_str: |
@@ -247,3 +247,17 @@ passes_operator_alone_on_line:
       'asdf'
       ||
       'jklm'
+
+fixes_tuple_error_issue:
+  # https://github.com/sqlfluff/sqlfluff/issues/4184
+  # NB: This one isn't fixable.
+  fail_str: |
+    select * from foo
+    where c is not null and -- comment
+        {% if true -%}a >= b and
+        -- comment.
+        {% endif %}
+        true
+  configs:
+    indentation:
+      template_blocks_indent: false
diff --git a/test/fixtures/rules/std_rule_cases/L019.yml b/test/fixtures/rules/std_rule_cases/LT04.yml
similarity index 92%
rename from test/fixtures/rules/std_rule_cases/L019.yml
rename to test/fixtures/rules/std_rule_cases/LT04.yml
index c180d49..4928e3b 100644
--- a/test/fixtures/rules/std_rule_cases/L019.yml
+++ b/test/fixtures/rules/std_rule_cases/LT04.yml
@@ -1,4 +1,4 @@
-rule: L019
+rule: LT04
 
 leading_comma_violations:
   fail_str: |
@@ -328,3 +328,37 @@ trailing_comma_with_templated_column_2:
         {{ "c1
     " }}, c2 AS days_since
     FROM logs
+
+leading_comma_fix_mixed_indent:
+  # See: https://github.com/sqlfluff/sqlfluff/issues/4255
+  # NOTE: Undisturbed mixed indent.
+  fail_str: |
+    select B
+    	  ,C
+    from A
+  fix_str: |
+    select B,
+    	  C
+    from A
+  configs:
+    layout:
+      type:
+        comma:
+          line_position: trailing
+
+trailing_comma_fix_mixed_indent:
+  # See: https://github.com/sqlfluff/sqlfluff/issues/4255
+  # NOTE: Undisturbed mixed indent.
+  fail_str: |
+    select B,
+    	  C
+    from A
+  fix_str: |
+    select B
+    	  , C
+    from A
+  configs:
+    layout:
+      type:
+        comma:
+          line_position: leading
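
The two cases above use the 2.0-style layout configuration: comma placement is a `line_position` setting under `layout.type.comma`, which LT04 then enforces, rather than a per-rule option. A minimal sketch of flipping a project to leading commas (case name and SQL are illustrative):

    leading_comma_example:
      fail_str: |
        select a,
            b
        from t
      fix_str: |
        select a
            , b
        from t
      configs:
        layout:
          type:
            comma:
              line_position: leading
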
diff --git a/test/fixtures/rules/std_rule_cases/LT05.yml b/test/fixtures/rules/std_rule_cases/LT05.yml
new file mode 100644
index 0000000..4b59d80
--- /dev/null
+++ b/test/fixtures/rules/std_rule_cases/LT05.yml
@@ -0,0 +1,711 @@
+rule: LT05
+
+test_pass_line_too_long_config_override:
+  # Long lines (with config override)
+  pass_str: "SELECT COUNT(*) FROM tbl\n"
+  configs:
+    core:
+      max_line_length: 30
+
+test_fail_line_too_long_with_comments_1:
+  # Check we move comments correctly
+  fail_str: "SELECT 1 -- Some Comment\n"
+  fix_str: "-- Some Comment\nSELECT 1\n"
+  configs:
+    core:
+      max_line_length: 18
+
+test_fail_line_too_long_with_comments_1_after:
+  # Check we move comments correctly
+  fail_str: "SELECT 1 -- Some Comment\n"
+  fix_str: "SELECT 1\n-- Some Comment\n"
+  configs:
+    core:
+      max_line_length: 18
+    indentation:
+      trailing_comments: after
+
+test_fail_line_too_long_with_comments_1_no_newline:
+  # Check we move comments correctly, and that it
+  # still works when there isn't a trailing newline.
+  # https://github.com/sqlfluff/sqlfluff/issues/4386
+  fail_str: "SELECT 1 -- Some Comment"
+  fix_str: "-- Some Comment\nSELECT 1"
+  configs:
+    core:
+      max_line_length: 18
+
+test_fail_line_too_long_with_comments_2:
+  # Check we can add newlines after dedents (with an indent).
+  # NOTE: For LT05, we don't repair the initial indent,
+  # but the following lines will be fixed as though it
+  # had been corrected. Ideally LT02 would have been run _first_
+  # on this file.
+  fail_str: "    SELECT COUNT(*) FROM tbl\n"
+  fix_str: "    SELECT COUNT(*)\nFROM tbl\n"
+  configs:
+    core:
+      max_line_length: 20
+
+test_fail_line_too_long_with_comments_3:
+  # Check priority of fixes
+  fail_str: "SELECT COUNT(*) FROM tbl -- Some Comment\n"
+  fix_str: "-- Some Comment\nSELECT COUNT(*)\nFROM tbl\n"
+  configs:
+    core:
+      max_line_length: 18
+
+test_fail_line_too_long_with_comments_4:
+  # In this case, the inline comment is NOT on a line by itself (note the
+  # leading comma), but even if we move it onto a line by itself, it's still
+  # too long. Here the rule should do nothing; otherwise it triggers
+  # an endless cycle of "fixes" that simply keeps adding blank lines.
+  fail_str: |
+    SELECT
+    c1
+    ,--  the "y variable" and uses_small_subject_line to be the "x variable" in terms of the regression line.
+    c2
+  configs:
+    core:
+      max_line_length: 80
+
+test_pass_line_too_long_with_comments_ignore_comment_lines:
+  # Same case as above, but should pass as ignore_comment_lines is set to true
+  pass_str: |
+    SELECT
+    c1
+    ,--  the "y variable" and uses_small_subject_line to be the "x variable" in terms of the regression line.
+    c2
+  configs:
+    core:
+      max_line_length: 80
+    rules:
+      layout.long_lines:
+        ignore_comment_lines: true
+
+test_fail_line_too_long_only_comments:
+  # Check long lines that are only comments are linted correctly
+  fail_str: "-- Some really long comments on their own line\n\nSELECT 1"
+  configs:
+    core:
+      max_line_length: 18
+
+test_fail_line_too_long_handling_indents:
+  # Check we handle indents nicely
+  fail_str: "SELECT 12345\n"
+  fix_str: "SELECT\n    12345\n"
+  configs:
+    core:
+      max_line_length: 10
+
+test_pass_line_too_long_ignore_comments_true:
+  # Check we can ignore comments if we want
+  pass_str: "SELECT 1\n-- Some long comment over 10 characters\n"
+  configs:
+    core:
+      max_line_length: 10
+    rules:
+      layout.long_lines:
+        ignore_comment_lines: true
+
+test_pass_line_too_long_ignore_comments_false:
+  # Check we still pick up long comments if we don't want to ignore
+  fail_str: "SELECT 1\n-- Some long comment over 10 characters\n"
+  configs:
+    core:
+      max_line_length: 10
+    rules:
+      layout.long_lines:
+        ignore_comment_lines: false
+
+test_compute_line_length_before_template_expansion_1:
+  # Line 3 is fine before expansion. Being too long after expansion is NOT
+  # considered a violation.
+  pass_str: |
+    SELECT user_id
+    FROM
+        `{{bi_ecommerce_orders}}` {{table_at_job_start}}
+  configs:
+    core:
+      dialect: bigquery
+    templater:
+      jinja:
+        context:
+          table_at_job_start: FOR SYSTEM_TIME AS OF CAST('2021-03-02T01:22:59+00:00' AS TIMESTAMP)
+          bi_ecommerce_orders: bq-business-intelligence.user.ecommerce_orders
+
+
+test_compute_line_length_before_template_expansion_2:
+  # Line 3 is too long before expansion. It's fine after expansion, but the rule
+  # does not look at that.
+  fail_str: |
+    SELECT user_id
+    FROM
+        `{{bi_ecommerce_orders_bi_ecommerce_orders}}` AS {{table_alias_table_alias_table_alias_table_alias_table_alias_table_alias}}
+  fix_str: |
+    SELECT user_id
+    FROM
+        `{{bi_ecommerce_orders_bi_ecommerce_orders}}`
+            AS {{table_alias_table_alias_table_alias_table_alias_table_alias_table_alias}}
+  violations_after_fix:
+    # Even after fixing, the final line is still too long.
+    - description: Line is too long (86 > 80).
+      line_no: 4
+      line_pos: 9
+      name: layout.long_lines
+  configs:
+    core:
+      dialect: bigquery
+    templater:
+      jinja:
+        context:
+          bi_ecommerce_orders_bi_ecommerce_orders: bq-business-intelligence.user.ecommerce_orders
+          table_alias_table_alias_table_alias_table_alias_table_alias_table_alias: t
+
+
+test_long_jinja_comment:
+  fail_str: |
+    SELECT *
+    {# comment #}
+    {# ........................................................................... #}
+    FROM table
+  configs:
+    core:
+      max_line_length: 80
+    rules:
+      layout.long_lines:
+        ignore_comment_lines: false
+
+
+test_long_jinja_comment_ignore:
+  # A Jinja comment is a comment.
+  pass_str: |
+    SELECT *
+    {# comment #}
+    {# ........................................................................... #}
+    FROM table
+  configs:
+    core:
+      max_line_length: 80
+    rules:
+      layout.long_lines:
+        ignore_comment_lines: true
+
+
+test_for_loop:
+  # A Jinja for loop
+  pass_str: |
+    {% for elem in 'foo' %}
+    SELECT '{{ elem }}' FROM table1;
+    SELECT '{{ elem }}' FROM table2;
+    {% endfor %}
+
+
+test_for_loop_repeating_elements_starts_with_literal:
+  # A Jinja for loop with repeating elements (that are difficult to match)
+  # but starting with a literal that can be used to match
+  pass_str: |
+    {% set elements = 'foo' %}
+    SELECT
+        CASE
+            {% for elem in elements %}
+            WHEN '{{ elem }}' = '' THEN 1
+            WHEN '{{ elem }}' = '' THEN 1
+            {% endfor %}
+        END
+
+
+test_for_loop_starting_with_templated_piece:
+  # A Jinja for loop starting with non-literals
+  # But unique parts can be used to match
+  pass_str: |
+    {% set elements = 'foo' %}
+    {% set when = 'WHEN' %}
+    SELECT
+        CASE
+            {% for elem in elements %}
+            {{ when }} '{{ elem }}' = '' THEN 1
+            {{ when }} '{{ elem }}' = '' THEN 2
+            {% endfor %}
+        END
+
+test_for_loop_fail_complex_match:
+  # A Jinja for loop starting with non-literals
+  # But non-unique parts which therefore cannot
+  # be used to match
+  pass_str: |
+    {% set elements = 'foo' %}
+    {% set when = 'WHEN' %}
+    SELECT
+        CASE
+            {% for elem in elements %}
+            {{ when }} '{{ elem }}' = '' THEN 1
+            {{ when }} '{{ elem }}' = '' THEN 1
+            {% endfor %}
+        END
+
+test_for_loop_fail_simple_match:
+  # If the for loop only contains literals, it should still pass
+  pass_str: |
+    {% set elements = 'foo' %}
+    SELECT
+        CASE
+            {% for elem in elements %}
+            WHEN 'f' THEN a
+            {% endfor %}
+        END
+
+
+test_set_statement:
+  # A Jinja set statement
+  pass_str: |
+    {% set statement = "SELECT 1 from table1;" %}
+    {{ statement }}{{ statement }}
+  configs:
+    core:
+      max_line_length: 80
+
+
+test_issue_1666_line_too_long_unfixable_jinja:
+  # Note the trailing space at the end of line 1. This is a necessary part of
+  # the test, because the space (which is passed through to the output) was
+  # "tricking" LT05 into trying to split the line, then encountering an internal
+  # error.
+  fail_str: "{{ config (schema='bronze', materialized='view', sort =['id','number'], dist = 'all', tags =['longlonglonglonglong']) }} \n\nselect 1\n"
+
+test_fail_ignore_comment_clauses_1:
+  # Too long, comment clause not ignored
+  fail_str: |
+    CREATE OR REPLACE TABLE mytable (
+        col1 NUMBER COMMENT 'col1 comment',
+        col2 BOOLEAN COMMENT 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length',
+        col3 NUMBER COMMENT 'col3 comment'
+    )
+
+test_fail_ignore_comment_clauses_2:
+  # Too long even after ignoring comment clause
+  fail_str: |
+    CREATE OR REPLACE TABLE mytable (
+        col1 NUMBER COMMENT 'col1 comment',
+        colaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbcccccccccccccccddddddddddddddddeeeeeeeeeeeeeee2 BOOLEAN COMMENT 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length',
+        col3 NUMBER COMMENT 'col3 comment'
+    )
+  configs:
+    rules:
+      layout.long_lines:
+        ignore_comment_clauses: true
+
+test_pass_ignore_comment_clauses:
+  pass_str: |
+    CREATE OR REPLACE TABLE mytable (
+        col1 NUMBER COMMENT 'col1 comment',
+        col2 BOOLEAN COMMENT 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length',
+        col3 NUMBER COMMENT 'col3 comment'
+    )
+  configs:
+    rules:
+      layout.long_lines:
+        ignore_comment_clauses: true
+
+test_pass_ignore_comment_clauses_teradata:
+  pass_str: |
+    comment on table sandbox_db.Org_Descendant is 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length';
+  configs:
+    core:
+      dialect: teradata
+    rules:
+      layout.long_lines:
+        ignore_comment_clauses: true
+
+test_pass_ignore_comment_clauses_exasol:
+  pass_str: |
+    CREATE TABLE IF NOT EXISTS SCHEM.TAB (
+        ID DECIMAL(18, 0) IDENTITY CONSTRAINT PRIMARY KEY DISABLE COMMENT IS 'without constraint name'
+    ) COMMENT IS 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length';
+  configs:
+    core:
+      dialect: exasol
+    rules:
+      layout.long_lines:
+        ignore_comment_clauses: true
+
+test_pass_ignore_comment_clauses_snowflake:
+  pass_str: |
+    CREATE TABLE foo_table (bar INTEGER) COMMENT = 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length'
+  configs:
+    core:
+      dialect: snowflake
+    rules:
+      layout.long_lines:
+        ignore_comment_clauses: true
+
+test_pass_ignore_comment_clauses_postgres:
+  pass_str: |
+    CREATE TABLE IF NOT EXISTS foo
+    ( id UUID DEFAULT uuid_generate_v4() PRIMARY KEY,
+      name TEXT NOT NULL
+    );
+
+    COMMENT ON TABLE foo IS 'Windows Phone 8, however, was never able to overcome a long string of disappointments for Microsoft. ';
+  configs:
+    core:
+      dialect: postgres
+    rules:
+      layout.long_lines:
+        ignore_comment_clauses: true
+
+test_fail_templated_comment_line:
+  fail_str: |
+    SELECT *
+        {# ........................................................................... #}
+    FROM table
+  configs:
+    templater:
+      jinja:
+        context: {}
+
+test_pass_ignore_templated_comment_lines:
+  # NOTE: This is potentially a behaviour change in 2.0.0.
+  # This was erroneously using the `ignore_comment_clauses`
+  # config even though this query contains no comment clauses.
+  pass_str: |
+    SELECT *
+        {# ........................................................................... #}
+    FROM table
+  configs:
+    rules:
+      layout.long_lines:
+        ignore_comment_lines: true
+    templater:
+      jinja:
+        context: {}
+
+test_fail_operator_precedence_1:
+  # Make sure we split at the + operator.
+  fail_str: |
+    select
+        ISNULL(count, '0') * 10000 + ISNULL(planned, 100)
+    from blah
+  fix_str: |
+    select
+        ISNULL(count, '0') * 10000
+        + ISNULL(planned, 100)
+    from blah
+  configs:
+    core:
+      max_line_length: 30
+
+test_fail_operator_precedence_2:
+  # Make sure we split at the AND operator.
+  fail_str: |
+    select
+        recommendation_list[ORDINAL(1)] = 'uses_small_subject_line' AND uses_small_subject_line != CAST(effect_size_list[ORDINAL(1)] AS FLOAT64)
+    from blah
+  fix_str: |
+    select
+        recommendation_list[ORDINAL(1)] = 'uses_small_subject_line'
+        AND uses_small_subject_line != CAST(effect_size_list[ORDINAL(1)] AS FLOAT64)
+    from blah
+  configs:
+    core:
+      max_line_length: 120
+
+test_fail_operator_precedence_3:
+  # Stretching cases for operators and comma
+  fail_str: |
+    select
+        a, b + c, long_name + long_name * long_name - long_name as foo, long_name AND long_name OR long_name OR long_name as bar
+    from blah
+  fix_str: |
+    select
+        a,
+        b + c,
+        long_name
+        + long_name * long_name
+        - long_name as foo,
+        long_name AND long_name
+        OR long_name
+        OR long_name as bar
+    from blah
+  configs:
+    core:
+      max_line_length: 30
+
+test_pass_long_multiline_jinja:
+  # None of the lines are longer than 30
+  # but the whole tag is. It shouldn't
+  # cause issues.
+  pass_str: |
+    select
+        {{
+          1 + 2 + 3 + 4 + 5
+          + 6 + 7 + 8 + 9 + 10
+        }}
+    from blah
+  configs:
+    core:
+      max_line_length: 30
+
+test_fail_long_inline_statement:
+  # Tests that line breaks happen between clauses properly
+  fail_str: |
+    select distinct a + b from c join d using (e) where f = g and h = i order by j
+  fix_str: |
+    select distinct a + b
+    from c
+    join d using (e)
+    where f = g and h = i
+    order by j
+  configs:
+    core:
+      max_line_length: 50
+
+test_pass_check_off_1:
+  # Tests that we can disable the check (using 0).
+  pass_str: |
+    select my_really_really_really_really_really_really_really_really_really_really_really_long_var from tbl
+  configs:
+    core:
+      max_line_length: 0
+
+test_pass_check_off_2:
+  # Tests that we can disable the check (using -1).
+  pass_str: |
+    select my_really_really_really_really_really_really_really_really_really_really_really_long_var from tbl
+  configs:
+    core:
+      max_line_length: -1
+
+test_comment_move_mid_query:
+  fail_str: |
+    select
+        my_long_long_line as foo -- with some comment
+    from foo
+  fix_str: |
+    select
+        -- with some comment
+        my_long_long_line as foo
+    from foo
+  configs:
+    core:
+      max_line_length: 40
+
+test_fix_implicit_indent:
+  # Test for ImplicitIndent.
+  # The theoretical indent between WHERE and "a" is implicit.
+  fail_str: |
+    SELECT
+        CASE
+            WHEN longer_and_longer AND much_much_much_longer
+                THEN longer_and_longer AND much_much_much_longer
+            ELSE longer_and_longer AND much_much_much_longer
+        END as foobar,
+        CASE WHEN a THEN b END as bar
+    FROM foo
+    WHERE a_really_long_field AND a_nother_really_long_field
+    HAVING a_really_long_field AND a_nother_really_long_field
+  fix_str: |
+    SELECT
+        CASE
+            WHEN longer_and_longer
+                AND much_much_much_longer
+                THEN longer_and_longer
+                    AND much_much_much_longer
+            ELSE longer_and_longer
+                AND much_much_much_longer
+        END as foobar,
+        CASE WHEN a THEN b END as bar
+    FROM foo
+    WHERE a_really_long_field
+        AND a_nother_really_long_field
+    HAVING a_really_long_field
+        AND a_nother_really_long_field
+  configs:
+    core:
+      max_line_length: 45
+    indentation:
+      allow_implicit_indents: true
+
+test_fix_no_implicit_indent:
+  # Test explicitly preventing implicit indents.
+  fail_str: |
+    SELECT
+        CASE
+            WHEN longer_and_longer AND much_much_much_longer
+                THEN longer_and_longer AND much_much_much_longer
+            ELSE longer_and_longer AND much_much_much_longer
+        END as foobar,
+        CASE WHEN a THEN b END as bar
+    FROM foo
+    WHERE a_really_long_field AND a_nother_really_long_field
+    HAVING a_really_long_field AND a_nother_really_long_field
+  fix_str: |
+    SELECT
+        CASE
+            WHEN
+                longer_and_longer
+                AND much_much_much_longer
+                THEN
+                    longer_and_longer
+                    AND much_much_much_longer
+            ELSE
+                longer_and_longer
+                AND much_much_much_longer
+        END as foobar,
+        CASE WHEN a THEN b END as bar
+    FROM foo
+    WHERE
+        a_really_long_field
+        AND a_nother_really_long_field
+    HAVING
+        a_really_long_field
+        AND a_nother_really_long_field
+  configs:
+    core:
+      max_line_length: 45
+    indentation:
+      allow_implicit_indents: false
+
+test_fix_window_function:
+  # https://github.com/sqlfluff/sqlfluff/issues/4292
+  fail_str: |
+    select *
+    from t
+    qualify a = coalesce(
+        first_value(iff(b = 'none', null, a)) ignore nulls over (partition by c order by d desc),
+        first_value(a) respect nulls over (partition by c order by d desc)
+    )
+  fix_str: |
+    select *
+    from t
+    qualify a = coalesce(
+        first_value(
+            iff(b = 'none', null, a)
+        ) ignore nulls
+            over (partition by c order by d desc),
+        first_value(a) respect nulls
+            over (partition by c order by d desc)
+    )
+  configs:
+    core:
+      max_line_length: 50
+      dialect: snowflake
+
+test_fail_do_not_fix_noqa:
+  # https://github.com/sqlfluff/sqlfluff/issues/4248
+  # NOTE: No fix_str, because this should be unfixable.
+  fail_str: |
+    SELECT
+        col1,
+        col2,
+        col3
+    FROM
+        really_really_really_really_really_really_long_schema_name.TABLE1 -- noqa: L014
+
+test_operator_precedence:
+  fail_str: |
+    SELECT *
+    FROM foo
+    left join abcdef_abcd_details
+        on foo.abcdefgh_id = abcdef_abcd_details.abcdefgh_id and abcdef_abcd_details.abcdef_abcdef_abcdef_abcdef = 1
+  fix_str: |
+    SELECT *
+    FROM foo
+    left join abcdef_abcd_details
+        on
+            foo.abcdefgh_id = abcdef_abcd_details.abcdefgh_id
+            and abcdef_abcd_details.abcdef_abcdef_abcdef_abcdef = 1
+  configs:
+    core:
+      max_line_length: 100
+      dialect: snowflake
+
+test_long_functions_and_aliases:
+  # https://github.com/sqlfluff/sqlfluff/issues/4033
+  fail_str: |
+    SELECT
+        my_function(col1 + col2, arg2, arg3) over (partition by col3, col4 order by col5 rows between unbounded preceding and current row) as my_relatively_long_alias,
+        my_other_function(col6, col7 + col8, arg4) as my_other_relatively_long_alias,
+        my_expression_function(col6, col7 + col8, arg4) = col9 + col10 as another_relatively_long_alias
+    FROM my_table
+  fix_str: |
+    SELECT
+        my_function(col1 + col2, arg2, arg3)
+            over (
+                partition by col3, col4
+                order by col5 rows between unbounded preceding and current row
+            )
+            as my_relatively_long_alias,
+        my_other_function(col6, col7 + col8, arg4)
+            as my_other_relatively_long_alias,
+        my_expression_function(col6, col7 + col8, arg4)
+        = col9 + col10 as another_relatively_long_alias
+    FROM my_table
+
+test_order_by_rebreak_span:
+  # This tests that we can correctly rebreak an "order by" expression.
+  fail_str: |
+    select * from
+        (
+            select
+                tbl1.*,
+                row_number() over (
+                    partition by tbl1.the_name,  {{ ['a', 'b', 'c', 'd'] | join(', ') }} order by created_at desc
+                ) rnk
+            from foo
+            inner join tbl2
+                on tbl1.the_name = tbl2.the_name
+        )
+  fix_str: |
+    select * from
+        (
+            select
+                tbl1.*,
+                row_number() over (
+                    partition by
+                        tbl1.the_name,  {{ ['a', 'b', 'c', 'd'] | join(', ') }}
+                    order by created_at desc
+                ) rnk
+            from foo
+            inner join tbl2
+                on tbl1.the_name = tbl2.the_name
+        )
+
+test_trailing_semicolon_moves:
+  # This checks that we don't move the semicolon or the comma.
+  fail_str: |
+    SELECT my_very_long_field,
+    FROM foo
+    ORDER BY my_very_long_field;
+  fix_str: |
+    SELECT
+        my_very_long_field,
+    FROM foo
+    ORDER BY
+        my_very_long_field;
+  configs:
+    core:
+      dialect: bigquery
+      max_line_length: 20
+  # After fixing there are still issues, but we're still keeping
+  # the comma and semicolon where they are.
+  violations_after_fix:
+    - description: Line is too long (23 > 20).
+      line_no: 2
+      line_pos: 5
+      name: layout.long_lines
+    - description: Line is too long (23 > 20).
+      line_no: 5
+      line_pos: 5
+      name: layout.long_lines
+
+test_pass_window_function:
+  # Test that we don't flag too eagerly on window functions.
+  pass_str: |
+    select
+        col,
+        rank() over (
+            partition by a, b, c
+            order by d desc
+        ) as rnk
+    from foo
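
The LT05 cases above are driven by a small set of knobs. A consolidated sketch of the options this file exercises (values illustrative):

    configs:
      core:
        max_line_length: 80          # 0 or -1 disables the check entirely
      rules:
        layout.long_lines:
          ignore_comment_lines: true    # skip lines that are only comments
          ignore_comment_clauses: true  # skip COMMENT '...' clauses in DDL
      indentation:
        trailing_comments: after   # move an overlong trailing comment to the
                                   # following line instead of the preceding one
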
diff --git a/test/fixtures/rules/std_rule_cases/L017.yml b/test/fixtures/rules/std_rule_cases/LT06.yml
similarity index 96%
rename from test/fixtures/rules/std_rule_cases/L017.yml
rename to test/fixtures/rules/std_rule_cases/LT06.yml
index e696d5e..38bbd23 100644
--- a/test/fixtures/rules/std_rule_cases/L017.yml
+++ b/test/fixtures/rules/std_rule_cases/LT06.yml
@@ -1,4 +1,4 @@
-rule: L017
+rule: LT06
 
 passing_example:
   pass_str: SELECT SUM(1)
diff --git a/test/fixtures/rules/std_rule_cases/L018.yml b/test/fixtures/rules/std_rule_cases/LT07.yml
similarity index 85%
rename from test/fixtures/rules/std_rule_cases/L018.yml
rename to test/fixtures/rules/std_rule_cases/LT07.yml
index 259db9a..1a5e4d8 100644
--- a/test/fixtures/rules/std_rule_cases/L018.yml
+++ b/test/fixtures/rules/std_rule_cases/LT07.yml
@@ -1,4 +1,4 @@
-rule: L018
+rule: LT07
 
 test_pass_with_clause_closing_aligned:
   # with statement indentation
@@ -65,7 +65,6 @@ test_pass_with_clause_closing_misaligned_indentation_in_templated_block:
     {% endif %}
     select * from cte
 
-
 test_move_parenthesis_to_next_line_in_templated_block:
   fail_str: |
     with
@@ -83,3 +82,21 @@ test_move_parenthesis_to_next_line_in_templated_block:
     )
     {% endif %}
     select * from cte
+
+test_pass_templated_clauses:
+  pass_str: |
+    with
+
+    {% for tbl in ['a', 'b'] %}
+        {{ tbl }} as (
+            SELECT 1
+        ),
+    {% endfor %}
+
+    final as (
+        SELECT 1
+    )
+
+    select * from final
+    join a using (x)
+    join b using (x)
diff --git a/test/fixtures/rules/std_rule_cases/L022.yml b/test/fixtures/rules/std_rule_cases/LT08.yml
similarity index 98%
rename from test/fixtures/rules/std_rule_cases/L022.yml
rename to test/fixtures/rules/std_rule_cases/LT08.yml
index 1776882..ce9ed6e 100644
--- a/test/fixtures/rules/std_rule_cases/L022.yml
+++ b/test/fixtures/rules/std_rule_cases/LT08.yml
@@ -1,7 +1,7 @@
-rule: L022
+rule: LT08
 
 test_pass_blank_line_after_cte_trailing_comma:
-  # Test cases for L022, both leading and trailing commas.
+  # Test cases for LT08, both leading and trailing commas.
   pass_str: |
     with my_cte as (
         select 1
diff --git a/test/fixtures/rules/std_rule_cases/L036.yml b/test/fixtures/rules/std_rule_cases/LT09.yml
similarity index 94%
rename from test/fixtures/rules/std_rule_cases/L036.yml
rename to test/fixtures/rules/std_rule_cases/LT09.yml
index 608b158..1fb0763 100644
--- a/test/fixtures/rules/std_rule_cases/L036.yml
+++ b/test/fixtures/rules/std_rule_cases/LT09.yml
@@ -1,4 +1,4 @@
-rule: L036
+rule: LT09
 
 test_single_select_target_and_no_newline_between_select_and_select_target:
   pass_str: select a from x
@@ -12,7 +12,7 @@ test_single_wildcard_select_target_and_no_newline_between_select_and_select_targ
     from x
   configs:
     rules:
-      L036:
+      layout.select_targets:
         wildcard_policy: multiple
 
 test_single_wildcard_select_target_and_no_newline_between_select_and_select_target_2:
@@ -34,7 +34,7 @@ test_single_select_target_and_newline_after_select_target_2:
     from x
   configs:
     rules:
-      L036:
+      layout.select_targets:
         wildcard_policy: multiple
 
 test_single_select_target_and_newline_before_select_target:
@@ -66,7 +66,7 @@ test_single_wildcard_select_target_and_newline_before_select_target_2:
     from x
   configs:
     rules:
-      L036:
+      layout.select_targets:
         wildcard_policy: multiple
 
 test_single_wildcard_select_target_and_newline_before_select_target_plus_from_on_same_line_1:
@@ -79,7 +79,7 @@ test_single_wildcard_select_target_and_newline_before_select_target_plus_from_on
     from x
   configs:
     rules:
-      L036:
+      layout.select_targets:
         wildcard_policy: multiple
 
 test_single_wildcard_select_target_and_newline_before_select_target_plus_from_on_same_line_2:
@@ -330,3 +330,14 @@ test_create_view:
     SELECT c
     FROM table1
     INNER JOIN table2 ON (table1.id = table2.id);
+
+test_multiline_single:
+  # https://github.com/sqlfluff/sqlfluff/issues/4516
+  pass_str: |
+    SELECT
+        SUM(
+            1 + SUM(
+                2 + 3
+            )
+        ) AS col
+    FROM test_table
diff --git a/test/fixtures/rules/std_rule_cases/L041.yml b/test/fixtures/rules/std_rule_cases/LT10.yml
similarity index 99%
rename from test/fixtures/rules/std_rule_cases/L041.yml
rename to test/fixtures/rules/std_rule_cases/LT10.yml
index ad86194..383e537 100644
--- a/test/fixtures/rules/std_rule_cases/L041.yml
+++ b/test/fixtures/rules/std_rule_cases/LT10.yml
@@ -1,4 +1,4 @@
-rule: L041
+rule: LT10
 
 test_fail_distinct_on_next_line_1:
   fail_str: |
diff --git a/test/fixtures/rules/std_rule_cases/L065.yml b/test/fixtures/rules/std_rule_cases/LT11.yml
similarity index 90%
rename from test/fixtures/rules/std_rule_cases/L065.yml
rename to test/fixtures/rules/std_rule_cases/LT11.yml
index 1dc6cda..9ad3ce8 100644
--- a/test/fixtures/rules/std_rule_cases/L065.yml
+++ b/test/fixtures/rules/std_rule_cases/LT11.yml
@@ -1,4 +1,4 @@
-rule: L065
+rule: LT11
 
 test_fail_simple_fix_union_all_before:
   fail_str: |
@@ -36,8 +36,8 @@ test_pass_multiple_newlines_are_allowed:
 
       SELECT 'b'
 
-# The autofix of L065 doesn't respect indentation of the surrounding query.
-# Hence, the fix result of only L065 looks ugly. But L003 will fix the indentation
+# The autofix of LT11 doesn't respect indentation of the surrounding query.
+# Hence, the fix result of only LT11 looks ugly. But LT02 will fix the indentation
 # in a second step.
 # See the test below.
 test_fail_fix_works_in_subqueries:
@@ -56,8 +56,8 @@ test_fail_fix_works_in_subqueries:
           SELECT 'j'
       )
 
-# Test autofix after L003 passes L065
-test_pass_fix_works_in_subqueries_after_L003_fix:
+# Test that LT11 passes after the LT02 autofix
+test_pass_fix_works_in_subqueries_after_LT02_fix:
   pass_str: |
       SELECT * FROM (
           SELECT 'g'
diff --git a/test/fixtures/rules/std_rule_cases/L009.yml b/test/fixtures/rules/std_rule_cases/LT12.yml
similarity index 99%
rename from test/fixtures/rules/std_rule_cases/L009.yml
rename to test/fixtures/rules/std_rule_cases/LT12.yml
index 50f9e28..86cc0ba 100644
--- a/test/fixtures/rules/std_rule_cases/L009.yml
+++ b/test/fixtures/rules/std_rule_cases/LT12.yml
@@ -1,4 +1,4 @@
-rule: L009
+rule: LT12
 
 test_pass_single_final_newline:
   pass_str: "SELECT foo FROM bar\n"
diff --git a/test/fixtures/rules/std_rule_cases/L050.yml b/test/fixtures/rules/std_rule_cases/LT13.yml
similarity index 99%
rename from test/fixtures/rules/std_rule_cases/L050.yml
rename to test/fixtures/rules/std_rule_cases/LT13.yml
index 25fef99..575e372 100644
--- a/test/fixtures/rules/std_rule_cases/L050.yml
+++ b/test/fixtures/rules/std_rule_cases/LT13.yml
@@ -1,4 +1,4 @@
-rule: L050
+rule: LT13
 
 test_pass_leading_whitespace_statement:
   pass_str: "SELECT foo FROM bar\n"
diff --git a/test/fixtures/rules/std_rule_cases/README.md b/test/fixtures/rules/std_rule_cases/README.md
index 721b871..9319d05 100644
--- a/test/fixtures/rules/std_rule_cases/README.md
+++ b/test/fixtures/rules/std_rule_cases/README.md
@@ -42,7 +42,7 @@ test_keyword_as_identifier:
 
   configs:
     rules:
-      L029:
+      references.keywords:
         only_aliases: false
 ```
 
diff --git a/test/fixtures/rules/std_rule_cases/L026.yml b/test/fixtures/rules/std_rule_cases/RF01.yml
similarity index 98%
rename from test/fixtures/rules/std_rule_cases/L026.yml
rename to test/fixtures/rules/std_rule_cases/RF01.yml
index 28e7711..926a221 100644
--- a/test/fixtures/rules/std_rule_cases/L026.yml
+++ b/test/fixtures/rules/std_rule_cases/RF01.yml
@@ -1,4 +1,4 @@
-rule: L026
+rule: RF01
 
 test_pass_object_referenced_1:
   # References in quotes in bigquery
@@ -8,7 +8,7 @@ test_pass_object_referenced_1:
     core:
       dialect: bigquery
     rules:
-      L026:
+      references.from:
         force_enable: true
 
 test_fail_object_not_referenced_1:
@@ -19,7 +19,7 @@ test_fail_object_not_referenced_1:
     core:
       dialect: bigquery
     rules:
-      L026:
+      references.from:
         force_enable: true
 
 test_fail_object_not_referenced_2:
@@ -47,7 +47,7 @@ test_pass_object_referenced_4:
     core:
       dialect: bigquery
     rules:
-      L026:
+      references.from:
         force_enable: true
 
 test_pass_object_referenced_5a:
@@ -78,7 +78,7 @@ test_pass_object_referenced_5c:
     core:
       dialect: bigquery
     rules:
-      L026:
+      references.from:
         force_enable: true
 
 test_pass_object_referenced_5d:
@@ -179,7 +179,7 @@ test_pass_bigquery_dash:
     core:
       dialect: bigquery
     rules:
-      L026:
+      references.from:
         force_enable: true
 
 test_pass_exasol_select_into:
diff --git a/test/fixtures/rules/std_rule_cases/L027.yml b/test/fixtures/rules/std_rule_cases/RF02.yml
similarity index 98%
rename from test/fixtures/rules/std_rule_cases/L027.yml
rename to test/fixtures/rules/std_rule_cases/RF02.yml
index 682ca9f..52c3a86 100644
--- a/test/fixtures/rules/std_rule_cases/L027.yml
+++ b/test/fixtures/rules/std_rule_cases/RF02.yml
@@ -1,4 +1,4 @@
-rule: L027
+rule: RF02
 
 test_pass_qualified_references_multi_table_statements:
   pass_str: |
@@ -326,7 +326,7 @@ test_pass_ignore_words_column_name:
         ON TRUE
   configs:
     rules:
-      L027:
+      references.qualification:
         ignore_words: test1,test2
 
 test_pass_ignore_words_regex_column_name:
@@ -337,7 +337,7 @@ test_pass_ignore_words_regex_column_name:
         ON TRUE
   configs:
     rules:
-      L027:
+      references.qualification:
         ignore_words_regex: ^_
 
 test_pass_ignore_words_regex_bigquery_declare_example:
@@ -351,7 +351,7 @@ test_pass_ignore_words_regex_bigquery_declare_example:
     core:
       dialect: bigquery
     rules:
-      L027:
+      references.qualification:
         ignore_words_regex: ^_
 
 test_pass_redshift:
diff --git a/test/fixtures/rules/std_rule_cases/L028.yml b/test/fixtures/rules/std_rule_cases/RF03.yml
similarity index 96%
rename from test/fixtures/rules/std_rule_cases/L028.yml
rename to test/fixtures/rules/std_rule_cases/RF03.yml
index d573f4c..9018d56 100644
--- a/test/fixtures/rules/std_rule_cases/L028.yml
+++ b/test/fixtures/rules/std_rule_cases/RF03.yml
@@ -1,4 +1,4 @@
-rule: L028
+rule: RF03
 
 # Mixed qualification of references.
 test_fail_single_table_mixed_qualification_of_references:
@@ -39,7 +39,7 @@ test_fail_single_table_reference_when_unqualified_config:
   fix_str: SELECT bar FROM my_tbl
   configs:
     rules:
-      L028:
+      references.consistent:
         single_table_references: unqualified
 
 test_fail_single_table_reference_when_qualified_config:
@@ -47,7 +47,7 @@ test_fail_single_table_reference_when_qualified_config:
   fix_str: SELECT my_tbl.bar FROM my_tbl WHERE my_tbl.foo
   configs:
     rules:
-      L028:
+      references.consistent:
         single_table_references: qualified
 
 test_pass_single_table_reference_in_subquery:
@@ -69,7 +69,7 @@ test_value_table_functions_do_not_require_qualification:
     core:
       dialect: bigquery
     rules:
-      L028:
+      references.consistent:
         force_enable: true
 
 test_object_references_1a:
@@ -94,7 +94,7 @@ test_object_references_1c:
     core:
       dialect: bigquery
     rules:
-      L028:
+      references.consistent:
         force_enable: true
         single_table_references: qualified
 
@@ -105,7 +105,7 @@ test_object_references_1d:
     core:
       dialect: bigquery
     rules:
-      L028:
+      references.consistent:
         force_enable: true
 
 test_object_references_1e:
@@ -114,7 +114,7 @@ test_object_references_1e:
     core:
       dialect: bigquery
     rules:
-      L028:
+      references.consistent:
         force_enable: true
 
 test_object_references_struct_inconsistent_fix_a:
@@ -124,7 +124,7 @@ test_object_references_struct_inconsistent_fix_a:
     core:
       dialect: bigquery
     rules:
-      L028:
+      references.consistent:
         force_enable: true
 
 test_object_references_1f:
@@ -236,7 +236,7 @@ test_fail_select_alias_in_where_clause_5:
     where alias_col1 > 5
   configs:
     rules:
-      L028:
+      references.consistent:
         single_table_references: unqualified
 
 test_pass_tsql_parameter:
@@ -300,7 +300,7 @@ passes_tql_table_variable:
     core:
       dialect: tsql
     rules:
-      L028:
+      references.consistent:
         single_table_references: qualified
 
 fail_but_dont_fix_templated_table_name_consistent:
@@ -318,5 +318,5 @@ fail_but_dont_fix_templated_table_name_qualified:
     FROM {{ "foo" }}
   configs:
     rules:
-      L028:
+      references.consistent:
         single_table_references: qualified
diff --git a/test/fixtures/rules/std_rule_cases/L029.yml b/test/fixtures/rules/std_rule_cases/RF04.yml
similarity index 88%
rename from test/fixtures/rules/std_rule_cases/L029.yml
rename to test/fixtures/rules/std_rule_cases/RF04.yml
index b8688b9..deabbd8 100644
--- a/test/fixtures/rules/std_rule_cases/L029.yml
+++ b/test/fixtures/rules/std_rule_cases/RF04.yml
@@ -1,4 +1,4 @@
-rule: L029
+rule: RF04
 
 test_pass_valid_identifier:
   pass_str: CREATE TABLE artist(artist_name TEXT)
@@ -20,28 +20,28 @@ test_fail_keyword_as_identifier_not_alias_all:
   fail_str: SELECT parameter
   configs:
     rules:
-      L029:
+      references.keywords:
         unquoted_identifiers_policy: all
 
 test_pass_valid_identifier_table_alias_column_alias_config:
   pass_str: SELECT x FROM tbl AS parameter
   configs:
     rules:
-      L029:
+      references.keywords:
         unquoted_identifiers_policy: column_aliases
 
 test_fail_keyword_as_identifier_column_alias_config:
   fail_str: SELECT x AS date FROM tbl AS parameter
   configs:
     rules:
-      L029:
+      references.keywords:
         unquoted_identifiers_policy: column_aliases
 
 test_pass_valid_quoted_identifier:
   pass_str: CREATE TABLE [artist]([artist_name] TEXT)
   configs:
     rules:
-      L029:
+      references.keywords:
         quoted_identifiers_policy: aliases
     core:
       dialect: tsql
@@ -50,21 +50,21 @@ test_fail_keyword_as_quoted_identifier_column:
   fail_str: CREATE TABLE "artist"("create" TEXT)
   configs:
     rules:
-      L029:
+      references.keywords:
         quoted_identifiers_policy: aliases
 
 test_pass_keyword_as_quoted_identifier_column_none_policy:
   pass_str: CREATE TABLE "artist"("create" TEXT)
   configs:
     rules:
-      L029:
+      references.keywords:
         quoted_identifiers_policy: none
 
 test_fail_keyword_as_quoted_identifier_column_alias:
   fail_str: SELECT 1 as [parameter]
   configs:
     rules:
-      L029:
+      references.keywords:
         quoted_identifiers_policy: aliases
     core:
       dialect: tsql
@@ -73,7 +73,7 @@ test_fail_keyword_as_quoted_identifier_table_alias:
   fail_str: SELECT [x] FROM [tbl] AS [parameter]
   configs:
     rules:
-      L029:
+      references.keywords:
         quoted_identifiers_policy: aliases
     core:
       dialect: tsql
@@ -83,7 +83,7 @@ test_pass_valid_quoted_identifier_not_alias:
   pass_str: SELECT [parameter]
   configs:
     rules:
-      L029:
+      references.keywords:
         quoted_identifiers_policy: aliases
     core:
       dialect: tsql
@@ -92,7 +92,7 @@ test_fail_keyword_as_quoted_identifier_not_alias_all:
   fail_str: SELECT [parameter]
   configs:
     rules:
-      L029:
+      references.keywords:
         quoted_identifiers_policy: all
     core:
       dialect: tsql
@@ -101,7 +101,7 @@ test_pass_valid_quoted_identifier_table_alias_column_alias_config:
   pass_str: SELECT [x] FROM [tbl] AS [parameter]
   configs:
     rules:
-      L029:
+      references.keywords:
         quoted_identifiers_policy: column_aliases
     core:
       dialect: tsql
@@ -110,7 +110,7 @@ test_fail_keyword_as_quoted_identifier_column_alias_config:
   fail_str: SELECT [x] AS [date] FROM [tbl] AS [parameter]
   configs:
     rules:
-      L029:
+      references.keywords:
         quoted_identifiers_policy: column_aliases
     core:
       dialect: tsql
@@ -119,7 +119,7 @@ test_pass_ignore_word1:
   pass_str: CREATE TABLE artist(create TEXT)
   configs:
     rules:
-      L029:
+      references.keywords:
         ignore_words: create
 
 
@@ -127,14 +127,14 @@ test_pass_ignore_word2:
   pass_str: SELECT col1 AS date FROM table1
   configs:
     rules:
-      L029:
+      references.keywords:
         ignore_words: date
 
 test_pass_ignore_words_regex1:
   pass_str: CREATE TABLE artist(create TEXT)
   configs:
     rules:
-      L029:
+      references.keywords:
         ignore_words_regex: ^cr
 
 
@@ -142,7 +142,7 @@ test_pass_ignore_words_regex2:
   pass_str: SELECT col1 AS date FROM table1
   configs:
     rules:
-      L029:
+      references.keywords:
         ignore_words_regex: ^da
 
 test_pass_one_character_identifier:
diff --git a/test/fixtures/rules/std_rule_cases/L057.yml b/test/fixtures/rules/std_rule_cases/RF05.yml
similarity index 92%
rename from test/fixtures/rules/std_rule_cases/L057.yml
rename to test/fixtures/rules/std_rule_cases/RF05.yml
index 0cd4ecb..4b54290 100644
--- a/test/fixtures/rules/std_rule_cases/L057.yml
+++ b/test/fixtures/rules/std_rule_cases/RF05.yml
@@ -1,4 +1,4 @@
-rule: L057
+rule: RF05
 
 test_fail_special_chars_create_table_space:
   fail_str:
@@ -85,7 +85,7 @@ test_pass_special_chars_create_table_space_allowed:
     )
   configs:
     rules:
-      L057:
+      references.special_chars:
         allow_space_in_identifier: true
 
 test_fail_special_chars_quoted_policy_alias:
@@ -94,7 +94,7 @@ test_fail_special_chars_quoted_policy_alias:
     FROM DBO.ColumnNames as "alias with space"
   configs:
     rules:
-      L057:
+      references.special_chars:
         quoted_identifiers_policy: aliases
 
 test_fail_special_chars_quoted_policy_column_alias:
@@ -103,7 +103,7 @@ test_fail_special_chars_quoted_policy_column_alias:
     FROM DBO.ColumnNames
   configs:
     rules:
-      L057:
+      references.special_chars:
         quoted_identifiers_policy: column_aliases
 
 test_fail_special_chars_unquoted_policy_aliases:
@@ -112,7 +112,7 @@ test_fail_special_chars_unquoted_policy_aliases:
     FROM DBO.ColumnNames as aliashash#
   configs:
     rules:
-      L057:
+      references.special_chars:
         unquoted_identifiers_policy: aliases
     core:
       dialect: tsql
@@ -123,7 +123,7 @@ test_fail_special_chars_unquoted_policy_column_aliases:
     FROM DBO.ColumnNames
   configs:
     rules:
-      L057:
+      references.special_chars:
         unquoted_identifiers_policy: column_aliases
     core:
       dialect: tsql
@@ -134,7 +134,7 @@ test_pass_special_chars_quoted_policy_aliases:
     FROM DBO.ColumnNames
   configs:
     rules:
-      L057:
+      references.special_chars:
         quoted_identifiers_policy: aliases
 
 test_pass_special_chars_quoted_policy_none:
@@ -143,7 +143,7 @@ test_pass_special_chars_quoted_policy_none:
     FROM DBO.ColumnNames
   configs:
     rules:
-      L057:
+      references.special_chars:
         quoted_identifiers_policy: none
 
 test_pass_special_chars_unquoted_policy_aliases:
@@ -152,7 +152,7 @@ test_pass_special_chars_unquoted_policy_aliases:
     FROM DBO.ColumnNames
   configs:
     rules:
-      L057:
+      references.special_chars:
         unquoted_identifiers_policy: aliases
     core:
       dialect: tsql
@@ -163,7 +163,7 @@ test_pass_special_chars_unquoted_policy_column_aliases:
     FROM DBO.ColumnNames as AliasHash#
   configs:
     rules:
-      L057:
+      references.special_chars:
         unquoted_identifiers_policy: column_aliases
     core:
       dialect: tsql
@@ -174,7 +174,7 @@ test_pass_special_chars_quoted_policy_column_aliases:
     FROM DBO.ColumnNames as "alias with space"
   configs:
     rules:
-      L057:
+      references.special_chars:
         quoted_identifiers_policy: column_aliases
 
 test_pass_dots_bigquery:
@@ -202,7 +202,7 @@ test_fail_star_bigquery:
       dialect: bigquery
 
 test_pass_hyphen_bigquery_quotes_1:
-  # Passes without setting 'additional_allowed_characters' because L057 allows
+  # Passes without setting 'additional_allowed_characters' because RF05 allows
   # hyphens in BigQuery by default.
   pass_str:
     SELECT a
@@ -221,7 +221,7 @@ test_pass_hyphen_bigquery_quotes_2:
     core:
       dialect: bigquery
     rules:
-      L057:
+      references.special_chars:
         additional_allowed_characters: '+'
 
 test_pass_hyphen_bigquery_no_quotes:
@@ -241,7 +241,7 @@ test_pass_dot_bigquery:
     core:
       dialect: bigquery
     rules:
-      L057:
+      references.special_chars:
         additional_allowed_characters: '-.'
 
 test_fail_single_quote_bigquery:
@@ -252,7 +252,7 @@ test_fail_single_quote_bigquery:
     core:
       dialect: bigquery
     rules:
-      L057:
+      references.special_chars:
         additional_allowed_characters: '-.'
 
 test_pass_single_quote_bigquery:
@@ -263,7 +263,7 @@ test_pass_single_quote_bigquery:
     core:
       dialect: bigquery
     rules:
-      L057:
+      references.special_chars:
         additional_allowed_characters: '-''.'
 
 test_pass_single_quote2_bigquery:
@@ -274,7 +274,7 @@ test_pass_single_quote2_bigquery:
     core:
       dialect: bigquery
     rules:
-      L057:
+      references.special_chars:
         additional_allowed_characters: "-'."
 
 test_pass_dot_slash_identifier_in_file_reference_sparksql:
@@ -346,7 +346,7 @@ test_pass_ignore_lists_quoted:
     SELECT a as 'aliashash#'
   configs:
     rules:
-      L057:
+      references.special_chars:
         ignore_words: aliashash#
 
 test_pass_ignore_lists_quoted_fail:
@@ -358,7 +358,7 @@ test_pass_ignore_lists_quoted_mixed_case:
     SELECT a as 'aliasHash#'
   configs:
     rules:
-      L057:
+      references.special_chars:
         ignore_words: aliashash#
 
 test_pass_ignore_lists_unquoted:
@@ -368,7 +368,7 @@ test_pass_ignore_lists_unquoted:
     core:
       dialect: postgres
     rules:
-      L057:
+      references.special_chars:
         ignore_words: alias$
 
 test_pass_ignore_lists_unquoted_fail:
@@ -385,7 +385,7 @@ test_pass_ignore_lists_unquoted_mixed_case:
     core:
       dialect: postgres
     rules:
-      L057:
+      references.special_chars:
         ignore_words: alias$
 
 test_pass_ignore_words_regex_unquoted:
@@ -395,7 +395,7 @@ test_pass_ignore_words_regex_unquoted:
     core:
       dialect: postgres
     rules:
-      L057:
+      references.special_chars:
         ignore_words_regex: lias\$$
 
 test_pass_ignore_words_regex_quoted:
@@ -403,7 +403,7 @@ test_pass_ignore_words_regex_quoted:
     SELECT a as 'aliashash#'
   configs:
     rules:
-      L057:
+      references.special_chars:
         ignore_words_regex: hash#$
 
 test_fail_ignore_words_regex_quoted_mixed_case:
@@ -411,7 +411,7 @@ test_fail_ignore_words_regex_quoted_mixed_case:
     SELECT a as 'aliasHash#'
   configs:
     rules:
-      L057:
+      references.special_chars:
         ignore_words_regex: hash#$
 
 test_pass_special_chars_show_tblproperties:
diff --git a/test/fixtures/rules/std_rule_cases/L059.yml b/test/fixtures/rules/std_rule_cases/RF06.yml
similarity index 87%
rename from test/fixtures/rules/std_rule_cases/L059.yml
rename to test/fixtures/rules/std_rule_cases/RF06.yml
index 1ce9156..186c019 100644
--- a/test/fixtures/rules/std_rule_cases/L059.yml
+++ b/test/fixtures/rules/std_rule_cases/RF06.yml
@@ -1,4 +1,4 @@
-rule: L059
+rule: RF06
 
 test_pass_column_reference:
   pass_str: |
@@ -53,7 +53,7 @@ test_pass_column_reference_prefer_quoted_ansi:
     SELECT 123 AS "foo";
   configs:
     rules:
-      L059:
+      references.quoting:
         prefer_quoted_identifiers: true
 
 test_fail_column_reference_prefer_quoted_ansi:
@@ -61,7 +61,7 @@ test_fail_column_reference_prefer_quoted_ansi:
     SELECT 123 AS foo;
   configs:
     rules:
-      L059:
+      references.quoting:
         prefer_quoted_identifiers: true
 
 test_pass_table_reference_prefer_quoted_ansi:
@@ -70,7 +70,7 @@ test_pass_table_reference_prefer_quoted_ansi:
     FROM "bar";
   configs:
     rules:
-      L059:
+      references.quoting:
         prefer_quoted_identifiers: true
 
 test_fail_table_reference_prefer_quoted_ansi:
@@ -79,7 +79,7 @@ test_fail_table_reference_prefer_quoted_ansi:
     FROM bar;
   configs:
     rules:
-      L059:
+      references.quoting:
         prefer_quoted_identifiers: true
 
 test_pass_multiple_references_prefer_quoted_ansi:
@@ -88,7 +88,7 @@ test_pass_multiple_references_prefer_quoted_ansi:
     FROM "bar"."baz";
   configs:
     rules:
-      L059:
+      references.quoting:
         prefer_quoted_identifiers: true
 
 test_fail_multiple_references_prefer_quoted_ansi:
@@ -97,7 +97,7 @@ test_fail_multiple_references_prefer_quoted_ansi:
     FROM bar.baz;
   configs:
     rules:
-      L059:
+      references.quoting:
         prefer_quoted_identifiers: true
 
 test_pass_whitespace_prefer_quoted_ansi:
@@ -105,7 +105,7 @@ test_pass_whitespace_prefer_quoted_ansi:
     SELECT 123 AS "I cannot be unquoted"
   configs:
     rules:
-      L059:
+      references.quoting:
         prefer_quoted_identifiers: true
 
 test_pass_special_symbols_prefer_quoted_ansi:
@@ -113,7 +113,7 @@ test_pass_special_symbols_prefer_quoted_ansi:
     SELECT 123 AS "I-c@nn0t-be~un-quoted"
   configs:
     rules:
-      L059:
+      references.quoting:
         prefer_quoted_identifiers: true
 
 test_pass_reserved_keyword_prefer_quoted_ansi:
@@ -121,7 +121,7 @@ test_pass_reserved_keyword_prefer_quoted_ansi:
     SELECT 123 AS "SELECT"
   configs:
     rules:
-      L059:
+      references.quoting:
         prefer_quoted_identifiers: true
 
 test_pass_column_reference_prefer_quoted_backticks:
@@ -131,7 +131,7 @@ test_pass_column_reference_prefer_quoted_backticks:
     core:
       dialect: bigquery
     rules:
-      L059:
+      references.quoting:
         prefer_quoted_identifiers: true
 
 test_fail_column_reference_prefer_quoted_backticks:
@@ -141,7 +141,7 @@ test_fail_column_reference_prefer_quoted_backticks:
     core:
       dialect: bigquery
     rules:
-      L059:
+      references.quoting:
         prefer_quoted_identifiers: true
 
 test_pass_table_reference_prefer_quoted_backticks:
@@ -152,7 +152,7 @@ test_pass_table_reference_prefer_quoted_backticks:
     core:
       dialect: bigquery
     rules:
-      L059:
+      references.quoting:
         prefer_quoted_identifiers: true
 
 test_fail_table_reference_prefer_quoted_backticks:
@@ -163,7 +163,7 @@ test_fail_table_reference_prefer_quoted_backticks:
     core:
       dialect: bigquery
     rules:
-      L059:
+      references.quoting:
         prefer_quoted_identifiers: true
 
 test_pass_multiple_references_prefer_quoted_backticks:
@@ -174,7 +174,7 @@ test_pass_multiple_references_prefer_quoted_backticks:
     core:
       dialect: bigquery
     rules:
-      L059:
+      references.quoting:
         prefer_quoted_identifiers: true
 
 test_fail_multiple_references_prefer_quoted_backticks:
@@ -185,7 +185,7 @@ test_fail_multiple_references_prefer_quoted_backticks:
     core:
       dialect: bigquery
     rules:
-      L059:
+      references.quoting:
         prefer_quoted_identifiers: true
 
 test_pass_whitespace_prefer_quoted_backticks:
@@ -195,7 +195,7 @@ test_pass_whitespace_prefer_quoted_backticks:
     core:
       dialect: bigquery
     rules:
-      L059:
+      references.quoting:
         prefer_quoted_identifiers: true
 
 test_pass_special_symbols_prefer_quoted_backticks:
@@ -205,7 +205,7 @@ test_pass_special_symbols_prefer_quoted_backticks:
     core:
       dialect: bigquery
     rules:
-      L059:
+      references.quoting:
         prefer_quoted_identifiers: true
 
 test_pass_reserved_keyword_prefer_quoted_backticks:
@@ -215,7 +215,7 @@ test_pass_reserved_keyword_prefer_quoted_backticks:
     core:
       dialect: bigquery
     rules:
-      L059:
+      references.quoting:
         prefer_quoted_identifiers: true
 
 test_pass_datetime_redshift:
@@ -267,7 +267,7 @@ test_pass_ignore_lists:
     SELECT 123 AS "foo";
   configs:
     rules:
-      L059:
+      references.quoting:
         ignore_words: foo
 
 test_pass_ignore_lists_mixed_case:
@@ -275,7 +275,7 @@ test_pass_ignore_lists_mixed_case:
     SELECT 123 AS "Foo";
   configs:
     rules:
-      L059:
+      references.quoting:
         ignore_words: foo
 
 test_pass_ignore_words_regex:
@@ -283,7 +283,7 @@ test_pass_ignore_words_regex:
     SELECT 123 AS "foo";
   configs:
     rules:
-      L059:
+      references.quoting:
         ignore_words_regex: ^fo
 
 test_pass_ignore_words_regex_mixed_case:
@@ -291,7 +291,7 @@ test_pass_ignore_words_regex_mixed_case:
     SELECT 123 AS "Foo";
   configs:
     rules:
-      L059:
+      references.quoting:
         ignore_words_regex: ^Fo
 
 test_pass_ignore_if:
@@ -299,7 +299,7 @@ test_pass_ignore_if:
     DROP TABLE IF EXISTS "example";
   configs:
     rules:
-      L059:
+      references.quoting:
         prefer_quoted_identifiers: true
 
 test_fail_insert_overwrite_directory:
@@ -355,7 +355,7 @@ test_fail_quoted_column_snowflake_force_enable:
     core:
       dialect: snowflake
     rules:
-      L059:
+      references.quoting:
         force_enable: true
 
 test_pass_quoted_column_postgres:
@@ -378,5 +378,23 @@ test_fail_quoted_column_postgres_force_enable:
     core:
       dialect: postgres
     rules:
-      L059:
+      references.quoting:
         force_enable: true
+
+test_pass_prefer_quoted_keywords_athena:
+  pass_str: SELECT 1 AS "metadata"
+  configs:
+    rules:
+      references.quoting:
+        prefer_quoted_keywords: true
+    core:
+      dialect: athena
+
+test_fail_prefer_quoted_keywords_athena:
+  fail_str: SELECT 1 AS metadata
+  configs:
+    rules:
+      references.quoting:
+        prefer_quoted_keywords: true
+    core:
+      dialect: athena
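
The fixture above now addresses the rule by its dotted name, references.quoting (previously L059). As a minimal sketch, assuming the public Python API and the nested configs mapping that test/rules/std_test.py below validates, the same option can be exercised directly; the query and expected result are illustrative:

    from sqlfluff.core import FluffConfig, Linter

    # Mirror the fixture: prefer quoted identifiers under the ansi dialect.
    cfg = FluffConfig(
        configs={"rules": {"references.quoting": {"prefer_quoted_identifiers": True}}},
        overrides={"dialect": "ansi"},
    )
    linted = Linter(config=cfg).lint_string("SELECT 123 AS foo;\n")
    print([v.rule.code for v in linted.violations])  # RF06 should be among these
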
diff --git a/test/fixtures/rules/std_rule_cases/L035.yml b/test/fixtures/rules/std_rule_cases/ST01.yml
similarity index 99%
rename from test/fixtures/rules/std_rule_cases/L035.yml
rename to test/fixtures/rules/std_rule_cases/ST01.yml
index be655e8..4d3c79c 100644
--- a/test/fixtures/rules/std_rule_cases/L035.yml
+++ b/test/fixtures/rules/std_rule_cases/ST01.yml
@@ -1,4 +1,4 @@
-rule: L035
+rule: ST01
 
 no_redundant_else_null:
   pass_str: |
diff --git a/test/fixtures/rules/std_rule_cases/L043.yml b/test/fixtures/rules/std_rule_cases/ST02.yml
similarity index 99%
rename from test/fixtures/rules/std_rule_cases/L043.yml
rename to test/fixtures/rules/std_rule_cases/ST02.yml
index ac762b9..7800c46 100644
--- a/test/fixtures/rules/std_rule_cases/L043.yml
+++ b/test/fixtures/rules/std_rule_cases/ST02.yml
@@ -1,4 +1,4 @@
-rule: L043
+rule: ST02
 
 test_pass_case_cannot_be_reduced_1:
   pass_str: |
@@ -162,7 +162,7 @@ test_fail_unnecessary_case_6:
     select
         subscriptions_xf.metadata_migrated,
 
-        case  -- BEFORE L043 FIX
+        case  -- BEFORE ST02 FIX
             when perks.perk is null then false
             else true
         end as perk_redeemed,
diff --git a/test/fixtures/rules/std_rule_cases/L045.yml b/test/fixtures/rules/std_rule_cases/ST03.yml
similarity index 99%
rename from test/fixtures/rules/std_rule_cases/L045.yml
rename to test/fixtures/rules/std_rule_cases/ST03.yml
index e95727e..4f82d4f 100644
--- a/test/fixtures/rules/std_rule_cases/L045.yml
+++ b/test/fixtures/rules/std_rule_cases/ST03.yml
@@ -1,4 +1,4 @@
-rule: L045
+rule: ST03
 
 test_pass_no_cte_defined_1:
   pass_str: select * from t
diff --git a/test/fixtures/rules/std_rule_cases/L058.yml b/test/fixtures/rules/std_rule_cases/ST04.yml
similarity index 99%
rename from test/fixtures/rules/std_rule_cases/L058.yml
rename to test/fixtures/rules/std_rule_cases/ST04.yml
index c7e34f1..bb661bb 100644
--- a/test/fixtures/rules/std_rule_cases/L058.yml
+++ b/test/fixtures/rules/std_rule_cases/ST04.yml
@@ -1,4 +1,4 @@
-rule: L058
+rule: ST04
 
 test_pass_1:
   # The nested CASE is under a "WHEN", not an "ELSE".
diff --git a/test/fixtures/rules/std_rule_cases/L042.yml b/test/fixtures/rules/std_rule_cases/ST05.yml
similarity index 95%
rename from test/fixtures/rules/std_rule_cases/L042.yml
rename to test/fixtures/rules/std_rule_cases/ST05.yml
index 1c97ccd..d0a2274 100644
--- a/test/fixtures/rules/std_rule_cases/L042.yml
+++ b/test/fixtures/rules/std_rule_cases/ST05.yml
@@ -1,4 +1,4 @@
-rule: L042
+rule: ST05
 
 select_fail:
   fail_str: |
@@ -96,7 +96,7 @@ double_nested_fail:
     join b on (a.x = b.x)
   configs:
     rules:
-      L042:
+      structure.subquery:
         forbid_subquery_in: both
 
 double_nested_fail_2:
@@ -123,9 +123,10 @@ double_nested_fail_2:
     - description: select_statement clauses should not contain subqueries. Use CTEs instead
       line_no: 2
       line_pos: 20
+      name: structure.subquery
   configs:
     rules:
-      L042:
+      structure.subquery:
         forbid_subquery_in: both
 
 unfixable_cte_clash:
@@ -158,9 +159,10 @@ unfixable_cte_clash:
     - description: select_statement clauses should not contain subqueries. Use CTEs instead
       line_no: 5
       line_pos: 20
+      name: structure.subquery
   configs:
     rules:
-      L042:
+      structure.subquery:
         forbid_subquery_in: both
 
 with_recursive_fail_no_fix:
@@ -196,7 +198,7 @@ select_multijoin_fail:
     join b using (x)
   configs:
     rules:
-      L042:
+      structure.subquery:
         forbid_subquery_in: both
 
 with_fail:
@@ -262,7 +264,7 @@ from_clause_pass:
     ) as a
   configs:
     rules:
-      L042:
+      structure.subquery:
         forbid_subquery_in: join
 
 from_clause_fail:
@@ -281,7 +283,7 @@ from_clause_fail:
     from a
   configs:
     rules:
-      L042:
+      structure.subquery:
         forbid_subquery_in: from
 
 both_clause_fail:
@@ -300,7 +302,7 @@ both_clause_fail:
     from a
   configs:
     rules:
-      L042:
+      structure.subquery:
         forbid_subquery_in: both
 
 no_inner_from_pass:
@@ -412,7 +414,7 @@ issue_3623_internal_error_multiple_templated_files:
     core:
       dialect: tsql
     rules:
-      L042:
+      structure.subquery:
         forbid_subquery_in: both
 
 issue_3622_no_space_after_from:
@@ -435,7 +437,7 @@ issue_3622_no_space_after_from:
     FROM x
   configs:
     rules:
-      L042:
+      structure.subquery:
         forbid_subquery_in: both
 
 issue_3617_parentheses_around_ctas_select:
@@ -451,7 +453,7 @@ issue_3617_parentheses_around_ctas_select:
     )
   configs:
     rules:
-      L042:
+      structure.subquery:
         forbid_subquery_in: both
 
 issue_3572_correlated_subquery_1:
@@ -502,7 +504,7 @@ issue_3598_avoid_looping_1:
     SELECT a FROM cte1
   configs:
     rules:
-      L042:
+      structure.subquery:
         forbid_subquery_in: both
 
 issue_3598_avoid_looping_2:
@@ -521,7 +523,7 @@ issue_3598_avoid_looping_2:
     SELECT * FROM cte1
   configs:
     rules:
-      L042:
+      structure.subquery:
         forbid_subquery_in: both
 
 test_fail_subquery_in_cte:
@@ -544,7 +546,7 @@ test_fail_subquery_in_cte:
     from b
   configs:
     rules:
-      L042:
+      structure.subquery:
         forbid_subquery_in: both
 
 test_fail_subquery_in_cte_2:
@@ -593,7 +595,7 @@ test_fail_subquery_in_cte_2:
     from b
   configs:
     rules:
-      L042:
+      structure.subquery:
         forbid_subquery_in: both
 
 test_fail_subquery_in_cte_3:
@@ -632,5 +634,5 @@ test_fail_subquery_in_cte_3:
     from b
   configs:
     rules:
-      L042:
+      structure.subquery:
         forbid_subquery_in: both
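
This fixture drives the forbid_subquery_in option of structure.subquery (previously L042) through its from/join/both values. A minimal sketch of setting the same option programmatically, using the set_value pattern that appears in the test updates later in this diff (the query is illustrative):

    from sqlfluff.core import FluffConfig, Linter

    cfg = FluffConfig(overrides={"dialect": "ansi"})
    cfg.set_value(
        config_path=["rules", "structure.subquery", "forbid_subquery_in"],
        val="both",
    )
    linted = Linter(config=cfg).lint_string(
        "select a.x\nfrom (select x from b) as a\njoin c using (x)\n"
    )
    print([v.rule.code for v in linted.violations])  # ST05 should appear
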
diff --git a/test/fixtures/rules/std_rule_cases/L034.yml b/test/fixtures/rules/std_rule_cases/ST06.yml
similarity index 99%
rename from test/fixtures/rules/std_rule_cases/L034.yml
rename to test/fixtures/rules/std_rule_cases/ST06.yml
index 78c975c..9d8e061 100644
--- a/test/fixtures/rules/std_rule_cases/L034.yml
+++ b/test/fixtures/rules/std_rule_cases/ST06.yml
@@ -1,4 +1,4 @@
-rule: L034
+rule: ST06
 
 test_pass_select_statement_order:
   pass_str: |
diff --git a/test/fixtures/rules/std_rule_cases/L032.yml b/test/fixtures/rules/std_rule_cases/ST07.yml
similarity index 91%
rename from test/fixtures/rules/std_rule_cases/L032.yml
rename to test/fixtures/rules/std_rule_cases/ST07.yml
index 79b0c82..5d64972 100644
--- a/test/fixtures/rules/std_rule_cases/L032.yml
+++ b/test/fixtures/rules/std_rule_cases/ST07.yml
@@ -1,4 +1,4 @@
-rule: L032
+rule: ST07
 
 test_pass_specify_join_keys:
   pass_str: select x.a from x inner join y on x.id = y.id
@@ -38,6 +38,7 @@ test_partial_fixed_up_to_2nd_join:
     - description: Found USING statement. Expected only ON statements.
       line_no: 4
       line_pos: 14
+      name: structure.using
 
 select_using_fail:
   fail_str: |
@@ -65,3 +66,9 @@ fail_but_dont_fix_templated_table_names:
     FROM
         {{ "table_a" }}
     INNER JOIN table_b USING (id)
+
+test_pass_clickhouse:
+  pass_str: SELECT * FROM test1 as t1 LEFT SEMI JOIN test2 USING ty1,ty2;
+  configs:
+    core:
+      dialect: clickhouse
diff --git a/test/fixtures/rules/std_rule_cases/L015.yml b/test/fixtures/rules/std_rule_cases/ST08.yml
similarity index 99%
rename from test/fixtures/rules/std_rule_cases/L015.yml
rename to test/fixtures/rules/std_rule_cases/ST08.yml
index a7f647f..982e866 100644
--- a/test/fixtures/rules/std_rule_cases/L015.yml
+++ b/test/fixtures/rules/std_rule_cases/ST08.yml
@@ -1,4 +1,4 @@
-rule: L015
+rule: ST08
 
 test_fail_distinct_with_parenthesis_1:
   # Check we get fails for using DISTINCT apparently incorrectly
diff --git a/test/fixtures/rules/std_rule_cases/L056.yml b/test/fixtures/rules/std_rule_cases/TQ01.yml
similarity index 99%
rename from test/fixtures/rules/std_rule_cases/L056.yml
rename to test/fixtures/rules/std_rule_cases/TQ01.yml
index 7b92cfb..3e5e659 100644
--- a/test/fixtures/rules/std_rule_cases/L056.yml
+++ b/test/fixtures/rules/std_rule_cases/TQ01.yml
@@ -1,4 +1,4 @@
-rule: L056
+rule: TQ01
 
 test_fail_sp_prefix_1:
   fail_str: |
diff --git a/test/fixtures/templater/jinja_c_dbt/dbt_builtins_test.sql b/test/fixtures/templater/jinja_c_dbt/dbt_builtins_test.sql
new file mode 100644
index 0000000..2df7604
--- /dev/null
+++ b/test/fixtures/templater/jinja_c_dbt/dbt_builtins_test.sql
@@ -0,0 +1,11 @@
+{% test my_cool_test(model, column_name, kwarg1=none, kwarg2=none) %}
+SELECT {{ column_name }} FROM {{ model }}
+WHERE thing = 1
+{% if kwarg1 %}
+AND otherthing = 2
+{% endif %}
+{% if kwarg2 %}
+AND anotherthing = 3
+{% endif %}
+{% endtest %}
+-- no sql produced
diff --git a/test/fixtures/templater/jinja_c_dbt/dbt_builtins_test.yml b/test/fixtures/templater/jinja_c_dbt/dbt_builtins_test.yml
new file mode 100644
index 0000000..8be5edd
--- /dev/null
+++ b/test/fixtures/templater/jinja_c_dbt/dbt_builtins_test.yml
@@ -0,0 +1 @@
+file: []
diff --git a/test/fixtures/templater/jinja_l_metas/001.yml b/test/fixtures/templater/jinja_l_metas/001.yml
index f5ea219..2bf9238 100644
--- a/test/fixtures/templater/jinja_l_metas/001.yml
+++ b/test/fixtures/templater/jinja_l_metas/001.yml
@@ -1,5 +1,9 @@
 file:
-  - placeholder: '{% set some_condition %}...{% endset %}'
+  - placeholder: '{% set some_condition %}'
+  - indent: ''
+  - placeholder: 'TRUE'
+  - dedent: ''
+  - placeholder: '{% endset %}'
   - newline: "\n"
   - newline: "\n"
   - statement:
diff --git a/test/fixtures/templater/jinja_l_metas/002.yml b/test/fixtures/templater/jinja_l_metas/002.yml
index 9bcffb1..8f4f0b1 100644
--- a/test/fixtures/templater/jinja_l_metas/002.yml
+++ b/test/fixtures/templater/jinja_l_metas/002.yml
@@ -5,17 +5,21 @@ file:
       - keyword: SELECT
       - indent: ''
       - newline: "\n"
-      # NB: We *shouldn't* have a template_loop here.
-      - whitespace: '          '
+      # NB: We end up with double whitespace here
+      # because one is literal and one is templated.
+      - whitespace: '    '
+      - whitespace: '      '
       - select_clause_element:
           column_reference:
             naked_identifier: c2
           newline: "\n"
           whitespace: ' '
           alias_expression:
+            indent: ''
             keyword: AS
             whitespace: ' '
             naked_identifier: other_id
+            dedent: ''
       - comma: ','
       - newline: "\n"
       - whitespace: '    '
@@ -111,7 +115,11 @@ file:
         - newline: "\n"
         - whitespace: '        '
         - dedent: ''
-        - placeholder: '{% else %}... [103 unused template characters] ...{% endif %}'
+        - placeholder: '{% else %}'
+        - indent: ''
+        - placeholder: '... [103 unused template characters] ...'
+        - dedent: ''
+        - placeholder: '{% endif %}'
         - newline: "\n"
         - whitespace: '    '
         - dedent: ''
@@ -119,7 +127,11 @@ file:
         - indent: ''
         - newline: "\n"
         - whitespace: '        '
-        - placeholder: '{% if loop.first %}... [49 unused template characters] ...{% else %}'
+        - placeholder: '{% if loop.first %}'
+        - indent: ''
+        - placeholder: '... [49 unused template characters] ...'
+        - dedent: ''
+        - placeholder: '{% else %}'
         - indent: ''
         - newline: "\n"
         - whitespace: '        '
@@ -135,6 +147,8 @@ file:
                   naked_identifier: b_raw_effect_sizes
           - newline: "\n"
           - whitespace: '        '
+          - dedent: ''
+          - indent: ''
           - keyword: USING
           - indent: ''
           - newline: "\n"
@@ -162,7 +176,11 @@ file:
         - indent: ''
         - newline: "\n"
         - whitespace: '        '
-        - placeholder: '{% if loop.first %}... [49 unused template characters] ...{% else %}'
+        - placeholder: '{% if loop.first %}'
+        - indent: ''
+        - placeholder: '... [49 unused template characters] ...'
+        - dedent: ''
+        - placeholder: '{% else %}'
         - indent: ''
         - newline: "\n"
         - whitespace: '        '
@@ -177,6 +195,8 @@ file:
                   naked_identifier: c_raw_effect_sizes
           - newline: "\n"
           - whitespace: '        '
+          - dedent: ''
+          - indent: ''
           - keyword: USING
           - indent: ''
           - newline: "\n"
diff --git a/test/fixtures/templater/jinja_l_metas/003.yml b/test/fixtures/templater/jinja_l_metas/003.yml
index c75bd81..50bfe46 100644
--- a/test/fixtures/templater/jinja_l_metas/003.yml
+++ b/test/fixtures/templater/jinja_l_metas/003.yml
@@ -11,5 +11,9 @@ file:
         dedent: ""
   - newline: "\n"
   - whitespace: '  '
-  - placeholder: '{% if false %}... [11 unused template characters] ...{% endif %}'
+  - placeholder: '{% if false %}'
+  - indent: ""
+  - placeholder: '... [11 unused template characters] ...'
+  - dedent: ""
+  - placeholder: '{% endif %}'
   - end_of_file: ""
diff --git a/test/fixtures/templater/jinja_l_metas/006.yml b/test/fixtures/templater/jinja_l_metas/006.yml
index 23e5eb1..489d6bb 100644
--- a/test/fixtures/templater/jinja_l_metas/006.yml
+++ b/test/fixtures/templater/jinja_l_metas/006.yml
@@ -16,7 +16,8 @@ file:
             - whitespace: ' '
             - numeric_literal: '1'
         dedent: ""
+  - placeholder: "\n"
   - dedent: ""
-  - placeholder: "\n{%- endif %}"
+  - placeholder: "{%- endif %}"
   - newline: "\n"
   - end_of_file: ""
diff --git a/test/fixtures/templater/jinja_l_metas/007.sql b/test/fixtures/templater/jinja_l_metas/007.sql
new file mode 100644
index 0000000..a87c4e5
--- /dev/null
+++ b/test/fixtures/templater/jinja_l_metas/007.sql
@@ -0,0 +1 @@
+SELECT 1 {{ " + 2" if false }} FROM {%+if true-%} {{ref('foo')}} {%-endif%}
\ No newline at end of file
diff --git a/test/fixtures/templater/jinja_l_metas/007.yml b/test/fixtures/templater/jinja_l_metas/007.yml
new file mode 100644
index 0000000..bb417b0
--- /dev/null
+++ b/test/fixtures/templater/jinja_l_metas/007.yml
@@ -0,0 +1,30 @@
+file:
+- statement:
+    select_statement:
+    - select_clause:
+        keyword: SELECT
+        indent: ""
+        whitespace: ' '
+        select_clause_element:
+          numeric_literal: '1'
+    - whitespace: ' '
+    - placeholder: '{{ " + 2" if false }}'
+    - whitespace: ' '
+    - dedent: ""
+    - from_clause:
+      - keyword: FROM
+      - whitespace: ' '
+      - placeholder: '{%+if true-%}'
+      - indent: ""
+      - placeholder: ' '
+      - from_expression:
+          indent: ""
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: foo
+          dedent: ""
+- placeholder: ' '
+- dedent: ""
+- placeholder: '{%-endif%}'
+- end_of_file: ""
diff --git a/test/fixtures/templater/jinja_l_metas/008.sql b/test/fixtures/templater/jinja_l_metas/008.sql
new file mode 100644
index 0000000..a3e869f
--- /dev/null
+++ b/test/fixtures/templater/jinja_l_metas/008.sql
@@ -0,0 +1,5 @@
+{% for item in [1,2] -%}
+SELECT *
+FROM some_table
+{{ "UNION ALL\n" if not loop.last }}
+{%- endfor %}
\ No newline at end of file
diff --git a/test/fixtures/templater/jinja_l_metas/008.yml b/test/fixtures/templater/jinja_l_metas/008.yml
new file mode 100644
index 0000000..d2b8de5
--- /dev/null
+++ b/test/fixtures/templater/jinja_l_metas/008.yml
@@ -0,0 +1,65 @@
+file:
+  - placeholder: "{% for item in [1,2] -%}"
+  - indent: ""
+  - placeholder: "\n"
+  - statement:
+      set_expression:
+      - select_statement:
+          select_clause:
+            keyword: SELECT
+            indent: ""
+            whitespace: ' '
+            select_clause_element:
+              wildcard_expression:
+                wildcard_identifier:
+                  star: '*'
+          newline: "\n"
+          dedent: ""
+          from_clause:
+            keyword: FROM
+            whitespace: ' '
+            from_expression:
+              indent: ""
+              from_expression_element:
+                table_expression:
+                  table_reference:
+                    naked_identifier: some_table
+              dedent: ""
+      - newline: "\n"
+      - set_operator:
+        - keyword: UNION
+        - whitespace: ' '
+        - keyword: ALL
+      - newline: "\n"
+      - placeholder: "\n"
+      - dedent: ""
+      - template_loop: ""
+      - indent: ""
+      - placeholder: "\n"
+      - select_statement:
+          select_clause:
+            keyword: SELECT
+            indent: ""
+            whitespace: ' '
+            select_clause_element:
+              wildcard_expression:
+                wildcard_identifier:
+                  star: '*'
+          newline: "\n"
+          dedent: ""
+          from_clause:
+            keyword: FROM
+            whitespace: ' '
+            from_expression:
+              indent: ""
+              from_expression_element:
+                table_expression:
+                  table_reference:
+                    naked_identifier: some_table
+              dedent: ""
+  - newline: "\n"
+  - placeholder: '{{ "UNION ALL\n" if not loop.last }}'
+  - placeholder: "\n"
+  - dedent: ""
+  - placeholder: '{%- endfor %}'
+  - end_of_file: ""
diff --git a/test/fixtures/templater/jinja_l_metas/009.sql b/test/fixtures/templater/jinja_l_metas/009.sql
new file mode 100644
index 0000000..9bde0fa
--- /dev/null
+++ b/test/fixtures/templater/jinja_l_metas/009.sql
@@ -0,0 +1,7 @@
+SELECT
+    1
+{% if true %}
+    ,2
+FROM a
+{% endif %}
+LIMIT 1
diff --git a/test/fixtures/templater/jinja_l_metas/009.yml b/test/fixtures/templater/jinja_l_metas/009.yml
new file mode 100644
index 0000000..f6e23df
--- /dev/null
+++ b/test/fixtures/templater/jinja_l_metas/009.yml
@@ -0,0 +1,42 @@
+file:
+  statement:
+    select_statement:
+    - select_clause:
+      - keyword: SELECT
+      - indent: ""
+      - newline: "\n"
+      - whitespace: '    '
+      - select_clause_element:
+          numeric_literal: '1'
+      - newline: "\n"
+      - placeholder: '{% if true %}'
+      - indent: ""
+      - newline: "\n"
+      - whitespace: '    '
+      - comma: ','
+      - select_clause_element:
+          numeric_literal: '2'
+    - newline: "\n"
+    - dedent: ""
+    - from_clause:
+        keyword: FROM
+        whitespace: ' '
+        from_expression:
+          indent: ""
+          from_expression_element:
+            table_expression:
+              table_reference:
+                naked_identifier: a
+          dedent: ""
+    - newline: "\n"
+    - dedent: ""
+    - placeholder: '{% endif %}'
+    - newline: "\n"
+    - limit_clause:
+        keyword: LIMIT
+        indent: ""
+        whitespace: ' '
+        numeric_literal: '1'
+        dedent: ""
+  newline: "\n"
+  end_of_file: ""
diff --git a/test/generate_parse_fixture_yml.py b/test/generate_parse_fixture_yml.py
index 14e350c..488c857 100644
--- a/test/generate_parse_fixture_yml.py
+++ b/test/generate_parse_fixture_yml.py
@@ -28,16 +28,16 @@ def distribute_work(work_items: List[S], work_fn: Callable[[S], None]) -> None:
             pass
 
 
-def _create_yaml_path(example: ParseExample) -> str:
+def _create_file_path(example: ParseExample, ext: str = ".yml") -> str:
     dialect, sqlfile = example
     root, _ = os.path.splitext(sqlfile)
-    path = os.path.join("test", "fixtures", "dialects", dialect, root + ".yml")
+    path = os.path.join("test", "fixtures", "dialects", dialect, root + ext)
     return path
 
 
 def _is_matching_new_criteria(example: ParseExample):
     """Is the Yaml doesn't exist or is older than the SQL."""
-    yaml_path = _create_yaml_path(example)
+    yaml_path = _create_file_path(example)
     if not os.path.exists(yaml_path):
         return True
 
@@ -55,9 +55,21 @@ def generate_one_parse_fixture(example: ParseExample) -> None:
     """Parse example SQL file, write parse tree to YAML file."""
     dialect, sqlfile = example
     tree = parse_example_file(dialect, sqlfile)
+
+    # Check we don't have any base types or unparsable sections
+    types = tree.type_set()
+    sql_path = _create_file_path(example, ".sql")
+    if "base" in types:
+        raise SQLParseError(f"Unnamed base section when parsing: {sql_path}")
+    if "unparsable" in types:
+        for unparsable in tree.iter_unparsables():
+            print("Found unparsable segment...")
+            print(unparsable.stringify())
+        raise SQLParseError(f"Could not parse: {sql_path}")
+
     _hash = compute_parse_tree_hash(tree)
     # Remove the .sql file extension
-    path = _create_yaml_path(example)
+    path = _create_file_path(example)
     with open(path, "w", newline="\n") as f:
         r: Optional[Dict[str, Optional[str]]] = None
 
@@ -65,16 +77,6 @@ def generate_one_parse_fixture(example: ParseExample) -> None:
             f.write("")
             return
 
-        # Check we don't have any base types or unparsable sections
-        types = tree.type_set()
-        if "base" in types:
-            raise SQLParseError(f"Unnamed base section when parsing: {f.name}")
-        if "unparsable" in types:
-            for unparsable in tree.iter_unparsables():
-                print("Found unparsable segment...")
-                print(unparsable.stringify())
-            raise SQLParseError(f"Could not parse: {f.name}")
-
         records = tree.as_record(code_only=True, show_raw=True)
         assert records, "TypeGuard"
         r = dict([("_hash", _hash), *list(records.items())])
diff --git a/test/patch_lcov.py b/test/patch_lcov.py
new file mode 100644
index 0000000..60d3f41
--- /dev/null
+++ b/test/patch_lcov.py
@@ -0,0 +1,33 @@
+"""Replaces .tox/ paths in the lcov file with paths relative to repo root.
+
+Context: When the CI build runs tests, it uses tox, which installs SQLFluff
+in a virtual environment. Thus, the coverage.lcov file generated by the tests
+contains paths to the virtual environment. This script replaces those paths
+with paths relative to the repo root. This allows the lcov file to be used by
+Coveralls. Without this, Coveralls has valid coverage info, but it generates
+URLs that point to source files that don't exist in the SQLFluff GitHub repo.
+
+For example, we want to change this:
+SF:.tox/py/lib/python3.10/site-packages/sqlfluff/__init__.py
+
+to this:
+SF:src/sqlfluff/__init__.py
+"""
+import re
+from pathlib import Path
+
+path = Path("coverage.lcov")
+if path.exists():
+    lines = path.read_text().splitlines()
+    modified_lines = []
+    for line in lines:
+        if line.startswith("SF:.tox"):
+            m = re.search(r"^(SF:).*(sqlfluff/.*)", line)
+            if m:
+                modified_lines.append(f"{m.group(1)}src/{m.group(2)}")
+            else:
+                print(f"Could not patch line: {line}")
+                modified_lines.append(line)
+        else:
+            modified_lines.append(line)
+    path.write_text("\n".join(modified_lines))
diff --git a/test/rules/std_L020_test.py b/test/rules/std_AL04_test.py
similarity index 79%
rename from test/rules/std_L020_test.py
rename to test/rules/std_AL04_test.py
index e6c9b76..e03604b 100644
--- a/test/rules/std_L020_test.py
+++ b/test/rules/std_AL04_test.py
@@ -1,9 +1,9 @@
-"""Tests the python routines within L020."""
+"""Tests the python routines within AL04."""
 
 import sqlfluff
 
 
-def test__rules__std_L020_one_aliases_one_duplicate():
+def test__rules__std_AL04_one_aliases_one_duplicate():
     """Verify correct error message for one duplicate table aliases occur one times."""
     sql = """
         SELECT
@@ -12,11 +12,11 @@ def test__rules__std_L020_one_aliases_one_duplicate():
         JOIN table_2 AS a ON a.pk = a.pk
     """
     result = sqlfluff.lint(sql)
-    assert "L020" in [r["code"] for r in result]
-    assert [r["code"] for r in result].count("L020") == 1
+    assert "AL04" in [r["code"] for r in result]
+    assert [r["code"] for r in result].count("AL04") == 1
 
 
-def test__rules__std_L020_one_aliases_two_duplicate():
+def test__rules__std_AL04_one_aliases_two_duplicate():
     """Verify correct error message for one duplicate table aliases occur two times."""
     sql = """
         SELECT
@@ -26,7 +26,7 @@ def test__rules__std_L020_one_aliases_two_duplicate():
         JOIN table_3 AS a ON a.pk = a.pk
     """
     result = sqlfluff.lint(sql)
-    result_filter = [r for r in result if r["code"] == "L020"]
+    result_filter = [r for r in result if r["code"] == "AL04"]
     # Error message only shows two times, not three
     assert len(result_filter) == 2
     assert (
@@ -44,8 +44,8 @@ def test__rules__std_L020_one_aliases_two_duplicate():
     assert result_filter[1]["line_no"] == 6
 
 
-def test__rules__std_L020_complex():
-    """Verify that L020 returns the correct error message for complex example."""
+def test__rules__std_AL04_complex():
+    """Verify that AL04 returns the correct error message for complex example."""
     sql = """
         SELECT
             a.pk,
@@ -57,7 +57,7 @@ def test__rules__std_L020_complex():
         JOIN table_5 AS a ON b.pk = a.pk
     """
     result = sqlfluff.lint(sql)
-    result_filter = [r for r in result if r["code"] == "L020"]
+    result_filter = [r for r in result if r["code"] == "AL04"]
     # Error message only shows three times, not four
     assert len(result_filter) == 3
     assert (
diff --git a/test/rules/std_L054_test.py b/test/rules/std_AM06_test.py
similarity index 60%
rename from test/rules/std_L054_test.py
rename to test/rules/std_AM06_test.py
index 55378c8..5697cfa 100644
--- a/test/rules/std_L054_test.py
+++ b/test/rules/std_AM06_test.py
@@ -1,9 +1,9 @@
-"""Tests the python routines within L054."""
+"""Tests the python routines within AM06."""
 import sqlfluff
 
 
-def test__rules__std_L054_raised() -> None:
-    """Test case for multiple L054 errors raised with 'consistent' setting."""
+def test__rules__std_AM06_raised() -> None:
+    """Test case for multiple AM06 errors raised with 'consistent' setting."""
     sql = """
     SELECT
         foo,
@@ -25,16 +25,16 @@ def test__rules__std_L054_raised() -> None:
     """
     result = sqlfluff.lint(sql)
 
-    results_l054 = [r for r in result if r["code"] == "L054"]
-    assert len(results_l054) == 2
+    results_AM06 = [r for r in result if r["code"] == "AM06"]
+    assert len(results_AM06) == 2
     assert (
-        results_l054[0]["description"]
+        results_AM06[0]["description"]
         == "Inconsistent column references in 'GROUP BY/ORDER BY' clauses."
     )
 
 
-def test__rules__std_L054_unparsable() -> None:
-    """Test unparsable group by doesn't result in bad rule L054 error."""
+def test__rules__std_AM06_unparsable() -> None:
+    """Test unparsable group by doesn't result in bad rule AM06 error."""
     sql = """
     SELECT foo.set.barr
     FROM foo
@@ -43,14 +43,14 @@ def test__rules__std_L054_unparsable() -> None:
     """
     result = sqlfluff.lint(sql)
 
-    results_l054 = [r for r in result if r["code"] == "L054"]
+    results_AM06 = [r for r in result if r["code"] == "AM06"]
     results_prs = [r for r in result if r["code"] == "PRS"]
-    assert len(results_l054) == 0
+    assert len(results_AM06) == 0
     assert len(results_prs) > 0
 
 
-def test__rules__std_L054_noqa() -> None:
-    """Test unparsable group by with no qa doesn't result in bad rule L054 error."""
+def test__rules__std_AM06_noqa() -> None:
+    """Test unparsable group by with no qa doesn't result in bad rule AM06 error."""
     sql = """
     SELECT foo.set.barr  --noqa: PRS
     FROM foo
@@ -59,7 +59,7 @@ def test__rules__std_L054_noqa() -> None:
     """
     result = sqlfluff.lint(sql)
 
-    results_l054 = [r for r in result if r["code"] == "L054"]
+    results_AM06 = [r for r in result if r["code"] == "AM06"]
     results_prs = [r for r in result if r["code"] == "PRS"]
-    assert len(results_l054) == 0
+    assert len(results_AM06) == 0
     assert len(results_prs) == 0
diff --git a/test/rules/std_L060_test.py b/test/rules/std_CV02_test.py
similarity index 56%
rename from test/rules/std_L060_test.py
rename to test/rules/std_CV02_test.py
index afd01c9..cda5e64 100644
--- a/test/rules/std_L060_test.py
+++ b/test/rules/std_CV02_test.py
@@ -1,11 +1,11 @@
-"""Tests the python routines within L060."""
+"""Tests the python routines within CV02."""
 import sqlfluff
 
 
-def test__rules__std_L060_raised() -> None:
-    """L060 is raised for use of ``IFNULL`` or ``NVL``."""
+def test__rules__std_CV02_raised() -> None:
+    """CV02 is raised for use of ``IFNULL`` or ``NVL``."""
     sql = "SELECT\n\tIFNULL(NULL, 100),\n\tNVL(NULL,100);"
-    result = sqlfluff.lint(sql, rules=["L060"])
+    result = sqlfluff.lint(sql, rules=["CV02"])
 
     assert len(result) == 2
     assert result[0]["description"] == "Use 'COALESCE' instead of 'IFNULL'."
diff --git a/test/rules/std_L062_test.py b/test/rules/std_CV09_test.py
similarity index 72%
rename from test/rules/std_L062_test.py
rename to test/rules/std_CV09_test.py
index a6d7851..0b62028 100644
--- a/test/rules/std_L062_test.py
+++ b/test/rules/std_CV09_test.py
@@ -1,14 +1,14 @@
-"""Tests the python routines within L062."""
+"""Tests the python routines within CV09."""
 from sqlfluff.core import FluffConfig
 from sqlfluff.core import Linter
 
 
-def test__rules__std_L062_raised() -> None:
-    """L062 is raised for use of blocked words with correct error message."""
+def test__rules__std_CV09_raised() -> None:
+    """CV09 is raised for use of blocked words with correct error message."""
     sql = "SELECT MYOLDFUNCTION(col1) FROM deprecated_table;\n"
     cfg = FluffConfig(overrides={"dialect": "ansi"})
     cfg.set_value(
-        config_path=["rules", "L062", "blocked_words"],
+        config_path=["rules", "convention.blocked_words", "blocked_words"],
         val="myoldfunction,deprecated_table",
     )
     linter = Linter(config=cfg)
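
Besides set_value() as used above, the same option can be supplied when the config is constructed, via the nested configs mapping whose shape test/rules/std_test.py validates below; a sketch:

    from sqlfluff.core import FluffConfig

    cfg = FluffConfig(
        configs={
            "rules": {
                "convention.blocked_words": {
                    "blocked_words": "myoldfunction,deprecated_table",
                }
            }
        },
        overrides={"dialect": "ansi"},
    )
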
diff --git a/test/rules/std_L003_L036_L039_combo_test.py b/test/rules/std_L003_L036_L039_combo_test.py
deleted file mode 100644
index 71180bf..0000000
--- a/test/rules/std_L003_L036_L039_combo_test.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""Tests issue #1373 doesn't reoccur.
-
-The combination of L003 (incorrect indentation), L036 (select targets),
-and L039 (unnecessary white space) can result in incorrect indentation.
-"""
-
-import sqlfluff
-
-
-def test__rules__std_L003_L036_L039():
-    """Verify that double indents don't flag L039."""
-    sql = """
-    WITH example AS (
-        SELECT my_id,
-            other_thing,
-            one_more
-        FROM
-            my_table
-    )
-
-    SELECT my_id
-    FROM example\n"""
-    fixed_sql = """
-    WITH example AS (
-        SELECT
-            my_id,
-            other_thing,
-            one_more
-        FROM
-            my_table
-    )
-
-    SELECT my_id
-    FROM example\n"""
-    result = sqlfluff.fix(sql, exclude_rules=["L050"])
-    assert result == fixed_sql
diff --git a/test/rules/std_L003_test.py b/test/rules/std_L003_test.py
deleted file mode 100644
index c3bb4b9..0000000
--- a/test/rules/std_L003_test.py
+++ /dev/null
@@ -1,83 +0,0 @@
-"""Tests the python routines within L003."""
-import pytest
-
-from sqlfluff.rules.L003 import Rule_L003, _Desc
-
-
-@pytest.mark.parametrize(
-    "indent_unit,num,tab_space_size,result",
-    [
-        ("space", 3, 2, "      "),
-        ("tab", 3, 2, "\t\t\t"),
-    ],
-)
-def test__rules__std_L003_make_indent(indent_unit, num, tab_space_size, result):
-    """Test Rule_L003._make_indent."""
-    res = Rule_L003._make_indent(
-        num=num, indent_unit=indent_unit, tab_space_size=tab_space_size
-    )
-    assert res == result
-
-
-def test__rules__std_L003_make_indent_invalid_param():
-    """Test Rule_L003._make_indent with invalid indent_unit parameter."""
-    with pytest.raises(ValueError):
-        Rule_L003._make_indent(indent_unit="aaa")
-
-
-class ProtoSeg:
-    """Proto Seg for testing."""
-
-    def __init__(self, raw):
-        self.raw = raw
-
-    def is_type(self, *seg_type):
-        """Is this segment (or its parent) of the given type."""
-        return False
-
-
-@pytest.mark.parametrize(
-    "tab_space_size,segments,result",
-    [
-        # Integer examples
-        (3, [ProtoSeg("      ")], 6),
-        (2, [ProtoSeg("\t\t")], 4),
-    ],
-)
-def test__rules__std_L003_indent_size(tab_space_size, segments, result):
-    """Test Rule_L003._indent_size."""
-    res = Rule_L003._indent_size(segments=segments, tab_space_size=tab_space_size)
-    assert res == result
-
-
-@pytest.mark.parametrize(
-    "expected,found,compared_to,has_partial_indent,expected_message",
-    [
-        (
-            1,
-            1,
-            4,
-            True,
-            "Expected 1 indentation, found more than 1 [compared to line 04]",
-        ),
-        (
-            2,
-            1,
-            10,
-            False,
-            "Expected 2 indentations, found 1 [compared to line 10]",
-        ),
-        (
-            2,
-            1,
-            11,
-            True,
-            "Expected 2 indentations, found less than 2 [compared to line 11]",
-        ),
-    ],
-)
-def test__rules__std_L003_desc(
-    expected, found, compared_to, has_partial_indent, expected_message
-):
-    """Test Rule_L003 error description."""
-    assert _Desc(expected, found, compared_to, has_partial_indent) == expected_message
diff --git a/test/rules/std_L008_test.py b/test/rules/std_L008_test.py
deleted file mode 100644
index 52823d0..0000000
--- a/test/rules/std_L008_test.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""Tests the python routines within L008."""
-import sqlfluff
-
-
-def test__rules__std_L008_single_raise() -> None:
-    """Test case for multiple L008 errors raised when no post comma whitespace."""
-    # This query used to triple count L008. Added memory to log previously fixed commas
-    # (issue #2001).
-    sql = """
-    SELECT
-        col_a AS a
-        ,col_b AS b
-    FROM foo;
-    """
-    result = sqlfluff.lint(sql, rules=["L008", "L019"])
-
-    results_L008 = [r for r in result if r["code"] == "L008"]
-    results_L019 = [r for r in result if r["code"] == "L019"]
-    assert len(results_L008) == 1
-    assert len(results_L019) == 1
diff --git a/test/rules/std_L027_test.py b/test/rules/std_L027_test.py
deleted file mode 100644
index 3190150..0000000
--- a/test/rules/std_L027_test.py
+++ /dev/null
@@ -1,15 +0,0 @@
-"""Tests the python routines within L027."""
-
-import sqlfluff
-
-
-def test__rules__std_L027_wildcard_single_count():
-    """Verify that L027 is only raised once for wildcard (see issue #1973)."""
-    sql = """
-        SELECT *
-        FROM foo
-        INNER JOIN bar;
-    """
-    result = sqlfluff.lint(sql)
-    assert "L027" in [r["code"] for r in result]
-    assert [r["code"] for r in result].count("L027") == 1
diff --git a/test/rules/std_L048_test.py b/test/rules/std_L048_test.py
deleted file mode 100644
index 9fe9e68..0000000
--- a/test/rules/std_L048_test.py
+++ /dev/null
@@ -1,25 +0,0 @@
-"""Tests the python routines within L048."""
-import sqlfluff
-
-
-def test__rules__std_L048_raised() -> None:
-    """L048 is raised for quoted literals not surrounded by a single whitespace."""
-    sql = "SELECT a +'b'+'c' FROM tbl;"
-    result = sqlfluff.lint(sql)
-
-    assert len(result) == 7
-
-    results_l048 = [r for r in result if r["code"] == "L048"]
-    assert len(results_l048) == 3
-    assert (
-        results_l048[0]["description"]
-        == "Expected single whitespace between binary operator '+' and quoted literal."
-    )
-    assert (
-        results_l048[1]["description"]
-        == "Expected single whitespace between quoted literal and binary operator '+'."
-    )
-    assert (
-        results_l048[2]["description"]
-        == "Expected single whitespace between binary operator '+' and quoted literal."
-    )
diff --git a/test/rules/std_LT01_LT02_LT09_combo_test.py b/test/rules/std_LT01_LT02_LT09_combo_test.py
new file mode 100644
index 0000000..82effd1
--- /dev/null
+++ b/test/rules/std_LT01_LT02_LT09_combo_test.py
@@ -0,0 +1,36 @@
+"""Tests issue #1373 doesn't reoccur.
+
+The combination of LT02 (incorrect indentation), LT09 (select targets),
+and LT01 (unnecessary white space) can result in incorrect indentation.
+"""
+
+import sqlfluff
+
+
+def test__rules__std_LT02_LT09_LT01():
+    """Verify that double indents don't flag LT01."""
+    sql = """
+WITH example AS (
+    SELECT my_id,
+        other_thing,
+        one_more
+    FROM
+        my_table
+)
+
+SELECT my_id
+FROM example\n"""
+    fixed_sql = """
+WITH example AS (
+    SELECT
+        my_id,
+        other_thing,
+        one_more
+    FROM
+        my_table
+)
+
+SELECT my_id
+FROM example\n"""
+    result = sqlfluff.fix(sql, exclude_rules=["LT13"])
+    assert result == fixed_sql
diff --git a/test/rules/std_LT01_LT04_test.py b/test/rules/std_LT01_LT04_test.py
new file mode 100644
index 0000000..eaa742a
--- /dev/null
+++ b/test/rules/std_LT01_LT04_test.py
@@ -0,0 +1,20 @@
+"""Tests the python routines within LT01."""
+import sqlfluff
+
+
+def test__rules__std_LT01_single_raise() -> None:
+    """Test case for multiple LT01 errors raised when no post comma whitespace."""
+    # This query used to triple count LT01. Added memory to log previously fixed commas
+    # (issue #2001).
+    sql = """
+SELECT
+    col_a AS a
+    ,col_b AS b
+FROM foo;
+"""
+    result = sqlfluff.lint(sql, rules=["LT01", "LT04"])
+
+    results_LT01 = [r for r in result if r["code"] == "LT01"]
+    results_LT04 = [r for r in result if r["code"] == "LT04"]
+    assert len(results_LT01) == 1
+    assert len(results_LT04) == 1
diff --git a/test/rules/std_L003_L065_combo_test.py b/test/rules/std_LT02_LT11_combo_test.py
similarity index 54%
rename from test/rules/std_L003_L065_combo_test.py
rename to test/rules/std_LT02_LT11_combo_test.py
index 9c5c221..5d8ea1a 100644
--- a/test/rules/std_L003_L065_combo_test.py
+++ b/test/rules/std_LT02_LT11_combo_test.py
@@ -1,17 +1,17 @@
-"""Tests the combination of L003 and L065.
+"""Tests the combination of LT02 and LT11.
 
-L003: Indentation not consistent with previous lines
-L065: Set operators should be surrounded by newlines
+LT02: Indentation not consistent with previous lines
+LT11: Set operators should be surrounded by newlines
 
-Auto fix of L065 does not insert correct indentation but just Newlines. It relies on
-L003 to sort out the indentation later. This is what is getting tested here.
+The auto-fix for LT11 does not insert correct indentation, just newlines. It relies
+on LT02 to sort out the indentation later. That interaction is what is tested here.
 """
 
 import sqlfluff
 
 
-def test__rules__std_L003_L065_union_all_in_subquery_lint():
-    """Verify a that L065 reports lint errors in subqueries."""
+def test__rules__std_LT02_LT11_union_all_in_subquery_lint():
+    """Verify a that LT11 reports lint errors in subqueries."""
     sql = (
         "SELECT * FROM (\n"
         "    SELECT 'g' UNION ALL\n"
@@ -21,11 +21,11 @@ def test__rules__std_L003_L065_union_all_in_subquery_lint():
     )
     result = sqlfluff.lint(sql)
 
-    assert "L065" in [r["code"] for r in result]
+    assert "LT11" in [r["code"] for r in result]
 
 
-def test__rules__std_L003_L065_union_all_in_subquery_fix():
-    """Verify combination of rules L003 and L065 produces a correct indentation."""
+def test__rules__std_LT02_LT11_union_all_in_subquery_fix():
+    """Verify combination of rules LT02 and LT11 produces a correct indentation."""
     sql = (
         "SELECT c FROM (\n"
         "    SELECT 'g' UNION ALL\n"
diff --git a/test/rules/std_L007_test.py b/test/rules/std_LT03_test.py
similarity index 84%
rename from test/rules/std_L007_test.py
rename to test/rules/std_LT03_test.py
index ef2ad96..1f64cdb 100644
--- a/test/rules/std_L007_test.py
+++ b/test/rules/std_LT03_test.py
@@ -1,4 +1,4 @@
-"""Tests the python routines within L007."""
+"""Tests the python routines within LT03."""
 
 import sqlfluff
 from sqlfluff.core.config import FluffConfig
@@ -12,8 +12,8 @@ EXPECTED_TRAILING_MESSAGE = (
 )
 
 
-def test__rules__std_L007_default():
-    """Verify that L007 returns the correct error message for default (trailing)."""
+def test__rules__std_LT03_default():
+    """Verify that LT03 returns the correct error message for default (trailing)."""
     sql = """
         SELECT
             a,
@@ -24,11 +24,11 @@ def test__rules__std_L007_default():
             b = 2
     """
     result = sqlfluff.lint(sql)
-    assert "L007" in [r["code"] for r in result]
+    assert "LT03" in [r["code"] for r in result]
     assert EXPECTED_LEADING_MESSAGE in [r["description"] for r in result]
 
 
-def test__rules__std_L007_leading():
+def test__rules__std_LT03_leading():
     """Verify correct error message when leading is used."""
     sql = """
         SELECT
@@ -47,11 +47,11 @@ def test__rules__std_L007_leading():
     linter = Linter(config=config)
     result_records = linter.lint_string_wrapped(sql).as_records()
     result = result_records[0]["violations"]
-    assert "L007" in [r["code"] for r in result]
+    assert "LT03" in [r["code"] for r in result]
     assert EXPECTED_LEADING_MESSAGE in [r["description"] for r in result]
 
 
-def test__rules__std_L007_trailing():
+def test__rules__std_LT03_trailing():
     """Verify correct error message when trailing is used."""
     sql = """
         SELECT
@@ -72,5 +72,5 @@ def test__rules__std_L007_trailing():
     linter = Linter(config=config)
     result_records = linter.lint_string_wrapped(sql).as_records()
     result = result_records[0]["violations"]
-    assert "L007" in [r["code"] for r in result]
+    assert "LT03" in [r["code"] for r in result]
     assert EXPECTED_TRAILING_MESSAGE in [r["description"] for r in result]
diff --git a/test/rules/std_L019_test.py b/test/rules/std_LT04_test.py
similarity index 85%
rename from test/rules/std_L019_test.py
rename to test/rules/std_LT04_test.py
index 9a06955..b0a9c37 100644
--- a/test/rules/std_L019_test.py
+++ b/test/rules/std_LT04_test.py
@@ -1,10 +1,10 @@
-"""Tests the python routines within L019."""
+"""Tests the python routines within LT04."""
 
 import sqlfluff
 
 
-def test__rules__std_L019_unparseable():
-    """Verify that L019 doesn't try to fix queries with parse errors.
+def test__rules__std_LT04_unparseable():
+    """Verify that LT04 doesn't try to fix queries with parse errors.
 
     This has been observed to frequently cause syntax errors, especially in
     combination with Jinja templating, e.g. undefined template variables.
@@ -35,4 +35,4 @@ def test__rules__std_L019_unparseable():
           t
     """
     result = sqlfluff.lint(sql)
-    assert "L019" not in [r["code"] for r in result]
+    assert "LT04" not in [r["code"] for r in result]
diff --git a/test/rules/std_L016_L36_combo_test.py b/test/rules/std_LT05_LT09_combo_test.py
similarity index 62%
rename from test/rules/std_L016_L36_combo_test.py
rename to test/rules/std_LT05_LT09_combo_test.py
index 8d09246..f27e09c 100644
--- a/test/rules/std_L016_L36_combo_test.py
+++ b/test/rules/std_LT05_LT09_combo_test.py
@@ -1,25 +1,25 @@
-"""Tests the combination of L016 and L036.
+"""Tests the combination of LT05 and LT09.
 
-L016: no long lines
-L036: single selects should be on SELECT line
+LT05: no long lines
+LT09: single selects should be on SELECT line
 """
 
 import sqlfluff
 
 
-def test__rules__std_L016_L036_long_line_lint():
-    """Verify a long line that causes a clash between L016 and L036 is not changed."""
+def test__rules__std_LT05_LT09_long_line_lint():
+    """Verify a long line that causes a clash between LT05 and LT09 is not changed."""
     sql = (
         "SELECT\n1000000000000000000000000000000000000000000000000000000000000000000000"
         "000000000000000000000000000000\n"
     )
     result = sqlfluff.lint(sql)
-    assert "L016" in [r["code"] for r in result]
-    assert "L036" in [r["code"] for r in result]
+    assert "LT05" in [r["code"] for r in result]
+    assert "LT09" in [r["code"] for r in result]
 
 
-def test__rules__std_L016_L036_long_line_fix():
-    """Verify clash between L016 & L036 does not add multiple newlines (see #1424)."""
+def test__rules__std_LT05_LT09_long_line_fix():
+    """Verify clash between LT05 & LT09 does not add multiple newlines (see #1424)."""
     sql = (
         "SELECT 10000000000000000000000000000000000000000000000000000000000000000000000"
         "00000000000000000000000000000\n"
@@ -31,8 +31,8 @@ def test__rules__std_L016_L036_long_line_fix():
     )
 
 
-def test__rules__std_L016_L036_long_line_fix2():
-    """Verify clash between L016 & L036 does not add multiple newlines (see #1424)."""
+def test__rules__std_LT05_LT09_long_line_fix2():
+    """Verify clash between LT05 & LT09 does not add multiple newlines (see #1424)."""
     sql = (
         "SELECT\n    100000000000000000000000000000000000000000000000000000000000000000"
         "0000000000000000000000000000000000\n"
diff --git a/test/rules/std_L009_L052_test.py b/test/rules/std_LT12_CV06_test.py
similarity index 65%
rename from test/rules/std_L009_L052_test.py
rename to test/rules/std_LT12_CV06_test.py
index 06b9212..f161220 100644
--- a/test/rules/std_L009_L052_test.py
+++ b/test/rules/std_LT12_CV06_test.py
@@ -1,23 +1,26 @@
-"""Tests the python routines within L009 and L052."""
+"""Tests the python routines within LT12 and CV06."""
 from sqlfluff.core import FluffConfig
 from sqlfluff.core import Linter
 
 
-def test__rules__std_L009_and_L052_interaction() -> None:
-    """Test interaction between L009 and L052 doesn't stop L052 from being applied."""
+def test__rules__std_LT12_and_CV06_interaction() -> None:
+    """Test interaction between LT12 and CV06 doesn't stop CV06 from being applied."""
     # Test sql with no final newline and no final semicolon.
     sql = "SELECT foo FROM bar"
 
     # Ensure final semicolon requirement is active.
     cfg = FluffConfig(overrides={"dialect": "ansi"})
-    cfg.set_value(config_path=["rules", "L052", "require_final_semicolon"], val=True)
+    cfg.set_value(
+        config_path=["rules", "convention.terminator", "require_final_semicolon"],
+        val=True,
+    )
     linter = Linter(config=cfg)
 
     # Return linted/fixed file.
     linted_file = linter.lint_string(sql, fix=True)
 
     # Check expected lint errors are raised.
-    assert set([v.rule.code for v in linted_file.violations]) == {"L009", "L052"}
+    assert set([v.rule.code for v in linted_file.violations]) == {"LT12", "CV06"}
 
     # Check file is fixed.
     assert linted_file.fix_string()[0] == "SELECT foo FROM bar;\n"
diff --git a/test/rules/std_RF02_test.py b/test/rules/std_RF02_test.py
new file mode 100644
index 0000000..e9a7746
--- /dev/null
+++ b/test/rules/std_RF02_test.py
@@ -0,0 +1,15 @@
+"""Tests the python routines within RF02."""
+
+import sqlfluff
+
+
+def test__rules__std_RF02_wildcard_single_count():
+    """Verify that RF02 is only raised once for wildcard (see issue #1973)."""
+    sql = """
+        SELECT *
+        FROM foo
+        INNER JOIN bar;
+    """
+    result = sqlfluff.lint(sql)
+    assert "RF02" in [r["code"] for r in result]
+    assert [r["code"] for r in result].count("RF02") == 1
diff --git a/test/rules/std_L045_test.py b/test/rules/std_ST03_test.py
similarity index 63%
rename from test/rules/std_L045_test.py
rename to test/rules/std_ST03_test.py
index eb5e1e2..2da7065 100644
--- a/test/rules/std_L045_test.py
+++ b/test/rules/std_ST03_test.py
@@ -1,10 +1,10 @@
-"""Tests the python routines within L045."""
+"""Tests the python routines within ST03."""
 
 import sqlfluff
 
 
-def test__rules__std_L045_multiple_unused_ctes():
-    """Verify that L045 returns multiple lint issues, one per unused CTE."""
+def test__rules__std_ST03_multiple_unused_ctes():
+    """Verify that ST03 returns multiple lint issues, one per unused CTE."""
     sql = """
     WITH
     cte_1 AS (
@@ -23,24 +23,27 @@ def test__rules__std_L045_multiple_unused_ctes():
     SELECT var_bar
     FROM cte_3
     """
-    result = sqlfluff.lint(sql, rules=["L045"])
+    result = sqlfluff.lint(sql, rules=["ST03"])
     assert result == [
         {
-            "code": "L045",
+            "code": "ST03",
             "description": 'Query defines CTE "cte_1" but does not use it.',
             "line_no": 3,
             "line_pos": 5,
+            "name": "structure.unused_cte",
         },
         {
-            "code": "L045",
+            "code": "ST03",
             "description": 'Query defines CTE "cte_2" but does not use it.',
             "line_no": 6,
             "line_pos": 5,
+            "name": "structure.unused_cte",
         },
         {
-            "code": "L045",
+            "code": "ST03",
             "description": 'Query defines CTE "cte_4" but does not use it.',
             "line_no": 12,
             "line_pos": 5,
+            "name": "structure.unused_cte",
         },
     ]
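
As the updated expectations show, each record from the simple lint API now carries the dotted rule name alongside the code. A short sketch (the query is illustrative):

    import sqlfluff

    records = sqlfluff.lint(
        "WITH cte_1 AS (SELECT 1 AS a) SELECT 2 AS b", rules=["ST03"]
    )
    for record in records:
        print(record["code"], record["name"], record["description"])
    # ST03 structure.unused_cte Query defines CTE "cte_1" but does not use it.
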
diff --git a/test/rules/std_roundtrip_test.py b/test/rules/std_roundtrip_test.py
index ef35628..5df1985 100644
--- a/test/rules/std_roundtrip_test.py
+++ b/test/rules/std_roundtrip_test.py
@@ -113,12 +113,12 @@ def jinja_roundtrip_test(
 @pytest.mark.parametrize(
     "rule,path",
     [
-        ("L001", "test/fixtures/linter/indentation_errors.sql"),
-        ("L008", "test/fixtures/linter/whitespace_errors.sql"),
-        ("L008", "test/fixtures/linter/indentation_errors.sql"),
-        ("L010", "test/fixtures/linter/whitespace_errors.sql"),
-        ("L011", "test/fixtures/dialects/ansi/select_simple_i.sql"),
-        ("L012", "test/fixtures/dialects/ansi/select_simple_i.sql"),
+        ("LT01", "test/fixtures/linter/indentation_errors.sql"),
+        ("LT01", "test/fixtures/linter/whitespace_errors.sql"),
+        ("LT01", "test/fixtures/linter/indentation_errors.sql"),
+        ("CP01", "test/fixtures/linter/whitespace_errors.sql"),
+        ("AL01", "test/fixtures/dialects/ansi/select_simple_i.sql"),
+        ("AL02", "test/fixtures/dialects/ansi/select_simple_i.sql"),
     ],
 )
 def test__cli__command__fix(rule, path):
@@ -126,7 +126,7 @@ def test__cli__command__fix(rule, path):
     generic_roundtrip_test(path, rule)
 
 
-@pytest.mark.parametrize("rule", ["L010", "L001"])
+@pytest.mark.parametrize("rule", ["CP01", "LT01"])
 def test__cli__command__fix_templated(rule):
     """Roundtrip test, making sure that we don't drop tags while templating."""
     jinja_roundtrip_test("test/fixtures/templater/jinja_d_roundtrip", rule)
diff --git a/test/rules/std_test.py b/test/rules/std_test.py
index 74bf790..470da6a 100644
--- a/test/rules/std_test.py
+++ b/test/rules/std_test.py
@@ -9,79 +9,66 @@ from sqlfluff.utils.testing.rules import assert_rule_raises_violations_in_file
 @pytest.mark.parametrize(
     "rule,path,violations",
     [
-        ("L001", "indentation_errors.sql", [(4, 24)]),
-        ("L002", "indentation_errors.sql", [(3, 1), (4, 1)]),
+        ("LT01", "indentation_errors.sql", [(4, 24)]),
         (
-            "L003",
+            "LT02",
             "indentation_errors.sql",
-            [(2, 4), (3, 4), (4, 6)],
+            [(2, 1), (3, 1), (4, 1), (5, 1)],
         ),
-        (
-            "L004",
-            "indentation_errors.sql",
-            [(3, 1), (4, 1), (5, 1)],
-        ),
-        # Check we get comma (with leading space/newline) whitespace errors
-        # NB The newline before the comma, should report on the comma, not the newline
-        # for clarity.
-        ("L005", "whitespace_errors.sql", [(2, 9)]),
-        # Check we get comma (with incorrect trailing space) whitespace errors,
-        # but also no false positives on line 4 or 5.
-        ("L008", "whitespace_errors.sql", [(3, 12)]),
+        # Check we get comma whitespace errors
+        ("LT01", "whitespace_errors.sql", [(2, 9), (3, 12)]),
         # Check we get operator whitespace errors and it works with brackets
         (
-            "L006",
-            "operator_errors.sql",
-            [(7, 6), (7, 7), (7, 9), (7, 10), (7, 12), (7, 13)],
-        ),
-        (
-            "L039",
+            "LT01",
             "operator_errors.sql",
-            [(3, 8), (4, 10)],
+            [(3, 8), (4, 10), (7, 6), (7, 7), (7, 9), (7, 10), (7, 12), (7, 13)],
         ),
-        ("L007", "operator_errors.sql", [(5, 9)]),
-        # Check we DO get a violation on line 2 but NOT on line 3 (between L006 & L039)
+        ("LT03", "operator_errors.sql", [(5, 9)]),
         (
-            "L006",
+            "LT01",
             "operator_errors_negative.sql",
-            [(5, 6), (5, 7)],
-        ),
-        (
-            "L039",
-            "operator_errors_negative.sql",
-            [(2, 6), (2, 9)],
+            [(2, 6), (2, 9), (5, 6), (5, 7)],
         ),
         # Hard indentation errors
         (
-            "L003",
+            "LT02",
             "indentation_error_hard.sql",
-            [(2, 4), (6, 5), (9, 13), (14, 14), (19, 5), (20, 6)],
+            [
+                (2, 1),
+                (6, 1),
+                (9, 1),
+                (11, 15),
+                (12, 1),
+                (12, 33),
+                (13, 15),
+                (14, 1),
+                (14, 36),
+                (18, 1),
+                (19, 1),
+                (20, 1),
+            ],
         ),
         # Check bracket handling with closing brackets and contained indents works.
-        ("L003", "indentation_error_contained.sql", []),
+        ("LT02", "indentation_error_contained.sql", []),
        # Check we handle block comments as expected. GitHub #236
         (
-            "L016",
+            "LT05",
             "block_comment_errors.sql",
-            [(1, 121), (2, 99), (4, 88)],
+            # Errors should flag on the first element of the line.
+            [(1, 1), (2, 5), (4, 5)],
         ),
-        ("L016", "block_comment_errors_2.sql", [(1, 85), (2, 86)]),
+        ("LT05", "block_comment_errors_2.sql", [(1, 1), (2, 1)]),
         # Column references
-        ("L027", "column_references.sql", [(1, 8)]),
-        ("L027", "column_references_bare_function.sql", []),
-        ("L026", "column_references.sql", [(1, 11)]),
-        ("L025", "column_references.sql", [(2, 11)]),
+        ("RF02", "column_references.sql", [(1, 8)]),
+        ("RF02", "column_references_bare_function.sql", []),
+        ("RF01", "column_references.sql", [(1, 11)]),
+        ("AL05", "column_references.sql", [(2, 11)]),
         # Distinct and Group by
-        ("L021", "select_distinct_group_by.sql", [(1, 8)]),
+        ("AM01", "select_distinct_group_by.sql", [(1, 8)]),
         # Make sure that ignoring works as expected
-        ("L006", "operator_errors_ignore.sql", [(10, 8), (10, 9)]),
-        (
-            "L031",
-            "aliases_in_join_error.sql",
-            [(6, 15), (7, 19), (8, 16)],
-        ),
+        ("LT01", "operator_errors_ignore.sql", [(10, 8), (10, 9)]),
         (
-            "L046",
+            "JJ01",
             "heavy_templating.sql",
             [(12, 13), (12, 25)],
         ),
@@ -100,17 +87,14 @@ def test__rules__std_file(rule, path, violations):
 @pytest.mark.parametrize(
     "rule_config_dict",
     [
-        {"tab_space_size": "blah"},
-        {"max_line_length": "blah"},
-        {"indent_unit": "blah"},
         {"allow_scalar": "blah"},
         {"single_table_references": "blah"},
         {"unquoted_identifiers_policy": "blah"},
-        {"L010": {"capitalisation_policy": "blah"}},
-        {"L011": {"aliasing": "blah"}},
-        {"L012": {"aliasing": "blah"}},
-        {"L014": {"extended_capitalisation_policy": "blah"}},
-        {"L030": {"capitalisation_policy": "blah"}},
+        {"capitalisation.keywords": {"capitalisation_policy": "blah"}},
+        {"aliasing.table": {"aliasing": "blah"}},
+        {"aliasing.column": {"aliasing": "blah"}},
+        {"capitalisation.identifiers": {"extended_capitalisation_policy": "blah"}},
+        {"capitalisation.functions": {"capitalisation_policy": "blah"}},
     ],
 )
 def test_improper_configs_are_rejected(rule_config_dict):
@@ -119,4 +103,4 @@ def test_improper_configs_are_rejected(rule_config_dict):
         configs={"rules": rule_config_dict}, overrides={"dialect": "ansi"}
     )
     with pytest.raises(ValueError):
-        get_ruleset().get_rulelist(config)
+        get_ruleset().get_rulepack(config)
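
The `get_rulelist` to `get_rulepack` rename at the end of this file tracks the 2.0 rules API, where the loaded ruleset now yields a rule pack rather than a bare rule list. A minimal sketch of the same rejection check outside pytest's parametrisation; the `get_ruleset` import location is an assumption, only the calls themselves appear in the diff:

import pytest

from sqlfluff.core import FluffConfig
from sqlfluff.core.rules import get_ruleset  # assumed import path


def check_config_rejected(rule_config_dict):
    """Building a rule pack from an invalid rule config should raise."""
    config = FluffConfig(
        configs={"rules": rule_config_dict}, overrides={"dialect": "ansi"}
    )
    with pytest.raises(ValueError):
        get_ruleset().get_rulepack(config)


# e.g. an unrecognised capitalisation policy should be rejected:
check_config_rejected({"capitalisation.keywords": {"capitalisation_policy": "blah"}})
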
diff --git a/test/rules/yaml_test_cases_test.py b/test/rules/yaml_test_cases_test.py
index 03828fd..a47a27e 100644
--- a/test/rules/yaml_test_cases_test.py
+++ b/test/rules/yaml_test_cases_test.py
@@ -7,7 +7,6 @@ from sqlfluff.utils.testing.rules import (
     rules__test_helper,
     get_rule_from_set,
 )
-from sqlfluff.core.rules.doc_decorators import is_fix_compatible
 from sqlfluff.core.config import FluffConfig
 
 ids, test_cases = load_test_cases(
@@ -15,6 +14,8 @@ ids, test_cases = load_test_cases(
 )
 
 
+@pytest.mark.integration
+@pytest.mark.rules_suite
 @pytest.mark.parametrize("test_case", test_cases, ids=ids)
 def test__rule_test_case(test_case, caplog):
     """Run the tests."""
@@ -24,10 +25,8 @@ def test__rule_test_case(test_case, caplog):
             if res is not None and res != test_case.fail_str:
                 cfg = FluffConfig(configs=test_case.configs)
                 rule = get_rule_from_set(test_case.rule, config=cfg)
-                assert is_fix_compatible(
-                    rule
-                ), f"Rule {test_case.rule} returned fixes but does not specify "
-                '"@document_fix_compatible".'
+                assert rule.is_fix_compatible, (f"Rule {test_case.rule} returned "
+                    'fixes but does not specify "is_fix_compatible = True".')
 
 
 def test__rule_test_global_config():
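
The import removal above reflects the 2.0 move away from the `doc_decorators` module: fix compatibility is now a plain class attribute rather than something applied via `@document_fix_compatible`. A sketch of what declaring it on a rule might look like; the `BaseRule` import path and the `_eval` shape are assumptions, only the `is_fix_compatible` attribute comes from this diff:

from sqlfluff.core.rules import BaseRule, LintResult  # assumed import path


class Rule_Example_XX01(BaseRule):
    """A rule which returns fixes must advertise that it does."""

    # Replaces the old @document_fix_compatible decorator in 2.0.
    is_fix_compatible = True

    def _eval(self, context):
        # ... evaluate the segment and return a LintResult carrying fixes ...
        return LintResult(anchor=context.segment)
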
diff --git a/test/test_testing.py b/test/test_testing.py
index dd4c223..884e331 100644
--- a/test/test_testing.py
+++ b/test/test_testing.py
@@ -20,22 +20,22 @@ def test_assert_rule_fail_in_sql_handle_parse_error():
 def test_assert_rule_fail_in_sql_should_fail_queries_that_unexpectedly_pass():
     """Util assert_rule_fail_in_sql should fail if no failure."""
     with pytest.raises(Failed) as failed_test:
-        assert_rule_fail_in_sql(code="L001", sql="select 1")
-    failed_test.match("No L001 failures found in query which should fail")
+        assert_rule_fail_in_sql(code="LT01", sql="select 1")
+    failed_test.match("No LT01 failures found in query which should fail")
 
 
 def test_assert_rule_pass_in_sql_should_handle_parse_error():
     """Util assert_rule_pass_in_sql should handle parse errors."""
     with pytest.raises(Failed) as failed_test:
-        assert_rule_pass_in_sql(code="L001", sql="select from")
+        assert_rule_pass_in_sql(code="LT01", sql="select from")
     failed_test.match("Found unparsable section:")
 
 
 def test_assert_rule_pass_in_sql_should_fail_when_there_are_violations():
     """Util assert_rule_pass_in_sql should fail when there are violations."""
     with pytest.raises(Failed) as failed_test:
-        assert_rule_pass_in_sql(code="L005", sql="select a , b from t")
-    failed_test.match("Found L005 failures in query which should pass")
+        assert_rule_pass_in_sql(code="LT01", sql="select a , b from t")
+    failed_test.match("Found LT01 failures in query which should pass")
 
 
 def test_rules__test_helper_skipped_when_test_case_skipped():
@@ -49,7 +49,7 @@ def test_rules__test_helper_skipped_when_test_case_skipped():
 def test_rules__test_helper_has_variable_introspection(test_verbosity_level):
     """Make sure the helper gives variable introspection information on failure."""
     rule_test_case = RuleTestCase(
-        rule="L003",
+        rule="LT02",
         fail_str="""
             select
                 a,
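
These helpers give a compact way to assert a rule's behaviour against an inline query, and the updates above simply swap in the 2.0 codes. A minimal usage sketch, importing from the `sqlfluff.utils.testing.rules` module referenced earlier in this diff:

from sqlfluff.utils.testing.rules import (
    assert_rule_fail_in_sql,
    assert_rule_pass_in_sql,
)

# LT01 (inappropriate spacing) should flag the stray space before the comma...
assert_rule_fail_in_sql(code="LT01", sql="select a , b from t")
# ...while a clean query should pass without raising.
assert_rule_pass_in_sql(code="LT01", sql="select a, b from t")
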
diff --git a/test/utils/analysis/test_select_crawler.py b/test/utils/analysis/test_select_crawler.py
index 3b8dcbd..021f6a8 100644
--- a/test/utils/analysis/test_select_crawler.py
+++ b/test/utils/analysis/test_select_crawler.py
@@ -32,7 +32,7 @@ from sqlfluff.utils.analysis import select_crawler
             },
         ),
         (
-            # Nested CTEs (from L044 test suite)
+            # Nested CTEs (from AM04 test suite)
             """
         with a as (
             with b as (select 1 from c)
@@ -53,7 +53,7 @@ from sqlfluff.utils.analysis import select_crawler
             },
         ),
         (
-            # Nested CTEs (from L044 test suite)
+            # Nested CTEs (from AM04 test suite)
             """
         with b as (select 1 from c)
         select * from (
diff --git a/test/utils/reflow/depthmap_test.py b/test/utils/reflow/depthmap_test.py
index bc50c8d..7039c9c 100644
--- a/test/utils/reflow/depthmap_test.py
+++ b/test/utils/reflow/depthmap_test.py
@@ -2,7 +2,7 @@
 
 from sqlfluff.core import Linter
 
-from sqlfluff.utils.reflow.depthmap import DepthMap
+from sqlfluff.utils.reflow.depthmap import DepthMap, StackPosition
 
 
 def parse_ansi_string(sql, config):
@@ -64,3 +64,45 @@ def test_reflow_depthmap_from_raws_and_root(default_config):
     # The depth info dict depends on the sequence so we only need
     # to check those are equal.
     assert dm_direct.depth_info == dm_indirect.depth_info
+
+
+def test_reflow_depthmap_order_by(default_config):
+    """Test depth mapping of an order by clause."""
+    sql = "SELECT * FROM foo ORDER BY bar DESC\n"
+    root = parse_ansi_string(sql, default_config)
+    # Get the `ORDER` and `DESC` segments.
+    order_seg = None
+    desc_seg = None
+    for raw in root.raw_segments:
+        if raw.raw_upper == "ORDER":
+            order_seg = raw
+        elif raw.raw_upper == "DESC":
+            desc_seg = raw
+    # Make sure we find them
+    assert order_seg
+    assert desc_seg
+
+    # Generate a depth map
+    depth_map = DepthMap.from_parent(root)
+    # Check their depth info
+    order_seg_di = depth_map.get_depth_info(order_seg)
+    desc_seg_di = depth_map.get_depth_info(desc_seg)
+    # Make sure they both contain an order by clause.
+    assert frozenset({"base", "orderby_clause"}) in order_seg_di.stack_class_types
+    assert frozenset({"base", "orderby_clause"}) in desc_seg_di.stack_class_types
+    # Get the ID of one and make sure it's in the other
+    order_by_hash = order_seg_di.stack_hashes[
+        order_seg_di.stack_class_types.index(frozenset({"base", "orderby_clause"}))
+    ]
+    assert order_by_hash in order_seg_di.stack_hashes
+    assert order_by_hash in desc_seg_di.stack_hashes
+    # Get the position information
+    order_stack_pos = order_seg_di.stack_positions[order_by_hash]
+    desc_stack_pos = desc_seg_di.stack_positions[order_by_hash]
+    # Make sure the position information is correct
+    print(order_stack_pos)
+    print(desc_stack_pos)
+    assert order_stack_pos == StackPosition(idx=0, len=9, type="start")
+    # NOTE: Even though idx 7 is not the end, the _type_ of this location
+    # is still an "end" because the following elements are non-code.
+    assert desc_stack_pos == StackPosition(idx=7, len=9, type="end")
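
The new test above pins down the semantics of `StackPosition`: `type` is classified on code position only, which is why `DESC` at idx 7 of 9 still counts as an "end" once the trailing newline is discounted. A condensed sketch of the same inspection flow, using only the calls exercised above:

from sqlfluff.utils.reflow.depthmap import DepthMap


def dump_stack_positions(root):
    """Print the StackPosition info for every raw segment of a parsed tree."""
    depth_map = DepthMap.from_parent(root)
    for raw in root.raw_segments:
        info = depth_map.get_depth_info(raw)
        # Each stack hash maps to a StackPosition(idx, len, type), where
        # type ("start"/"end"/...) reflects *code* position at that level.
        print(raw.raw, [info.stack_positions[h] for h in info.stack_hashes])
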
diff --git a/test/utils/reflow/rebreak_test.py b/test/utils/reflow/rebreak_test.py
index de9394b..a422c93 100644
--- a/test/utils/reflow/rebreak_test.py
+++ b/test/utils/reflow/rebreak_test.py
@@ -28,7 +28,7 @@ def parse_ansi_string(sql, config):
         ("select 1+\n2", "select 1\n+ 2"),  # NOTE: Implicit respace.
         ("select\n  1 +\n  2", "select\n  1\n  + 2"),
         ("select\n  1 +\n  -- comment\n  2", "select\n  1\n  -- comment\n  + 2"),
-        # These rely on the details config being for trailing commas
+        # These rely on the default config being for trailing commas
         ("select a,b", "select a,b"),
         ("select a\n,b", "select a,\nb"),
         ("select\n  a\n  , b", "select\n  a,\n  b"),
diff --git a/test/utils/reflow/reindent_test.py b/test/utils/reflow/reindent_test.py
index fd4d488..fea23bc 100644
--- a/test/utils/reflow/reindent_test.py
+++ b/test/utils/reflow/reindent_test.py
@@ -10,9 +10,17 @@ import logging
 import pytest
 
 from sqlfluff.core import Linter
+from sqlfluff.core.parser.segments.base import BaseSegment
 
+from sqlfluff.utils.reflow.helpers import fixes_from_results
 from sqlfluff.utils.reflow.sequence import ReflowSequence
-from sqlfluff.utils.reflow.reindent import deduce_line_indent
+from sqlfluff.utils.reflow.helpers import deduce_line_indent
+from sqlfluff.utils.reflow.reindent import (
+    lint_indent_points,
+    _crawl_indent_points,
+    _IndentPoint,
+    _IndentLine,
+)
 
 
 def parse_ansi_string(sql, config):
@@ -118,3 +126,591 @@ def test_reflow__deduce_line_indent(
         result = deduce_line_indent(target_seg, root)
 
     assert result == indent_out
+
+
+@pytest.mark.parametrize(
+    "raw_sql_in,points_out",
+    [
+        # Trivial
+        (
+            "select 1",
+            [
+                # No point at the start.
+                # Point after select (not newline)
+                _IndentPoint(
+                    idx=1,
+                    indent_impulse=1,
+                    indent_trough=0,
+                    initial_indent_balance=0,
+                    last_line_break_idx=None,
+                    is_line_break=False,
+                    untaken_indents=(),
+                ),
+                # Point after 1 (not newline either)
+                _IndentPoint(
+                    idx=3,
+                    indent_impulse=-1,
+                    indent_trough=-1,
+                    initial_indent_balance=1,
+                    last_line_break_idx=None,
+                    is_line_break=False,
+                    untaken_indents=(1,),
+                ),
+            ],
+        ),
+        (
+            "\nselect 1\n",
+            [
+                # Start point
+                _IndentPoint(
+                    idx=0,
+                    indent_impulse=0,
+                    indent_trough=0,
+                    initial_indent_balance=0,
+                    last_line_break_idx=None,
+                    is_line_break=True,
+                    untaken_indents=(),
+                ),
+                # Point after select (not newline)
+                _IndentPoint(
+                    idx=2,
+                    indent_impulse=1,
+                    indent_trough=0,
+                    initial_indent_balance=0,
+                    last_line_break_idx=0,
+                    is_line_break=False,
+                    untaken_indents=(),
+                ),
+                # Point after 1 (is newline)
+                _IndentPoint(
+                    idx=4,
+                    indent_impulse=-1,
+                    indent_trough=-1,
+                    initial_indent_balance=1,
+                    last_line_break_idx=0,
+                    is_line_break=True,
+                    untaken_indents=(1,),
+                ),
+            ],
+        ),
+        (
+            "select\n1",
+            [
+                # No point at the start.
+                # Point after select (not newline)
+                _IndentPoint(
+                    idx=1,
+                    indent_impulse=1,
+                    indent_trough=0,
+                    initial_indent_balance=0,
+                    last_line_break_idx=None,
+                    is_line_break=True,
+                    untaken_indents=(),
+                ),
+                # Point after 1 (is not newline)
+                _IndentPoint(
+                    idx=3,
+                    indent_impulse=-1,
+                    indent_trough=-1,
+                    initial_indent_balance=1,
+                    last_line_break_idx=1,
+                    is_line_break=False,
+                    untaken_indents=(),
+                ),
+            ],
+        ),
+        # More stretching cases.
+        (
+            "SELECT\n    r.a,\n    s.b\nFROM r\nJOIN s\n    "
+            "ON\n        r.a = s.a\n        AND true",
+            [
+                # No point at the start.
+                # After SELECT
+                _IndentPoint(
+                    idx=1,
+                    indent_impulse=1,
+                    indent_trough=0,
+                    initial_indent_balance=0,
+                    last_line_break_idx=None,
+                    is_line_break=True,
+                    untaken_indents=(),
+                ),
+                _IndentPoint(
+                    idx=9,
+                    indent_impulse=0,
+                    indent_trough=0,
+                    initial_indent_balance=1,
+                    last_line_break_idx=1,
+                    is_line_break=True,
+                    untaken_indents=(),
+                ),
+                # Before FROM
+                _IndentPoint(
+                    idx=15,
+                    indent_impulse=-1,
+                    indent_trough=-1,
+                    initial_indent_balance=1,
+                    last_line_break_idx=9,
+                    is_line_break=True,
+                    untaken_indents=(),
+                ),
+                # Untaken indent before "r"
+                _IndentPoint(
+                    idx=17,
+                    indent_impulse=1,
+                    indent_trough=0,
+                    initial_indent_balance=0,
+                    last_line_break_idx=15,
+                    is_line_break=False,
+                    untaken_indents=(),
+                ),
+                # Before JOIN (-1 balance to take us back to
+                # baseline (in line with FROM))
+                # NOTE: It keeps the untaken indent from the
+                # previous point, but shouldn't use it.
+                _IndentPoint(
+                    idx=19,
+                    indent_impulse=-1,
+                    indent_trough=-1,
+                    initial_indent_balance=1,
+                    last_line_break_idx=15,
+                    is_line_break=True,
+                    untaken_indents=(1,),
+                ),
+                # Untaken indent before "s"
+                _IndentPoint(
+                    idx=21,
+                    indent_impulse=1,
+                    indent_trough=0,
+                    initial_indent_balance=0,
+                    last_line_break_idx=19,
+                    is_line_break=False,
+                    untaken_indents=(),
+                ),
+                # NOTE: this is an interesting one. It's a Dedent-Indent pair.
+                # There's a zero balance, and a trough of -1. We carry in the previous
+                # untaken indent, but shouldn't pass it forward after this.
+                _IndentPoint(
+                    idx=23,
+                    indent_impulse=0,
+                    indent_trough=-1,
+                    initial_indent_balance=1,
+                    last_line_break_idx=19,
+                    is_line_break=True,
+                    untaken_indents=(1,),
+                ),
+                # After ON. Default is indented_on_contents = True, so there is
+                # an indent here. We *SHOULDN'T* have an untaken indent here,
+                # because while there was one at the last point, the trough
+                # of the last point should have cleared it.
+                _IndentPoint(
+                    idx=25,
+                    indent_impulse=1,
+                    indent_trough=0,
+                    initial_indent_balance=1,
+                    last_line_break_idx=23,
+                    is_line_break=True,
+                    untaken_indents=(),
+                ),
+                # Before AND
+                _IndentPoint(
+                    idx=39,
+                    indent_impulse=0,
+                    indent_trough=0,
+                    initial_indent_balance=2,
+                    last_line_break_idx=25,
+                    is_line_break=True,
+                    untaken_indents=(),
+                ),
+                # after "true"
+                _IndentPoint(
+                    idx=43,
+                    indent_impulse=-2,
+                    indent_trough=-2,
+                    initial_indent_balance=2,
+                    last_line_break_idx=39,
+                    is_line_break=False,
+                    untaken_indents=(),
+                ),
+            ],
+        ),
+        (
+            "SELECT *\nFROM t1\nJOIN t2 ON true\nAND true",
+            [
+                # No point at the start.
+                # NOTE: Abbreviated notation, given that much is the same as above.
+                # After SELECT
+                _IndentPoint(1, 1, 0, 0, None, False, ()),
+                _IndentPoint(3, -1, -1, 1, None, True, (1,)),
+                _IndentPoint(5, 1, 0, 0, 3, False, ()),
+                _IndentPoint(7, -1, -1, 1, 3, True, (1,)),
+                # JOIN
+                _IndentPoint(9, 1, 0, 0, 7, False, ()),
+                # TRICKY POINT (we're between "t2" and "ON").
+                # The indent between Join and t2 wasn't taken, but we're
+                # also climbing down from that here. It should be in the
+                # untaken indents _here_ but not passed forward. There is
+                # however another indent opportunity here which ALSO isn't
+                # taken, so that one *should* be passed forward.
+                _IndentPoint(11, 0, -1, 1, 7, False, (1,)),
+                # TRICKY POINT (we're between "ON" and "true").
+                # Default is indented_on_contents = True.
+                # This means that there is an additional indent here.
+                # It's not taken though. The incoming balance of 1
+                # isn't taken yet either (hence a 1 in the untaken indent).
+                _IndentPoint(13, 1, 0, 1, 7, False, (1,)),
+                # Between "true" and "AND".
+                # Balance is 2, but both untaken.
+                _IndentPoint(15, 0, 0, 2, 7, True, (1, 2)),
+                # End point
+                _IndentPoint(19, -2, -2, 2, 15, False, (1, 2)),
+            ],
+        ),
+        # Templated case
+        (
+            "SELECT\n"
+            "    {{ 'a' }}\n"
+            "    {% for c in ['d', 'e'] %}\n"
+            "    ,{{ c }}_val\n"
+            "    {% endfor %}\n",
+            [
+                # No initial indent (this is the first newline).
+                _IndentPoint(1, 1, 0, 0, None, True, ()),
+                # point after a
+                _IndentPoint(3, 0, 0, 1, 1, True, ()),
+                # point after for
+                _IndentPoint(5, 1, 0, 1, 3, True, ()),
+                # point after d_val
+                _IndentPoint(9, -1, -1, 2, 5, True, ()),
+                # point after loop
+                _IndentPoint(11, 1, 0, 1, 9, True, ()),
+                # point after e_val
+                _IndentPoint(15, -2, -2, 2, 11, True, ()),
+                # point after endfor
+                _IndentPoint(17, 0, 0, 0, 15, True, ()),
+            ],
+        ),
+        # Templated case (with consuming whitespace)
+        (
+            "{% for item in [1, 2] -%}\n"
+            "SELECT *\n"
+            "FROM some_table\n"
+            "{{ 'UNION ALL\n' if not loop.last }}\n"
+            "{%- endfor %}",
+            [
+                # No initial indent (this is the first newline).
+                # Importantly this first point - IS a newline
+                # even though that newline segment is consumed
+                # it should still be True here.
+                _IndentPoint(1, 1, 0, 0, None, True, ()),
+                # point between SELECT & *
+                _IndentPoint(3, 1, 0, 1, 1, False, ()),
+                # point after *
+                _IndentPoint(5, -1, -1, 2, 1, True, (2,)),
+                # point after FROM
+                _IndentPoint(7, 1, 0, 1, 5, False, ()),
+                # point after some_table
+                _IndentPoint(9, -1, -1, 2, 5, True, (2,)),
+                # point after ALL (we dedent down to the loop marker).
+                _IndentPoint(13, -1, -1, 1, 9, True, ()),
+                # There should be a loop marker here.
+                # point after loop marker and before SELECT
+                # (we indent back up after the loop).
+                _IndentPoint(15, 1, 0, 0, 13, True, ()),
+                # point between SELECT & *
+                _IndentPoint(17, 1, 0, 1, 15, False, ()),
+                # point after *
+                _IndentPoint(19, -1, -1, 2, 15, True, (2,)),
+                # point after FROM
+                _IndentPoint(21, 1, 0, 1, 19, False, ()),
+                # point after some_table (and before unused placeholder)
+                _IndentPoint(23, -1, -1, 2, 19, True, (2,)),
+                # Point after placeholder and dedenting down to endfor
+                _IndentPoint(25, -1, -1, 1, 23, True, ()),
+                # Point between endfor and end-of-file
+                _IndentPoint(27, 0, 0, 0, 25, False, ()),
+            ],
+        ),
+        # Templated case (with templated newline and indent)
+        (
+            "SELECT\n  {{'1 \n, 2'}}\nFROM foo",
+            [
+                # After SELECT
+                _IndentPoint(1, 1, 0, 0, None, True, ()),
+                # NOTE: The newline inside the tag isn't reported.
+                # After the templated section (hence why 7)
+                _IndentPoint(7, -1, -1, 1, 1, True, ()),
+                # After FROM
+                _IndentPoint(9, 1, 0, 0, 7, False, ()),
+                # After foo
+                _IndentPoint(11, -1, -1, 1, 7, False, (1,)),
+            ],
+        ),
+    ],
+)
+def test_reflow__crawl_indent_points(raw_sql_in, points_out, default_config, caplog):
+    """Test _crawl_indent_points directly."""
+    root = parse_ansi_string(raw_sql_in, default_config)
+    print(root.stringify())
+    seq = ReflowSequence.from_root(root, config=default_config)
+    with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"):
+        points = list(_crawl_indent_points(seq.elements))
+    assert points == points_out
+
+
+@pytest.mark.parametrize(
+    "raw_sql_in,raw_sql_out",
+    [
+        # Trivial
+        (
+            "select 1",
+            "select 1",
+        ),
+        # Initial Indent
+        (
+            "      select 1",
+            "select 1",
+        ),
+        # Trailing Newline
+        (
+            "      select 1\n",
+            "select 1\n",
+        ),
+        # Basic Multiline
+        (
+            "select\n1",
+            "select\n  1",
+        ),
+        # Advanced Multiline
+        (
+            "select\n1+(\n2+3\n),\n4\nfrom foo",
+            "select\n  1+(\n    2+3\n  ),\n  4\nfrom foo",
+        ),
+        (
+            "select\n    1+(\n    2+3\n    ),\n    4\n    from foo",
+            "select\n  1+(\n    2+3\n  ),\n  4\nfrom foo",
+        ),
+        # Multiple untaken indents. We should only indent as many
+        # times as required.
+        (
+            "   select ((((\n1\n))))",
+            "select ((((\n  1\n))))",
+        ),
+        (
+            "select (((\n((\n3\n))\n)))",
+            "select (((\n  ((\n    3\n  ))\n)))",
+        ),
+        # ### Templated Multiline Cases ###
+        # NOTE: the templated tags won't show here, but they
+        # should still be indented.
+        # Trailing tag. NOTE: Last tag indented
+        (
+            "select\n1\n{% if true %}\n+ 2\n{% endif %}",
+            "select\n  1\n  \n    + 2\n  ",
+        ),
+        # Cutting across the parse tree
+        (
+            "select\n1\n{% if true %}\n,2\nFROM a\n{% endif %}",
+            # This set of template tags cuts across the parse
+            # tree. We should indent them appropriately. In this case
+            # that should mean "case 3", picking the lowest of the
+            # existing indents which should mean no indent for either.
+            # We also shouldn't indent the contents between them either
+            # when taking this option.
+            "select\n  1\n\n  ,2\nFROM a\n",
+        ),
+        # Template tags at file ends
+        (
+            "{% if true %}\nSELECT 1\n{% endif %}",
+            "\n  SELECT 1\n",
+        ),
+        # Template loops:
+        (
+            "select\n  0,\n  {% for i in [1, 2, 3] %}\n    {{i}},\n  {% endfor %}\n  4",
+            "select\n  0,\n  \n    1,\n  \n    2,\n  \n    3,\n  \n  4",
+        ),
+        # Correction and handling of hanging indents
+        (
+            "select 1, 2",
+            "select 1, 2",
+        ),
+        (
+            "select 1,\n2",
+            "select\n  1,\n  2",
+        ),
+        (
+            "select 1,\n       2",
+            "select\n  1,\n  2",
+        ),
+        # A hanging example where we're modifying a currently empty point.
+        (
+            "select greatest(1,\n2)",
+            "select greatest(\n  1,\n  2\n)",
+        ),
+        # Test handling of many blank lines.
+        # NOTE:
+        #    1. Initial whitespace should remain, because it's not an indent.
+        #    2. Blank lines should also remain, because they're also not an indent.
+        (
+            "\n\n  \n\nselect\n\n\n\n    \n\n     1\n\n       \n\n",
+            "\n\n  \n\nselect\n\n\n\n    \n\n  1\n\n       \n\n",
+        ),
+        # Templated cases.
+        # NOTE: We're just rendering the fixed file in the templated space
+        # so that for these tests we don't touch the fix routines. That's
+        # why the template tags aren't visible - BUT THEIR INDENTS SHOULD BE.
+        # This one is useful for ensuring the tags have the same indent.
+        # ... first with a FROM
+        (
+            "SELECT\n"
+            "    {{ 'a' }}\n"
+            "    {% for c in ['d', 'e'] %}\n"
+            "    ,{{ c }}_val\n"
+            "    {% endfor %}\n"
+            "FROM foo",
+            "SELECT\n"
+            "  a\n"
+            "  \n"
+            "    ,d_val\n"
+            "  \n"
+            "    ,e_val\n"
+            "  \n"
+            "FROM foo",
+        ),
+        # ... then without a FROM
+        (
+            "SELECT\n"
+            "    {{ 'a' }}\n"
+            "    {% for c in ['d', 'e'] %}\n"
+            "    ,{{ c }}_val\n"
+            "    {% endfor %}\n",
+            "SELECT\n  a\n  \n    ,d_val\n  \n    ,e_val\n  \n",
+        ),
+        # This one is useful for if statements get handled right.
+        # NOTE: There's a template loop in the middle.
+        (
+            "SELECT\n"
+            "  {{ 'a' }}\n"
+            "  {% for c in ['d', 'e'] %}\n"
+            " {% if c == 'd' %}\n"
+            "  ,{{ c }}_val_a\n"
+            "    {% else %}\n"
+            "  ,{{ c }}_val_b\n"
+            "{% endif %}\n"
+            "  {% endfor %}\n",
+            "SELECT\n"
+            "  a\n"
+            "  \n"
+            "    \n"
+            "      ,d_val_a\n"
+            "    \n"
+            "  \n"
+            "    \n"
+            "      ,e_val_b\n"
+            "    \n"
+            "  \n",
+        ),
+        # Test leading templated newlines.
+        # https://github.com/sqlfluff/sqlfluff/issues/4485
+        (
+            "{{ '\\n   \\n   ' }}\nSELECT 1",
+            # NOTE: This looks a little strange, but what's important
+            # here is that it doesn't raise an exception.
+            "\n   \n   \nSELECT 1",
+        ),
+    ],
+)
+def test_reflow__lint_indent_points(raw_sql_in, raw_sql_out, default_config, caplog):
+    """Test the lint_indent_points() method directly.
+
+    Rather than testing directly, for brevity we check
+    the raw output it produces. This results in a more
+    compact test.
+    """
+    root = parse_ansi_string(raw_sql_in, default_config)
+    print(root.stringify())
+    seq = ReflowSequence.from_root(root, config=default_config)
+
+    with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"):
+        elements, results = lint_indent_points(seq.elements, single_indent="  ")
+
+    result_raw = "".join(elem.raw for elem in elements)
+    assert result_raw == raw_sql_out, "Raw Element Check Failed!"
+
+    # Now we've checked the elements - check that applying the fixes gets us to
+    # the same place.
+    print("Results:", results)
+    anchor_info = BaseSegment.compute_anchor_edit_info(fixes_from_results(results))
+    fixed_tree, _, _ = root.apply_fixes(
+        default_config.get("dialect_obj"), "TEST", anchor_info
+    )
+    assert fixed_tree.raw == raw_sql_out, "Element check passed - but fix check failed!"
+
+
+@pytest.mark.parametrize(
+    "indent_line, forced_indents, expected_units",
+    [
+        # Trivial case of a first line.
+        (
+            _IndentLine(0, [_IndentPoint(0, 0, 0, 0, None, False, ())]),
+            [],
+            0,
+        ),
+        # Simple cases of normal lines.
+        (
+            _IndentLine(3, [_IndentPoint(6, 0, 0, 3, 1, True, ())]),
+            [],
+            3,
+        ),
+        (
+            # NOTE: Initial indent for *line* is different to *point*.
+            # The *line* takes precedence.
+            _IndentLine(1, [_IndentPoint(6, 0, 0, 3, 1, True, ())]),
+            [],
+            1,
+        ),
+        # Indents and dedents on the line break.
+        # NOTE: The line indent still takes precedence here.
+        (
+            _IndentLine(3, [_IndentPoint(6, 1, 0, 3, 1, True, ())]),
+            [],
+            3,
+        ),
+        (
+            _IndentLine(3, [_IndentPoint(6, -1, -1, 3, 1, True, ())]),
+            [],
+            3,
+        ),
+        # Handle untaken indents.
+        (
+            _IndentLine(3, [_IndentPoint(6, 0, 0, 3, 1, True, (1,))]),
+            [],
+            2,
+        ),
+        (
+            _IndentLine(3, [_IndentPoint(6, 0, 0, 3, 1, True, (1, 2))]),
+            [],
+            1,
+        ),
+        (
+            _IndentLine(3, [_IndentPoint(6, 0, 0, 3, 1, True, (2,))]),
+            # Forced indent takes us back up.
+            [2],
+            3,
+        ),
+        (
+            _IndentLine(3, [_IndentPoint(6, 0, 0, 3, 1, True, (3,))]),
+            [],
+            2,
+        ),
+        (
+            _IndentLine(3, [_IndentPoint(6, 0, -1, 3, 1, True, (3,))]),
+            # Untaken indent is pruned by trough.
+            [],
+            3,
+        ),
+    ],
+)
+def test_reflow__desired_indent_units(indent_line, forced_indents, expected_units):
+    """Test _IndentLine.desired_indent_units() directly."""
+    assert indent_line.desired_indent_units(forced_indents) == expected_units
diff --git a/tox.ini b/tox.ini
index cda54df..14facc8 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist = generate-fixture-yml, linting, doclinting, ruleslinting, docbuild, cov-init, doctests, py{37,38,39,310}, dbt{017,018,019,020,021,100,130}-py{37,38,39,310}, cov-report, bench, mypy, winpy, dbt{017,018,019,020,021,100,130}-winpy, yamllint
+envlist = generate-fixture-yml, linting, doclinting, ruleslinting, docbuild, cov-init, doctests, py{37,38,39,310}, dbt{110,140}-py{37,38,39,310}, cov-report, bench, mypy, winpy, dbt{110,140}-winpy, yamllint
 
 [testenv]
 passenv = CI, CIRCLECI, CIRCLE_*, HOME, SQLFLUFF_BENCHMARK_API_KEY
@@ -11,30 +11,39 @@ setenv =
     COVERAGE_FILE = .coverage.{envname}
     winpy: TMPDIR = temp_pytest
     # Constrain dbt versions
-    dbt{020,021,100,130}: PIP_CONSTRAINT=constraints/{envname}.txt
+    dbt{110,140,}: PIP_CONSTRAINT=constraints/{envname}.txt
 allowlist_externals =
     make
-pip_pre = true
+pip_pre = false
 deps =
     -rrequirements_dev.txt
-    # Add the example plugin
-    plugins/sqlfluff-plugin-example
-    dbt{020,021,100,130}: dbt-core
-    dbt{020,021,100,130}: dbt-postgres
+    dbt{110,140,}: dbt-core
+    dbt{110,140,}: dbt-postgres
 # Include any other steps necessary for testing below.
 # {posargs} is there to allow us to specify specific tests, which
 # can then be invoked from tox by calling e.g.
 # tox -e py35 -- project/tests/test_file.py::TestClassName::test_method
 commands =
-    # Install the plugins as required
-    dbt{020,021,100,130}: python -m pip install {toxinidir}/plugins/sqlfluff-templater-dbt
+    # Install the plugins as required.
+    # NOTE: We do them here, so that when version numbers update, we don't
+    # get install errors for version conflicts. The dbt templater has a version
+    # number pinned to the same version number of the main sqlfluff library
+    # so it _must_ be installed second in the context of a version which isn't
+    # yet released (and so not available on pypi).
+    dbt{110,140,}: python -m pip install {toxinidir}/plugins/sqlfluff-templater-dbt
+    # Add the example plugin.
+    # NOTE: The trailing comma is important because in the github test suite
+    # the python version is not specified and instead the "py" environment
+    # is invoked. Leaving the trailing comma ensures that this environment
+    # still installs the relevant plugins.
+    {py,winpy}{37,38,39,310,311,}: python -m pip install {toxinidir}/plugins/sqlfluff-plugin-example
     # For the dbt test cases install dependencies.
-    python {toxinidir}/plugins/sqlfluff-templater-dbt/test/generate_packages_yml.py {toxinidir}/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project
-    dbt{020,021,100,130}: dbt deps --project-dir {toxinidir}/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project --profiles-dir {toxinidir}/plugins/sqlfluff-templater-dbt/test/fixtures/dbt
+    dbt{110,140,}: dbt deps --project-dir {toxinidir}/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project --profiles-dir {toxinidir}/plugins/sqlfluff-templater-dbt/test/fixtures/dbt
     # Clean up from previous tests
     python {toxinidir}/util.py clean-tests
     # Run tests
-    pytest -vv -rsfE --cov=sqlfluff --cov-report=xml {posargs: {toxinidir}/test} -m "not integration_test"
+    pytest -vv -rsfE --cov-report=lcov {posargs: {toxinidir}/test}
+    python test/patch_lcov.py
 
 [testenv:cov-init]
 setenv =
@@ -67,13 +76,12 @@ commands = python {toxinidir}/test/generate_parse_fixture_yml.py {posargs}
 
 [testenv:linting]
 skip_install = true
-commands = flake8
-
-[testenv:ruleslinting]
-commands = pytest -vv -rsfE --cov=sqlfluff --cov-report=xml {posargs: {toxinidir}/test} -m "integration_test"
+commands =
+    flake8
+    ruff check .
 
 [testenv:doctests]
-commands = pytest -vv -rsfE --cov=sqlfluff --cov-report=xml --doctest-modules {posargs: {toxinidir}/src}
+commands = pytest -vv -rsfE --doctest-modules {posargs: {toxinidir}/src}
 
 [testenv:yamllint]
 skip_install = true
@@ -81,8 +89,13 @@ deps = yamllint
 commands = yamllint -c .yamllint .
 
 [testenv:doclinting]
-skip_install = true
-commands = doc8 {toxinidir}/docs/source --file-encoding utf8
+deps =
+    -rdocs/requirements.txt
+commands =
+    # Before linting, generate the rule docs.
+    # If we don't we get import errors.
+    python {toxinidir}/docs/generate-rule-docs.py
+    doc8 {toxinidir}/docs/source --file-encoding utf8
 
 [testenv:docbuild]
 deps =
@@ -128,7 +141,7 @@ commands =
 # D107: Don't require docstrings on __init__
 # D105: Don't require docstrings on magic methods
 ignore = W503, D107, D105, D418
-exclude = .git,__pycache__,env,.tox,build,.venv,venv,.coverage.py
+exclude = .git,__pycache__,env,.tox,build,.venv,venv,.coverage.py,plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/osmosis/*.py
 max-line-length = 88
 extend-ignore =
     # See https://github.com/PyCQA/pycodestyle/issues/373
@@ -143,9 +156,25 @@ testpaths = test
 
 [coverage:run]
 source = src/sqlfluff
-omit = src/sqlfluff/__main__.py
+omit =
+    src/sqlfluff/__main__.py
+    plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/osmosis/*.py
 
 [coverage:report]
 exclude_lines =
     sys.version_info
     pragma: no cover
+
+[coverage:paths]
+source =
+    # Local path
+    src/
+    # These are the Github likely source paths
+    D:\a\sqlfluff\sqlfluff\src\
+    D:\a\sqlfluff\sqlfluff\.tox\winpy\Lib\site-packages\
+    /home/runner/work/sqlfluff/sqlfluff/src/
+    /home/runner/work/sqlfluff/sqlfluff/.tox/*/lib/*/site-packages/
+
+[doc8]
+# Ignore auto-generated docs
+ignore-path=docs/source/partials/
diff --git a/util.py b/util.py
index 55e8fa7..52ac337 100644
--- a/util.py
+++ b/util.py
@@ -121,8 +121,8 @@ def benchmark(cmd, runs, from_file):
 
 
 @cli.command()
-@click.option("--new_version_num")
-def prepare_release(new_version_num):
+@click.argument("new_version_num")
+def release(new_version_num):
     """Change version number in the cfg files."""
     api = GhApi(
         owner=os.environ["GITHUB_REPOSITORY_OWNER"],
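
The `util.py` change turns the version number into a positional click argument and renames the command, so a release is now prepared with `python util.py release <version>` rather than `python util.py prepare_release --new_version_num <version>`. A minimal sketch of the option-to-argument distinction in click:

import click


@click.group()
def cli():
    """Container group, mirroring util.py's structure."""


@cli.command()
@click.argument("new_version_num")  # positional: `release 2.0.5`
def release(new_version_num):
    """Change version number in the cfg files."""
    click.echo(f"Preparing release {new_version_num}")


if __name__ == "__main__":
    cli()
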

Debdiff

[The following lists of changes regard files as different if they have different names, permissions or owners.]

Files in second set of .debs but not in first

-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff-2.0.5.egg-info/PKG-INFO
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff-2.0.5.egg-info/dependency_links.txt
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff-2.0.5.egg-info/entry_points.txt
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff-2.0.5.egg-info/requires.txt
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff-2.0.5.egg-info/top_level.txt
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/core/slice_helpers.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/dialects/dialect_databricks.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/dialects/dialect_databricks_keywords.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/dialects/dialect_db2_keywords.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/dialects/dialect_duckdb.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/dialects/dialect_greenplum.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/dialects/dialect_sqlite_keywords.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/aliasing/AL01.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/aliasing/AL02.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/aliasing/AL03.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/aliasing/AL04.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/aliasing/AL05.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/aliasing/AL06.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/aliasing/AL07.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/aliasing/__init__.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/ambiguous/AM01.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/ambiguous/AM02.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/ambiguous/AM03.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/ambiguous/AM04.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/ambiguous/AM05.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/ambiguous/AM06.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/ambiguous/AM07.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/ambiguous/__init__.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/capitalisation/CP01.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/capitalisation/CP02.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/capitalisation/CP03.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/capitalisation/CP04.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/capitalisation/CP05.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/capitalisation/__init__.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/convention/CV01.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/convention/CV02.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/convention/CV03.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/convention/CV04.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/convention/CV05.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/convention/CV06.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/convention/CV07.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/convention/CV08.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/convention/CV09.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/convention/CV10.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/convention/CV11.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/convention/__init__.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/jinja/JJ01.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/jinja/__init__.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/layout/LT01.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/layout/LT02.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/layout/LT03.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/layout/LT04.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/layout/LT05.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/layout/LT06.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/layout/LT07.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/layout/LT08.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/layout/LT09.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/layout/LT10.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/layout/LT11.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/layout/LT12.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/layout/LT13.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/layout/__init__.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/references/RF01.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/references/RF02.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/references/RF03.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/references/RF04.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/references/RF05.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/references/RF06.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/references/__init__.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/structure/ST01.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/structure/ST02.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/structure/ST03.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/structure/ST04.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/structure/ST05.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/structure/ST06.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/structure/ST07.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/structure/ST08.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/structure/__init__.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/tsql/TQ01.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/tsql/__init__.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/utils/identifers.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/utils/testing/logging.py
-rw-r--r--  root/root   /usr/share/doc/sqlfluff/html/_sources/releasenotes.rst.txt
-rw-r--r--  root/root   /usr/share/doc/sqlfluff/html/releasenotes.html

Files in first set of .debs but not in second

-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff-1.4.5.egg-info/PKG-INFO
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff-1.4.5.egg-info/dependency_links.txt
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff-1.4.5.egg-info/entry_points.txt
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff-1.4.5.egg-info/requires.txt
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff-1.4.5.egg-info/top_level.txt
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L001.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L002.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L003.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L004.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L005.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L006.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L007.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L008.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L009.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L010.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L011.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L012.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L013.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L014.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L015.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L016.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L017.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L018.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L019.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L020.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L021.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L022.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L023.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L024.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L025.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L026.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L027.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L028.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L029.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L030.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L031.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L032.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L033.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L034.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L035.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L036.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L037.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L038.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L039.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L040.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L041.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L042.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L043.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L044.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L045.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L046.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L047.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L048.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L049.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L050.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L051.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L052.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L053.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L054.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L055.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L056.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L057.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L058.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L059.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L060.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L061.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L062.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L063.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L064.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L065.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L066.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L067.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L068.py
-rw-r--r--  root/root   /usr/lib/python3/dist-packages/sqlfluff/rules/L071.py

Control files of package sqlfluff: lines which differ (wdiff format)

  • Depends: python3-appdirs, python3-chardet, python3-click, python3-colorama, python3-importlib-metadata | python3 (>> 3.8), python3-jinja2, python3-pathspec, python3-pytest, python3-regex, python3-tblib, python3-toml, python3-toml | python3 (>> 3.11), python3-tqdm, python3-typing-extensions, python3-yaml, python3:any

No differences were encountered between the control files of package sqlfluff-doc
